diff --git a/.github/workflows/issueclose.yml b/.github/workflows/issueclose.yml index b81f23a0..9e4ee0b3 100644 --- a/.github/workflows/issueclose.yml +++ b/.github/workflows/issueclose.yml @@ -14,12 +14,54 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 + - name: Check out LLGo + uses: actions/checkout@v4 + with: + repository: '1351914167/llgo' + path: .llgo + ref: get_pip + - name: Check out LLPyg + uses: actions/checkout@v4 + with: + repository: 'toaction/llpyg' + path: .llpyg + ref: feat/v1 - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.23.x + go-version: 1.24.x - name: Set up Tool - run: go install -v github.com/goplus/llpkgstore/cmd/llpkgstore@latest + run: | + git clone https://github.com/PengPengPeng717/llpkgstore.git + cd llpkgstore + git checkout v3 + go build -o llpkgstore ./cmd/llpkgstore + sudo mv llpkgstore /usr/local/bin/ + - name: Setup LLGo + working-directory: .llgo + run: | + go install -v ./cmd/... + export LLGO_ROOT=$PWD + echo "LLGO_ROOT=$LLGO_ROOT" >> $GITHUB_ENV + echo "LLGO_RPATH_CHANGE=ON" >> $GITHUB_ENV + - name: Set up Python environment + run: | + echo "GOTOOLCHAIN=go1.24.5" >> $GITHUB_ENV + export PYTHONHOME=$LLGO_ROOT/python + export PATH=$PYTHONHOME/bin:$PATH + export DYLD_LIBRARY_PATH=$PYTHONHOME/lib + export PKG_CONFIG_PATH=$PYTHONHOME/lib/pkgconfig + echo "PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "PATH=$PATH" >> $GITHUB_ENV + echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" >> $GITHUB_ENV + echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH" >> $GITHUB_ENV + - name: Setup LLPyg + working-directory: .llpyg + run: | + cd _xtool + llgo install ./... + cd .. + go install -v ./cmd/... 
- name: Run cleaner process env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/labelcreate.yml b/.github/workflows/labelcreate.yml index 258eed4a..10f2b5cd 100644 --- a/.github/workflows/labelcreate.yml +++ b/.github/workflows/labelcreate.yml @@ -24,12 +24,54 @@ jobs: ref: main path: .main fetch-depth: 0 + - name: Check out LLGo + uses: actions/checkout@v4 + with: + repository: '1351914167/llgo' + path: .llgo + ref: get_pip + - name: Check out LLPyg + uses: actions/checkout@v4 + with: + repository: 'toaction/llpyg' + path: .llpyg + ref: feat/v1 - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.23.x + go-version: 1.24.x - name: Set up Tool - run: go install -v github.com/goplus/llpkgstore/cmd/llpkgstore@latest + run: | + git clone https://github.com/PengPengPeng717/llpkgstore.git + cd llpkgstore + git checkout v3 + go build -o llpkgstore ./cmd/llpkgstore + sudo mv llpkgstore /usr/local/bin/ + - name: Setup LLGo + working-directory: .llgo + run: | + go install -v ./cmd/... + export LLGO_ROOT=$PWD + echo "LLGO_ROOT=$LLGO_ROOT" >> $GITHUB_ENV + echo "LLGO_RPATH_CHANGE=ON" >> $GITHUB_ENV + - name: Set up Python environment + run: | + echo "GOTOOLCHAIN=go1.24.5" >> $GITHUB_ENV + export PYTHONHOME=$LLGO_ROOT/python + export PATH=$PYTHONHOME/bin:$PATH + export DYLD_LIBRARY_PATH=$PYTHONHOME/lib + export PKG_CONFIG_PATH=$PYTHONHOME/lib/pkgconfig + echo "PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "PATH=$PATH" >> $GITHUB_ENV + echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" >> $GITHUB_ENV + echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH" >> $GITHUB_ENV + - name: Setup LLPyg + working-directory: .llpyg + run: | + cd _xtool + llgo install ./... + cd .. + go install -v ./cmd/... 
- name: Copy llpkgstore.json to root continue-on-error: true run: | diff --git a/.github/workflows/postprocessing.yml b/.github/workflows/postprocessing.yml index b28a2428..ff4bf56d 100644 --- a/.github/workflows/postprocessing.yml +++ b/.github/workflows/postprocessing.yml @@ -23,32 +23,97 @@ jobs: matrix: os: - macos-13 - - macos-latest - - ubuntu-24.04 - - ubuntu-24.04-arm + # - macos-latest + # - ubuntu-24.04 + # - ubuntu-24.04-arm runs-on: ${{matrix.os}} steps: - name: Checkout uses: actions/checkout@v4 + - name: Check out LLGo (for Python packages) + uses: actions/checkout@v4 + with: + repository: '1351914167/llgo' + path: .llgo + ref: get_pip + - name: Check out LLPyg (for Python packages) + uses: actions/checkout@v4 + with: + repository: 'toaction/llpyg' + path: .llpyg + ref: feat/v1 - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.23.x + go-version: 1.24.x - name: Set up Tool - run: go install -v github.com/goplus/llpkgstore/cmd/llpkgstore@latest - - name: Install dependencies + run: | + git clone https://github.com/PengPengPeng717/llpkgstore.git + cd llpkgstore + # git checkout version + # git checkout v2 + git checkout v3 + go build -o llpkgstore ./cmd/llpkgstore + sudo mv llpkgstore /usr/local/bin/ + - name: Install dependencies (macOS) if: startsWith(matrix.os, 'macos') run: | - brew update - brew install cmake conan cjson - - name: Install dependencies - if: startsWith(matrix.os, 'ubuntu') + # 先卸载可能冲突的cmake + brew uninstall cmake --ignore-dependencies || true + # 安装依赖包 + brew install llvm@19 bdw-gc openssl libffi libuv cmake conan lld@19 + brew link --force libffi + echo "$(brew --prefix llvm@19)/bin" >> $GITHUB_PATH + echo "$(brew --prefix lld@19)/bin" >> $GITHUB_PATH + echo "Dependencies installed for both Python and C++ packages" + # - name: Install dependencies (Ubuntu) + # if: startsWith(matrix.os, 'ubuntu') + # run: | + # sudo apt-get update + # sudo apt-get install -y cmake python3 python3-pip pkg-config + # python3 -m pip 
install conan + # echo "Dependencies installed for both Python and C++ packages" + - name: Setup LLGo (for Python packages) + working-directory: .llgo + run: | + go install -v ./cmd/... + export LLGO_ROOT=$PWD + echo "LLGO_ROOT=$LLGO_ROOT" >> $GITHUB_ENV + echo "LLGO_RPATH_CHANGE=ON" >> $GITHUB_ENV + - name: Set up Python environment (for Python packages) + run: | + echo "GOTOOLCHAIN=go1.24.5" >> $GITHUB_ENV + # 保存原始Python环境变量 + echo "ORIGINAL_PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "ORIGINAL_PATH=$PATH" >> $GITHUB_ENV + # 设置LLGo Python环境 + export PYTHONHOME=$LLGO_ROOT/python + export PATH=$PYTHONHOME/bin:$PATH + export DYLD_LIBRARY_PATH=$PYTHONHOME/lib + export PKG_CONFIG_PATH=$PYTHONHOME/lib/pkgconfig + echo "PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "PATH=$PATH" >> $GITHUB_ENV + echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" >> $GITHUB_ENV + echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH" >> $GITHUB_ENV + - name: Setup LLPyg (for Python packages) + working-directory: .llpyg run: | - sudo apt-get update - sudo apt-get install -y cmake python3 python3-pip pkg-config - python3 -m pip install conan - - name: Set up Conan - run: conan profile detect + cd _xtool + llgo install ./... + cd .. + go install -v ./cmd/... 
+ - name: Set up Conan (for C++ packages) + run: | + # 临时恢复系统Python环境用于Conan + if [ -n "$ORIGINAL_PYTHONHOME" ]; then + export PYTHONHOME="$ORIGINAL_PYTHONHOME" + else + unset PYTHONHOME + fi + # 恢复原始PATH,但保留必要的工具路径 + export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$PATH" + echo "Using system Python for Conan: $(which python3 || which python)" + conan profile detect - name: Run release process env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -56,87 +121,225 @@ jobs: - name: Upload binary file to artifact uses: actions/upload-artifact@v4 with: - name: ${{env.BIN_FILENAME}} - path: ${{env.BIN_PATH}} + name: ${{env.BIN_FILENAME || 'llpkg-binary'}} + path: ${{env.BIN_PATH || './dist'}} retention-days: 1 post-processing: runs-on: ubuntu-latest needs: [release-binary-files] steps: - - name: Checkout + - name: Checkout current branch uses: actions/checkout@v4 with: - ref: main + ref: ${{ github.head_ref || github.ref_name }} path: .main - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.23.x + go-version: 1.24.x + - name: Install GitHub CLI + run: | + type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y) + curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \ + && sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ + && sudo apt update \ + && sudo apt install gh -y + - name: Configure GitHub CLI + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token + gh auth status - name: Set up Tool - run: go install -v github.com/goplus/llpkgstore/cmd/llpkgstore@latest - - name: Checkout to website + run: | + git clone https://github.com/PengPengPeng717/llpkgstore.git + cd llpkgstore + # git checkout 
version + # git checkout v2 + git checkout v3 + go build -o llpkgstore ./cmd/llpkgstore + sudo mv llpkgstore /usr/local/bin/ + - name: Checkout to website (for C++ packages) uses: actions/checkout@v4 with: ref: website path: .website - - name: Copy llpkgstore.json to root + - name: Copy llpkgstore.json to root (for C++ packages) continue-on-error: true run: | ls .website .website/public cp .website/public/llpkgstore.json .main rm -rf .website - - name: Run post-processing process + - name: Detect package types and process accordingly working-directory: .main env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: llpkgstore postprocessing - - name: Checkout website branch + run: | + echo "Current branch: $(git branch --show-current)" + echo "Current commit: $(git log -1 --pretty=format:%s)" + + # 从 commit 消息中提取源分支名并切换 + COMMIT_MSG=$(git log -1 --pretty=format:%s) + if [[ $COMMIT_MSG =~ Merge\ pull\ request.*from\ PengPengPeng717/([^[:space:]]+) ]]; then + SOURCE_BRANCH="${BASH_REMATCH[1]}" + echo "Detected source branch from commit message: $SOURCE_BRANCH" + git fetch origin $SOURCE_BRANCH:$SOURCE_BRANCH + git checkout $SOURCE_BRANCH + echo "Switched to branch: $(git branch --show-current)" + # 切换到源分支后,重新获取commit消息 + COMMIT_MSG=$(git log -1 --pretty=format:%s) + echo "Updated commit message after branch switch: $COMMIT_MSG" + fi + + # 简化的包检测逻辑 + echo "=== Detecting packages to process ===" + + # 获取目标包(从commit消息) + TARGET_PACKAGE="" + if echo "$COMMIT_MSG" | grep -q "Release-as:"; then + TARGET_PACKAGE=$(echo "$COMMIT_MSG" | sed 's/Release-as: \([^/]*\)\/.*/\1/') + echo "Target package from commit: $TARGET_PACKAGE" + fi + + # 检测所有包 + PYTHON_PACKAGES="" + CPP_PACKAGES="" + + # 检查根目录下的包 + for dir in */; do + if [ -d "$dir" ] && [ -f "$dir/llpkg.cfg" ]; then + package_name=$(basename "$dir") + if [[ ! 
"$package_name" =~ ^(py|\.github|\.git|public|\.DS_Store)$ ]]; then + # 检查是否是符号链接 + if [ -L "$dir" ]; then + # 符号链接,跳过(这些是Python包的符号链接) + echo "Skipping symlink: $package_name" + continue + fi + PACKAGE_TYPE=$(grep -o '"type":\s*"[^"]*"' "$dir/llpkg.cfg" | cut -d'"' -f4 || echo "") + if [ "$PACKAGE_TYPE" = "python" ]; then + PYTHON_PACKAGES="$PYTHON_PACKAGES $package_name" + else + CPP_PACKAGES="$CPP_PACKAGES $package_name" + fi + fi + fi + done + + # 检查py目录下的包 + if [ -d "py" ]; then + for subdir in py/*/; do + if [ -d "$subdir" ] && [ -f "$subdir/llpkg.cfg" ]; then + package_name=$(basename "$subdir") + PYTHON_PACKAGES="$PYTHON_PACKAGES $package_name" + fi + done + fi + + echo "Python packages: $PYTHON_PACKAGES" + echo "C++ packages: $CPP_PACKAGES" + + # 处理Python包(只有当目标包是Python包时才处理) + if [ -n "$PYTHON_PACKAGES" ] && [ -n "$TARGET_PACKAGE" ] && echo "$PYTHON_PACKAGES" | grep -q "$TARGET_PACKAGE"; then + echo "=== Processing Python packages ===" + echo "Target package $TARGET_PACKAGE is a Python package, processing it" + + # 为Python包创建符号链接并处理 + echo "Processing Python package: $TARGET_PACKAGE" + + # 创建符号链接 + if [ -d "$TARGET_PACKAGE" ]; then + rm -rf "$TARGET_PACKAGE" + fi + ln -s "py/$TARGET_PACKAGE" "$TARGET_PACKAGE" + + # 执行postprocessing(在根目录中运行,让llpkgstore自己处理包目录查找) + llpkgstore postprocessing + + # 清理符号链接,避免影响后续包的处理 + rm -f "$TARGET_PACKAGE" + elif [ -n "$PYTHON_PACKAGES" ]; then + echo "=== Skipping Python packages ===" + echo "Target package is not a Python package, skipping Python package processing" + echo "Available Python packages: $PYTHON_PACKAGES" + fi + + # 处理C++包(只有当目标包是C++包时才处理) + if [ -n "$CPP_PACKAGES" ] && [ -n "$TARGET_PACKAGE" ] && echo "$CPP_PACKAGES" | grep -q "$TARGET_PACKAGE"; then + echo "=== Processing C++ packages ===" + echo "Target package $TARGET_PACKAGE is a C++ package, processing it" + + # 处理目标C++包 + echo "Processing C++ package: $TARGET_PACKAGE" + # 执行postprocessing(在根目录中运行,让llpkgstore自己处理包目录查找) + llpkgstore postprocessing + elif 
[ -n "$CPP_PACKAGES" ]; then + echo "=== Skipping C++ packages ===" + echo "Target package is not a C++ package, skipping C++ package processing" + echo "Available C++ packages: $CPP_PACKAGES" + fi + - name: Checkout website branch (for C++ packages) uses: actions/checkout@v4 with: ref: website path: .website - - name: Move llpkgstore.json to website - run: mv .main/llpkgstore.json .website/public - - name: Commit and push changes - working-directory: .website + - name: Move llpkgstore.json to website (for C++ packages) run: | - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" - git add public/llpkgstore.json - git commit -m "Update llpkgstore.json" - git push - build-and-upload: - runs-on: ubuntu-latest - needs: [post-processing] - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: website - - name: Setup Node.js - uses: actions/setup-node@v3 - with: - node-version: 23.8.0 - - run: corepack enable - - name: Install Dependencies - uses: borales/actions-yarn@v4 - with: - cmd: install - - name: Build - uses: borales/actions-yarn@v4 - with: - cmd: build - - name: Upload artifact - uses: actions/upload-pages-artifact@v3 - with: - path: 'dist' - deploy: - needs: build-and-upload - runs-on: ubuntu-latest - permissions: - pages: write - id-token: write - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 + if [ -f ".main/llpkgstore.json" ]; then + mv .main/llpkgstore.json .website/public + echo "Moved llpkgstore.json to website for C++ packages" + else + echo "No llpkgstore.json found for C++ packages" + fi + # - name: Commit and push changes (for C++ packages) + # working-directory: .website + # run: | + # git config --local user.email "action@github.com" + # git config --local user.name "GitHub Action" + # git add public/llpkgstore.json + # git commit -m "Update llpkgstore.json" || echo "No changes to commit" + # git push + - name: Handle Python packages upload + 
working-directory: .main + run: | + if [ -d "py" ]; then + echo "=== Python packages ready for upload ===" + echo "Python packages processed (no llpkgstore.json required)" + ls -la py/ + else + echo "No py directory found" + fi +# build-and-upload: +# runs-on: ubuntu-latest +# needs: [post-processing] +# steps: +# - name: Checkout code +# uses: actions/checkout@v4 +# with: +# ref: website +# - name: Setup Node.js +# uses: actions/setup-node@v3 +# with: +# node-version: 23.8.0 +# - run: corepack enable +# - name: Install Dependencies +# uses: borales/actions-yarn@v4 +# with: +# cmd: install +# - name: Build +# uses: borales/actions-yarn@v4 +# with: +# cmd: build +# - name: Upload artifact +# uses: actions/upload-pages-artifact@v3 +# with: +# path: 'dist' +# deploy: +# needs: build-and-upload +# runs-on: ubuntu-latest +# permissions: +# pages: write +# id-token: write +# steps: +# - name: Deploy to GitHub Pages +# id: deployment +# uses: actions/deploy-pages@v4 \ No newline at end of file diff --git a/.github/workflows/verification.yml b/.github/workflows/verification.yml index 65fb003c..8ccd03b1 100644 --- a/.github/workflows/verification.yml +++ b/.github/workflows/verification.yml @@ -14,106 +14,274 @@ on: - '.gitignore' jobs: - llcppg-verification: + llpkg-verification: name: Verify Go Module strategy: matrix: os: - macos-13 - - macos-latest - - ubuntu-24.04 - - ubuntu-24.04-arm + # - macos-latest + # - ubuntu-24.04 + # - ubuntu-24.04-arm llvm: [19] - llgo: [e68355d94f9f9ec17caae6e0d397bd55f8c5ec33] - llcppg: [v0.7.3] runs-on: ${{matrix.os}} steps: - name: Checkout uses: actions/checkout@v4 - - name: Check out LLGo + - name: Check out LLGo (for Python packages) uses: actions/checkout@v4 with: - repository: 'goplus/llgo' + repository: '1351914167/llgo' path: .llgo - ref: ${{matrix.llgo}} - - name: Check out LLCppg + ref: get_pip + - name: Check out LLPyg (for Python packages) + uses: actions/checkout@v4 + with: + repository: 'toaction/llpyg' + path: .llpyg + ref: 
feat/v1 + - name: Check out LLCppg (for C++ packages) uses: actions/checkout@v4 with: repository: 'goplus/llcppg' path: .llcppg - ref: ${{matrix.llcppg}} + ref: v0.7.3 - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.23.x + go-version: 1.24.x - name: Set up Tool run: | - go install -v github.com/goplus/llpkgstore/cmd/llpkgstore@latest - - name: Install dependencies + git clone https://github.com/PengPengPeng717/llpkgstore.git + cd llpkgstore + git checkout v3 + go build -o llpkgstore ./cmd/llpkgstore + sudo mv llpkgstore /usr/local/bin/ + - name: Install dependencies (macOS) if: startsWith(matrix.os, 'macos') run: | + # 先卸载可能冲突的cmake + brew uninstall cmake --ignore-dependencies || true + # 安装依赖包 brew install llvm@${{matrix.llvm}} bdw-gc openssl libffi libuv cmake conan lld@${{matrix.llvm}} brew link --force libffi echo "$(brew --prefix llvm@${{matrix.llvm}})/bin" >> $GITHUB_PATH echo "$(brew --prefix lld@${{matrix.llvm}})/bin" >> $GITHUB_PATH - - name: Install dependencies - if: startsWith(matrix.os, 'ubuntu') - run: | - sudo apt-get update - echo "deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${{matrix.llvm}} main" | sudo tee /etc/apt/sources.list.d/llvm.list - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - - sudo apt-get install -y llvm-${{matrix.llvm}}-dev \ - clang-${{matrix.llvm}} \ - libclang-${{matrix.llvm}}-dev \ - lld-${{matrix.llvm}} \ - libunwind-${{matrix.llvm}}-dev \ - libc++-${{matrix.llvm}}-dev \ - pkg-config libgc-dev libssl-dev zlib1g-dev libffi-dev libuv1-dev - echo "/usr/lib/llvm-${{matrix.llvm}}/bin" >> $GITHUB_PATH - - name: Pre setup Conan - if: startsWith(matrix.os, 'ubuntu') + echo "Dependencies installed for both Python and C++ packages" + # - name: Install dependencies (Ubuntu) + # if: startsWith(matrix.os, 'ubuntu') + # run: | + # sudo apt-get update + # echo "deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${{matrix.llvm}} main" | 
sudo tee /etc/apt/sources.list.d/llvm.list + # wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - + # sudo apt-get install -y llvm-${{matrix.llvm}}-dev \ + # clang-${{matrix.llvm}} \ + # libclang-${{matrix.llvm}}-dev \ + # lld-${{matrix.llvm}} \ + # libunwind-${{matrix.llvm}}-dev \ + # libc++-${{matrix.llvm}}-dev \ + # pkg-config libgc-dev libssl-dev zlib1g-dev libffi-dev libuv1-dev + # echo "/usr/lib/llvm-${{matrix.llvm}}/bin" >> $GITHUB_PATH + - name: Pre setup Python and Conan (for Python packages) + if: startsWith(matrix.os, 'macos') run: | - sudo apt install -y python3 - python3 -m pip install conan - - name: Set up Conan + python3 -m pip install conan pydump --break-system-packages + echo "Python dependencies installed for Python packages" + - name: Pre setup Conan (for C++ packages) + if: startsWith(matrix.os, 'macos') run: | + # 临时恢复系统Python环境用于Conan + if [ -n "$ORIGINAL_PYTHONHOME" ]; then + export PYTHONHOME="$ORIGINAL_PYTHONHOME" + else + unset PYTHONHOME + fi + # 恢复原始PATH,但保留必要的工具路径 + export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$PATH" + echo "Using system Python for Conan: $(which python3 || which python)" conan profile detect - - name: Setup LLGo + - name: Setup LLGo (for Python packages) working-directory: .llgo run: | - go install -v ./cmd/llgo/... - export LLGO_ROOT=$(pwd) + go install -v ./cmd/... 
+ export LLGO_ROOT=$PWD echo "LLGO_ROOT=$LLGO_ROOT" >> $GITHUB_ENV - # https://github.com/goplus/llgo/issues/1135 echo "LLGO_RPATH_CHANGE=ON" >> $GITHUB_ENV - - name: Setup LLCppg + - name: Set up Python environment (for Python packages) + run: | + echo "GOTOOLCHAIN=go1.24.5" >> $GITHUB_ENV + # 保存原始Python环境变量 + echo "ORIGINAL_PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "ORIGINAL_PATH=$PATH" >> $GITHUB_ENV + # 设置LLGo Python环境 + export PYTHONHOME=$LLGO_ROOT/python + export PATH=$PYTHONHOME/bin:$PATH + export DYLD_LIBRARY_PATH=$PYTHONHOME/lib + export PKG_CONFIG_PATH=$PYTHONHOME/lib/pkgconfig + echo "PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV + echo "PATH=$PATH" >> $GITHUB_ENV + echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" >> $GITHUB_ENV + echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH" >> $GITHUB_ENV + - name: Setup LLPyg (for Python packages) + working-directory: .llpyg + run: | + cd _xtool + llgo install ./... + cd .. + go install -v ./cmd/... + - name: Setup LLCppg (for C++ packages) working-directory: .llcppg run: | llgo install ./_xtool/llcppsymg llgo install ./_xtool/llcppsigfetch - go install github.com/goplus/llcppg/cmd/llcppcfg@${{matrix.llcppg}} - go install github.com/goplus/llcppg/cmd/gogensig@${{matrix.llcppg}} - go install github.com/goplus/llcppg/cmd/llcppg@${{matrix.llcppg}} + go install github.com/goplus/llcppg/cmd/llcppcfg@v0.7.3 + go install github.com/goplus/llcppg/cmd/gogensig@v0.7.3 + go install github.com/goplus/llcppg/cmd/llcppg@v0.7.3 - name: Get changed files id: changed-files uses: tj-actions/changed-files@v45 - - name: Checkout to website + with: + files: | + **/llpkg.cfg + **/llpyg.cfg + **/llcppg.cfg + **/*.go + - name: Debug changed files + run: | + echo "Changed files: ${{ steps.changed-files.outputs.all_changed_files }}" + echo "Any changed: ${{ steps.changed-files.outputs.any_changed }}" + echo "All changed: ${{ steps.changed-files.outputs.all_changed }}" + + - name: Detect package types and prepare for verification + run: | + echo "=== Package 
Type Detection and Preparation ===" + echo "Current working directory: $(pwd)" + echo "Directory contents:" + ls -la + + # 初始化包列表 + PYTHON_PACKAGES="" + CPP_PACKAGES="" + + # 检查根目录下的包 + echo "=== Checking root directory packages ===" + for dir in */; do + if [ -d "$dir" ] && [ -f "$dir/llpkg.cfg" ]; then + package_name=$(basename "$dir") + # 跳过非包目录 + if [[ ! "$package_name" =~ ^(py|\.github|\.git|public|\.DS_Store)$ ]]; then + echo "Found package in root: $package_name" + # 检查包类型 + PACKAGE_TYPE=$(grep -o '"type":\s*"[^"]*"' "$dir/llpkg.cfg" | cut -d'"' -f4 || echo "") + if [ "$PACKAGE_TYPE" = "python" ]; then + echo " -> Python package" + PYTHON_PACKAGES="$PYTHON_PACKAGES $package_name" + else + echo " -> C++ package (default)" + CPP_PACKAGES="$CPP_PACKAGES $package_name" + fi + fi + fi + done + + # 检查py目录下的Python包 + echo "=== Checking py directory packages ===" + if [ -d "py" ]; then + for subdir in py/*/; do + if [ -d "$subdir" ] && [ -f "$subdir/llpkg.cfg" ]; then + package_name=$(basename "$subdir") + echo "Found Python package in py: $package_name" + # 验证确实是Python包 + PACKAGE_TYPE=$(grep -o '"type":\s*"[^"]*"' "$subdir/llpkg.cfg" | cut -d'"' -f4 || echo "") + if [ "$PACKAGE_TYPE" = "python" ]; then + echo " -> Confirmed Python package" + PYTHON_PACKAGES="$PYTHON_PACKAGES $package_name" + else + echo " -> Warning: Package in py/ but not marked as python type" + fi + fi + done + else + echo "py directory does not exist" + fi + + # 合并所有包列表 + PACKAGES_TO_VERIFY="$CPP_PACKAGES $PYTHON_PACKAGES" + + echo "=== Package Summary ===" + echo "Python packages: $PYTHON_PACKAGES" + echo "C++ packages: $CPP_PACKAGES" + echo "All packages to verify: $PACKAGES_TO_VERIFY" + + # 设置环境变量供后续步骤使用 + echo "PACKAGES_TO_VERIFY=$PACKAGES_TO_VERIFY" >> $GITHUB_ENV + echo "PYTHON_PACKAGES=$PYTHON_PACKAGES" >> $GITHUB_ENV + echo "CPP_PACKAGES=$CPP_PACKAGES" >> $GITHUB_ENV + + # 为Python包创建符号链接以便llpkgstore verification能够找到 + if [ -n "$PYTHON_PACKAGES" ]; then + echo "=== Creating symlinks for 
Python packages ===" + for package in $PYTHON_PACKAGES; do + if [ -d "py/$package" ]; then + echo "Creating symlink for Python package: $package" + if [ -d "$package" ]; then + rm -rf "$package" + fi + ln -s "py/$package" "$package" + echo "Created symlink: $package -> py/$package" + fi + done + fi + + echo "=== Final directory structure ===" + ls -la + echo "=== Package verification preparation completed ===" + + - name: Checkout to website (for C++ packages) uses: actions/checkout@v4 with: ref: website path: .website - - name: Copy llpkgstore.json to root + - name: Copy llpkgstore.json to root (for C++ packages) continue-on-error: true run: | ls .website .website/public cp .website/public/llpkgstore.json . rm -rf .website + - name: Verification & Prebuilt env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} - run: llpkgstore verification + run: | + echo "Starting verification for packages: $PACKAGES_TO_VERIFY" + echo "Python packages: $PYTHON_PACKAGES" + echo "C++ packages: $CPP_PACKAGES" + + # 根据包类型执行相应的验证逻辑 + if [ -n "$PYTHON_PACKAGES" ]; then + echo "=== Verifying Python packages ===" + echo "Using Python environment for verification" + # Python包验证使用LLGo Python环境 + llpkgstore verification + fi + + if [ -n "$CPP_PACKAGES" ]; then + echo "=== Verifying C++ packages ===" + echo "Using C++ environment for verification" + # C++包验证使用系统环境 + # 临时恢复系统Python环境用于C++包验证 + if [ -n "$ORIGINAL_PYTHONHOME" ]; then + export PYTHONHOME="$ORIGINAL_PYTHONHOME" + else + unset PYTHONHOME + fi + # 恢复原始PATH,但保留必要的工具路径 + export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$PATH" + llpkgstore verification + fi + + echo "=== Verification completed ===" - name: Run demotest process env: LLPKG_PATH: ${{ env.LLPKG_PATH }} diff --git a/.gitignore b/.gitignore index 4985a527..672c2bb3 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,10 @@ -*.pc \ No newline at end of file +*.pc + +# macOS system files +.DS_Store 
+.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db \ No newline at end of file diff --git a/README.md b/README.md index c9ef7c12..97313bd6 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,2 @@ # llpkg -LLGo packages generated by llcppg +LLGo packages generated by llpyg diff --git a/bzip3/_demo/test/test.go b/bzip3/_demo/test/test.go deleted file mode 100644 index 2068b849..00000000 --- a/bzip3/_demo/test/test.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/goplus/lib/c" - zip "github.com/goplus/llpkg/bzip3" -) - -func main() { - fmt.Println(c.GoString(zip.Version())) - input := []byte("Hello, bzip3 compression!") - output := make([]byte, zip.Bound(uintptr(len(input)))) - outputSize := uintptr(len(output)) - - errCode := zip.Compress(1024*1024, &input[0], &output[0], uintptr(len(input)), &outputSize) - if errCode != zip.OK { - fmt.Println("Compression failed with error code:", errCode) - return - } - fmt.Println("Compression successful. Compressed size:", outputSize) - - decompressed := make([]byte, len(input)) - decompressedSize := uintptr(len(decompressed)) - errCode = zip.Decompress(&output[0], &decompressed[0], outputSize, &decompressedSize) - if errCode != zip.OK { - fmt.Println("Decompression failed with error code:", errCode) - return - } - fmt.Println("Decompression successful. 
Decompressed data:", string(decompressed)) -} diff --git a/bzip3/bzip3_autogen_link.go b/bzip3/bzip3_autogen_link.go deleted file mode 100644 index 033ac646..00000000 --- a/bzip3/bzip3_autogen_link.go +++ /dev/null @@ -1,5 +0,0 @@ -package bzip3 - -import _ "github.com/goplus/lib/c" - -const LLGoPackage string = "link: $(pkg-config --libs bzip3);" diff --git a/bzip3/go.mod b/bzip3/go.mod deleted file mode 100644 index 93fd945d..00000000 --- a/bzip3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/goplus/llpkg/bzip3 - -go 1.20 - -require github.com/goplus/lib v0.2.0 diff --git a/bzip3/go.sum b/bzip3/go.sum deleted file mode 100644 index 512980a5..00000000 --- a/bzip3/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/goplus/lib v0.2.0 h1:AjqkN1XK5H23wZMMlpaUYAMCDAdSBQ2NMFrLtSh7W4g= -github.com/goplus/lib v0.2.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/bzip3/libbz3.go b/bzip3/libbz3.go deleted file mode 100644 index 2c93f6ef..00000000 --- a/bzip3/libbz3.go +++ /dev/null @@ -1,188 +0,0 @@ -package bzip3 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const OK = 0 - -type State struct { - Unused [8]uint8 -} - -/** - * @brief Get bzip3 version. - */ -//go:linkname Version C.bz3_version -func Version() *c.Char - -/** - * @brief Get the last error number associated with a given state. - */ -// llgo:link (*State).LastError C.bz3_last_error -func (recv_ *State) LastError() c.Int8T { - return 0 -} - -/** - * @brief Return a user-readable message explaining the cause of the last error. - */ -// llgo:link (*State).Strerror C.bz3_strerror -func (recv_ *State) Strerror() *c.Char { - return nil -} - -/** - * @brief Construct a new block encoder state, which will encode blocks as big as the given block size. - * The decoder will be able to decode blocks at most as big as the given block size. 
- * Returns NULL in case allocation fails or the block size is not between 65K and 511M - */ -//go:linkname New C.bz3_new -func New(block_size c.Int32T) *State - -/** - * @brief Free the memory occupied by a block encoder state. - */ -// llgo:link (*State).Free C.bz3_free -func (recv_ *State) Free() { -} - -/** - * @brief Return the recommended size of the output buffer for the compression functions. - */ -//go:linkname Bound C.bz3_bound -func Bound(input_size c.SizeT) c.SizeT - -/** - * @brief Compress a frame. This function does not support parallelism - * by itself, consider using the low level `bz3_encode_blocks()` function instead. - * Using the low level API might provide better performance. - * Returns a bzip3 error code; BZ3_OK when the operation is successful. - * Make sure to set out_size to the size of the output buffer before the operation; - * out_size must be at least equal to `bz3_bound(in_size)'. - */ -//go:linkname Compress C.bz3_compress -func Compress(block_size c.Uint32T, in *c.Uint8T, out *c.Uint8T, in_size c.SizeT, out_size *c.SizeT) c.Int - -/** - * @brief Decompress a frame. This function does not support parallelism - * by itself, consider using the low level `bz3_decode_blocks()` function instead. - * Using the low level API might provide better performance. - * Returns a bzip3 error code; BZ3_OK when the operation is successful. - * Make sure to set out_size to the size of the output buffer before the operation. - */ -//go:linkname Decompress C.bz3_decompress -func Decompress(in *c.Uint8T, out *c.Uint8T, in_size c.SizeT, out_size *c.SizeT) c.Int - -/** - * @brief Calculate the minimal memory required for compression with the given block size. - * This includes all internal buffers and state structures. This calculates the amount of bytes - * that will be allocated by a call to `bz3_new()`. 
- * - * @details Memory allocation and usage patterns: - * - * bz3_new(): - * - Allocates all memory upfront: - * - Core state structure (sizeof(struct bz3_state)) - * - Swap buffer (bz3_bound(block_size) bytes) - * - SAIS array (BWT_BOUND(block_size) * sizeof(int32_t) bytes) - * - LZP lookup table ((1 << LZP_DICTIONARY) * sizeof(int32_t) bytes) - * - Compression state (sizeof(state)) - * - All memory remains allocated until bz3_free() - * - * Additional memory may be used depending on API used from here. - * - * # Low Level APIs - * - * 1. bz3_encode_block() / bz3_decode_block(): - * - Uses pre-allocated memory from bz3_new() - * - No additional memory allocation except for libsais (usually ~16KiB) - * - Peak memory usage of physical RAM varies with compression stages: - * - LZP: Uses LZP lookup table + swap buffer - * - BWT: Uses SAIS array + swap buffer - * - Entropy coding: Uses compression state (cm_state) + swap buffer - * - * Using the higher level API, `bz3_compress`, expect an additional allocation - * of `bz3_bound(block_size)`. - * - * In the parallel version `bz3_encode_blocks`, each thread gets its own state, - * so memory usage is `n_threads * bz3_compress_memory_needed()`. - * - * # High Level APIs - * - * 1. bz3_compress(): - * - Allocates additional temporary compression buffer (bz3_bound(block_size) bytes) - * in addition to the memory amount returned by this method call and libsais. - * - Everything is freed after compression completes - * - * 2. bz3_decompress(): - * - Allocates additional temporary compression buffer (bz3_bound(block_size) bytes) - * in addition to the memory amount returned by this method call and libsais. - * - Everything is freed after compression completes - * - * Memory remains constant during operation, with except of some small allocations from libsais during - * BWT stage. That is not accounted by this function, though it usually amounts to ~16KiB, negligible. 
- * The worst case of BWT is 2*block_size technically speaking. - * - * No dynamic (re)allocation occurs outside of that. - * - * @param block_size The block size to be used for compression - * @return The total number of bytes required for compression, or 0 if block_size is invalid - */ -//go:linkname MinMemoryNeeded C.bz3_min_memory_needed -func MinMemoryNeeded(block_size c.Int32T) c.SizeT - -/** - * @brief Encode a single block. Returns the amount of bytes written to `buffer'. - * `buffer' must be able to hold at least `bz3_bound(size)' bytes. The size must not - * exceed the block size associated with the state. - */ -// llgo:link (*State).EncodeBlock C.bz3_encode_block -func (recv_ *State) EncodeBlock(buffer *c.Uint8T, size c.Int32T) c.Int32T { - return 0 -} - -/** - * @brief Decode a single block. - * - * `buffer' must be able to hold at least `bz3_bound(orig_size)' bytes - * in order to ensure decompression will succeed for all possible bzip3 blocks. - * - * In most (but not all) cases, `orig_size` should usually be sufficient. - * If it is not sufficient, you must allocate a buffer of size `bz3_bound(orig_size)` temporarily. - * - * If `buffer_size` is too small, `BZ3_ERR_DATA_SIZE_TOO_SMALL` will be returned. - * The size must not exceed the block size associated with the state. - * - * @param buffer_size The size of the buffer at 'buffer' - * @param compressed_size The size of the compressed data in 'buffer' - * @param orig_size The original size of the data before compression. - */ -// llgo:link (*State).DecodeBlock C.bz3_decode_block -func (recv_ *State) DecodeBlock(buffer *c.Uint8T, buffer_size c.SizeT, compressed_size c.Int32T, orig_size c.Int32T) c.Int32T { - return 0 -} - -/** - * @brief Check if using original file size as buffer size is sufficient for decompressing - * a block at `block` pointer. 
- * - * @param block Pointer to the compressed block data - * @param block_size Size of the block buffer in bytes (must be at least 13 bytes for header) - * @param orig_size Size of the original uncompressed data - * @return 1 if original size is sufficient, 0 if insufficient, -1 on header error (insufficient buffer size) - * - * @remarks - * - * This function is useful for external APIs using the low level block encoding API, - * `bz3_encode_block`. You would normally call this directly after `bz3_encode_block` - * on the block that has been output. - * - * The purpose of this function is to prevent encoding blocks that would require an additional - * malloc at decompress time. - * The goal is to prevent erroring with `BZ3_ERR_DATA_SIZE_TOO_SMALL`, thus - * in turn - */ -//go:linkname OrigSizeSufficientForDecode C.bz3_orig_size_sufficient_for_decode -func OrigSizeSufficientForDecode(block *c.Uint8T, block_size c.SizeT, orig_size c.Int32T) c.Int diff --git a/bzip3/llcppg.cfg b/bzip3/llcppg.cfg deleted file mode 100644 index 2d1240e0..00000000 --- a/bzip3/llcppg.cfg +++ /dev/null @@ -1,22 +0,0 @@ -{ - "name": "bzip3", - "cflags": "$(pkg-config --cflags bzip3)", - "libs": "$(pkg-config --libs bzip3)", - "include": [ - "libbz3.h" - ], - "trimPrefixes": ["bz3_","BZ3_"], - "cplusplus": false, - "deps": [], - "keepUnderScore": false, - "impl": [ - { - "files": [], - "cond": { - "os": [], - "arch": [] - } - } - ], - "mix": false -} diff --git a/bzip3/llcppg.pub b/bzip3/llcppg.pub deleted file mode 100644 index 42f9c942..00000000 --- a/bzip3/llcppg.pub +++ /dev/null @@ -1 +0,0 @@ -bz3_state State \ No newline at end of file diff --git a/bzip3/llpkg.cfg b/bzip3/llpkg.cfg deleted file mode 100644 index db765289..00000000 --- a/bzip3/llpkg.cfg +++ /dev/null @@ -1,8 +0,0 @@ -{ - "upstream": { - "package": { - "name": "bzip3", - "version": "1.5.1" - } - } -} \ No newline at end of file diff --git a/libtool/_demo/test/test.go b/libtool/_demo/test/test.go deleted file mode 100644 
index 40b9f308..00000000 --- a/libtool/_demo/test/test.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libtool" -) - -func main() { - fmt.Println("Simple libtool demonstration") - - // Initialize libtool - ret := libtool.Dlinit() - if ret != 0 { - fmt.Println("Failed to initialize libtool:", c.GoString(libtool.Dlerror())) - return - } - fmt.Println("Successfully initialized libtool") - - // Try to load a common library (libc) - libName := "libc.so.6" // Linux style - handle := libtool.Dlopen(c.Str(libName)) - if handle == nil { - libName = "libc.dylib" // macOS style - handle = libtool.Dlopen(c.Str(libName)) - } - if handle == nil { - libName = "c" // Generic style - handle = libtool.Dlopen(c.Str(libName)) - } - - if handle != nil { - fmt.Printf("Successfully opened %s\n", libName) - - // Try to find a common function (printf) - symPtr := libtool.Dlsym(handle, c.Str("printf")) - if symPtr != nil { - fmt.Println("Found 'printf' function") - } else { - fmt.Println("Symbol 'printf' not found:", c.GoString(libtool.Dlerror())) - } - - // Close the library - libtool.Dlclose(handle) - fmt.Println("Closed library") - } else { - fmt.Println("Could not open any standard library:", c.GoString(libtool.Dlerror())) - } - - // Clean up libtool - libtool.Dlexit() - fmt.Println("Successfully cleaned up libtool") -} diff --git a/libtool/go.mod b/libtool/go.mod deleted file mode 100644 index c8b6d58e..00000000 --- a/libtool/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/goplus/llpkg/libtool - -go 1.20 - -require github.com/goplus/lib v0.2.0 diff --git a/libtool/go.sum b/libtool/go.sum deleted file mode 100644 index 512980a5..00000000 --- a/libtool/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/goplus/lib v0.2.0 h1:AjqkN1XK5H23wZMMlpaUYAMCDAdSBQ2NMFrLtSh7W4g= -github.com/goplus/lib v0.2.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/libtool/libtool_autogen_link.go 
b/libtool/libtool_autogen_link.go deleted file mode 100644 index 60f6280d..00000000 --- a/libtool/libtool_autogen_link.go +++ /dev/null @@ -1,5 +0,0 @@ -package libtool - -import _ "github.com/goplus/lib/c" - -const LLGoPackage string = "link: $(pkg-config --libs libtool);" diff --git a/libtool/llcppg.cfg b/libtool/llcppg.cfg deleted file mode 100644 index f99f38b7..00000000 --- a/libtool/llcppg.cfg +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "libtool", - "cflags": "$(pkg-config --cflags libtool)", - "libs": "$(pkg-config --libs libtool)", - "include": [ - "ltdl.h", - "libltdl/lt_dlloader.h", - "libltdl/lt_error.h", - "libltdl/lt_system.h" - ], - "trimPrefixes": ["lt__","lt_", "LT_"], - "cplusplus": false, - "deps": [], - "keepUnderScore": false, - "impl": [ - { - "files": [], - "cond": { - "os": [], - "arch": [] - } - } - ], - "mix": false -} diff --git a/libtool/llcppg.pub b/libtool/llcppg.pub deleted file mode 100644 index 4bd09b5a..00000000 --- a/libtool/llcppg.pub +++ /dev/null @@ -1,20 +0,0 @@ -lt__advise Advise -lt__handle Handle -lt_dladvise Dladvise -lt_dlhandle Dlhandle -lt_dlhandle_interface DlhandleInterface -lt_dlinfo Dlinfo -lt_dlinterface_id DlinterfaceId -lt_dlloader Dlloader -lt_dlloader_exit DlloaderExit -lt_dlloader_init DlloaderInit -lt_dlloader_priority DlloaderPriority -lt_dlpreload_callback_func DlpreloadCallbackFunc -lt_dlsymlist Dlsymlist -lt_dlvtable Dlvtable -lt_find_sym FindSym -lt_get_vtable GetVtable -lt_module Module -lt_module_close ModuleClose -lt_module_open ModuleOpen -lt_user_data UserData \ No newline at end of file diff --git a/libtool/llpkg.cfg b/libtool/llpkg.cfg deleted file mode 100644 index 0dd40066..00000000 --- a/libtool/llpkg.cfg +++ /dev/null @@ -1,8 +0,0 @@ -{ - "upstream": { - "package": { - "name": "libtool", - "version": "2.4.7" - } - } -} \ No newline at end of file diff --git a/libtool/lt_dlloader.go b/libtool/lt_dlloader.go deleted file mode 100644 index c0a6aaef..00000000 --- a/libtool/lt_dlloader.go +++ 
/dev/null @@ -1,75 +0,0 @@ -package libtool - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const DLLOADER_H = 1 - -type Dlloader c.Pointer -type Module c.Pointer -type UserData c.Pointer - -type Advise struct { - Unused [8]uint8 -} -type Dladvise *Advise - -// llgo:type C -type ModuleOpen func(UserData, *c.Char, Dladvise) Module - -// llgo:type C -type ModuleClose func(UserData, Module) c.Int - -// llgo:type C -type FindSym func(UserData, Module, *c.Char) c.Pointer - -// llgo:type C -type DlloaderInit func(UserData) c.Int - -// llgo:type C -type DlloaderExit func(UserData) c.Int -type DlloaderPriority c.Int - -const ( - DLLOADER_PREPEND DlloaderPriority = 0 - DLLOADER_APPEND DlloaderPriority = 1 -) - -/* -This structure defines a module loader, as populated by the get_vtable - - entry point of each loader. -*/ -type Dlvtable struct { - Name *c.Char - SymPrefix *c.Char - ModuleOpen *ModuleOpen - ModuleClose *ModuleClose - FindSym *FindSym - DlloaderInit *DlloaderInit - DlloaderExit *DlloaderExit - DlloaderData UserData - Priority DlloaderPriority -} - -// llgo:link (*Dlvtable).DlloaderAdd C.lt_dlloader_add -func (recv_ *Dlvtable) DlloaderAdd() c.Int { - return 0 -} - -//go:linkname DlloaderNext C.lt_dlloader_next -func DlloaderNext(loader Dlloader) Dlloader - -//go:linkname DlloaderRemove C.lt_dlloader_remove -func DlloaderRemove(name *c.Char) *Dlvtable - -//go:linkname DlloaderFind C.lt_dlloader_find -func DlloaderFind(name *c.Char) *Dlvtable - -//go:linkname DlloaderGet C.lt_dlloader_get -func DlloaderGet(loader Dlloader) *Dlvtable - -// llgo:type C -type GetVtable func(UserData) *Dlvtable diff --git a/libtool/lt_error.go b/libtool/lt_error.go deleted file mode 100644 index 4e4bc685..00000000 --- a/libtool/lt_error.go +++ /dev/null @@ -1,38 +0,0 @@ -package libtool - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const ERROR_H = 1 -const ( - ERROR_UNKNOWN c.Int = 0 - ERROR_DLOPEN_NOT_SUPPORTED c.Int = 1 - ERROR_INVALID_LOADER c.Int = 2 - 
ERROR_INIT_LOADER c.Int = 3 - ERROR_REMOVE_LOADER c.Int = 4 - ERROR_FILE_NOT_FOUND c.Int = 5 - ERROR_DEPLIB_NOT_FOUND c.Int = 6 - ERROR_NO_SYMBOLS c.Int = 7 - ERROR_CANNOT_OPEN c.Int = 8 - ERROR_CANNOT_CLOSE c.Int = 9 - ERROR_SYMBOL_NOT_FOUND c.Int = 10 - ERROR_NO_MEMORY c.Int = 11 - ERROR_INVALID_HANDLE c.Int = 12 - ERROR_BUFFER_OVERFLOW c.Int = 13 - ERROR_INVALID_ERRORCODE c.Int = 14 - ERROR_SHUTDOWN c.Int = 15 - ERROR_CLOSE_RESIDENT_MODULE c.Int = 16 - ERROR_INVALID_MUTEX_ARGS c.Int = 17 - ERROR_INVALID_POSITION c.Int = 18 - ERROR_CONFLICTING_FLAGS c.Int = 19 - ERROR_MAX c.Int = 20 -) - -/* These functions are only useful from inside custom module loaders. */ -//go:linkname Dladderror C.lt_dladderror -func Dladderror(diagnostic *c.Char) c.Int - -//go:linkname Dlseterror C.lt_dlseterror -func Dlseterror(errorcode c.Int) c.Int diff --git a/libtool/lt_system.go b/libtool/lt_system.go deleted file mode 100644 index 88694fdc..00000000 --- a/libtool/lt_system.go +++ /dev/null @@ -1,8 +0,0 @@ -package libtool - -import _ "unsafe" - -const SYSTEM_H = 1 -const FILENAME_MAX = 2048 -const PATHSEP_CHAR = ":" -const READTEXT_MODE = "r" diff --git a/libtool/ltdl.go b/libtool/ltdl.go deleted file mode 100644 index 55ddea2f..00000000 --- a/libtool/ltdl.go +++ /dev/null @@ -1,150 +0,0 @@ -package libtool - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const LTDL_H = 1 - -type Handle struct { - Unused [8]uint8 -} -type Dlhandle *Handle - -/* Initialisation and finalisation functions for libltdl. */ -//go:linkname Dlinit C.lt_dlinit -func Dlinit() c.Int - -//go:linkname Dlexit C.lt_dlexit -func Dlexit() c.Int - -/* Module search path manipulation. 
*/ -//go:linkname Dladdsearchdir C.lt_dladdsearchdir -func Dladdsearchdir(search_dir *c.Char) c.Int - -//go:linkname Dlinsertsearchdir C.lt_dlinsertsearchdir -func Dlinsertsearchdir(before *c.Char, search_dir *c.Char) c.Int - -//go:linkname Dlsetsearchpath C.lt_dlsetsearchpath -func Dlsetsearchpath(search_path *c.Char) c.Int - -//go:linkname Dlgetsearchpath C.lt_dlgetsearchpath -func Dlgetsearchpath() *c.Char - -//go:linkname Dlforeachfile C.lt_dlforeachfile -func Dlforeachfile(search_path *c.Char, func_ func(*c.Char, c.Pointer) c.Int, data c.Pointer) c.Int - -/* User module loading advisors. */ -//go:linkname DladviseInit C.lt_dladvise_init -func DladviseInit(advise *Dladvise) c.Int - -//go:linkname DladviseDestroy C.lt_dladvise_destroy -func DladviseDestroy(advise *Dladvise) c.Int - -//go:linkname DladviseExt C.lt_dladvise_ext -func DladviseExt(advise *Dladvise) c.Int - -//go:linkname DladviseResident C.lt_dladvise_resident -func DladviseResident(advise *Dladvise) c.Int - -//go:linkname DladviseLocal C.lt_dladvise_local -func DladviseLocal(advise *Dladvise) c.Int - -//go:linkname DladviseGlobal C.lt_dladvise_global -func DladviseGlobal(advise *Dladvise) c.Int - -//go:linkname DladvisePreload C.lt_dladvise_preload -func DladvisePreload(advise *Dladvise) c.Int - -/* Portable libltdl versions of the system dlopen() API. */ -//go:linkname Dlopen C.lt_dlopen -func Dlopen(filename *c.Char) Dlhandle - -//go:linkname Dlopenext C.lt_dlopenext -func Dlopenext(filename *c.Char) Dlhandle - -//go:linkname Dlopenadvise C.lt_dlopenadvise -func Dlopenadvise(filename *c.Char, advise Dladvise) Dlhandle - -//go:linkname Dlsym C.lt_dlsym -func Dlsym(handle Dlhandle, name *c.Char) c.Pointer - -//go:linkname Dlerror C.lt_dlerror -func Dlerror() *c.Char - -//go:linkname Dlclose C.lt_dlclose -func Dlclose(handle Dlhandle) c.Int - -/* -A preopened symbol. Arrays of this type comprise the exported - - symbols for a dlpreopened module. 
-*/ -type Dlsymlist struct { - Name *c.Char - Address c.Pointer -} - -// llgo:type C -type DlpreloadCallbackFunc func(Dlhandle) c.Int - -// llgo:link (*Dlsymlist).Dlpreload C.lt_dlpreload -func (recv_ *Dlsymlist) Dlpreload() c.Int { - return 0 -} - -// llgo:link (*Dlsymlist).DlpreloadDefault C.lt_dlpreload_default -func (recv_ *Dlsymlist) DlpreloadDefault() c.Int { - return 0 -} - -//go:linkname DlpreloadOpen C.lt_dlpreload_open -func DlpreloadOpen(originator *c.Char, func_ DlpreloadCallbackFunc) c.Int - -type DlinterfaceId c.Pointer - -// llgo:type C -type DlhandleInterface func(Dlhandle, *c.Char) c.Int - -//go:linkname DlinterfaceRegister C.lt_dlinterface_register -func DlinterfaceRegister(id_string *c.Char, iface DlhandleInterface) DlinterfaceId - -//go:linkname DlinterfaceFree C.lt_dlinterface_free -func DlinterfaceFree(key DlinterfaceId) - -//go:linkname DlcallerSetData C.lt_dlcaller_set_data -func DlcallerSetData(key DlinterfaceId, handle Dlhandle, data c.Pointer) c.Pointer - -//go:linkname DlcallerGetData C.lt_dlcaller_get_data -func DlcallerGetData(key DlinterfaceId, handle Dlhandle) c.Pointer - -/* Read only information pertaining to a loaded module. */ - -type Dlinfo struct { - Filename *c.Char - Name *c.Char - RefCount c.Int - IsResident c.Uint - IsSymglobal c.Uint - IsSymlocal c.Uint -} - -//go:linkname Dlgetinfo C.lt_dlgetinfo -func Dlgetinfo(handle Dlhandle) *Dlinfo - -//go:linkname DlhandleIterate C.lt_dlhandle_iterate -func DlhandleIterate(iface DlinterfaceId, place Dlhandle) Dlhandle - -//go:linkname DlhandleFetch C.lt_dlhandle_fetch -func DlhandleFetch(iface DlinterfaceId, module_name *c.Char) Dlhandle - -//go:linkname DlhandleMap C.lt_dlhandle_map -func DlhandleMap(iface DlinterfaceId, func_ func(Dlhandle, c.Pointer) c.Int, data c.Pointer) c.Int - -/* Deprecated module residency management API. 
*/ -//go:linkname Dlmakeresident C.lt_dlmakeresident -func Dlmakeresident(handle Dlhandle) c.Int - -//go:linkname Dlisresident C.lt_dlisresident -func Dlisresident(handle Dlhandle) c.Int diff --git a/libxml2/HTMLparser.go b/libxml2/HTMLparser.go deleted file mode 100644 index 3f27ce49..00000000 --- a/libxml2/HTMLparser.go +++ /dev/null @@ -1,226 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type HtmlParserCtxt ParserCtxt -type HtmlParserCtxtPtr ParserCtxtPtr -type HtmlParserNodeInfo ParserNodeInfo -type HtmlSAXHandler SAXHandler -type HtmlSAXHandlerPtr SAXHandlerPtr -type HtmlParserInput ParserInput -type HtmlParserInputPtr ParserInputPtr -type HtmlDocPtr DocPtr -type HtmlNodePtr NodePtr - -type X_htmlElemDesc struct { - Name *c.Char - StartTag c.Char - EndTag c.Char - SaveEndTag c.Char - Empty c.Char - Depr c.Char - Dtd c.Char - Isinline c.Char - Desc *c.Char - Subelts **c.Char - Defaultsubelt *c.Char - AttrsOpt **c.Char - AttrsDepr **c.Char - AttrsReq **c.Char -} -type HtmlElemDesc X_htmlElemDesc -type HtmlElemDescPtr *HtmlElemDesc - -type X_htmlEntityDesc struct { - Value c.Uint - Name *c.Char - Desc *c.Char -} -type HtmlEntityDesc X_htmlEntityDesc -type HtmlEntityDescPtr *HtmlEntityDesc - -//go:linkname X__htmlDefaultSAXHandler C.__htmlDefaultSAXHandler -func X__htmlDefaultSAXHandler() *SAXHandlerV1 - -/* - * There is only few public functions. 
- */ -//go:linkname HtmlInitAutoClose C.htmlInitAutoClose -func HtmlInitAutoClose() - -// llgo:link (*Char).HtmlTagLookup C.htmlTagLookup -func (recv_ *Char) HtmlTagLookup() *HtmlElemDesc { - return nil -} - -// llgo:link (*Char).HtmlEntityLookup C.htmlEntityLookup -func (recv_ *Char) HtmlEntityLookup() *HtmlEntityDesc { - return nil -} - -//go:linkname HtmlEntityValueLookup C.htmlEntityValueLookup -func HtmlEntityValueLookup(value c.Uint) *HtmlEntityDesc - -//go:linkname HtmlIsAutoClosed C.htmlIsAutoClosed -func HtmlIsAutoClosed(doc HtmlDocPtr, elem HtmlNodePtr) c.Int - -//go:linkname HtmlAutoCloseTag C.htmlAutoCloseTag -func HtmlAutoCloseTag(doc HtmlDocPtr, name *Char, elem HtmlNodePtr) c.Int - -//go:linkname HtmlParseEntityRef C.htmlParseEntityRef -func HtmlParseEntityRef(ctxt HtmlParserCtxtPtr, str **Char) *HtmlEntityDesc - -//go:linkname HtmlParseCharRef C.htmlParseCharRef -func HtmlParseCharRef(ctxt HtmlParserCtxtPtr) c.Int - -//go:linkname HtmlParseElement C.htmlParseElement -func HtmlParseElement(ctxt HtmlParserCtxtPtr) - -//go:linkname HtmlNewParserCtxt C.htmlNewParserCtxt -func HtmlNewParserCtxt() HtmlParserCtxtPtr - -// llgo:link (*HtmlSAXHandler).HtmlNewSAXParserCtxt C.htmlNewSAXParserCtxt -func (recv_ *HtmlSAXHandler) HtmlNewSAXParserCtxt(userData c.Pointer) HtmlParserCtxtPtr { - return nil -} - -//go:linkname HtmlCreateMemoryParserCtxt C.htmlCreateMemoryParserCtxt -func HtmlCreateMemoryParserCtxt(buffer *c.Char, size c.Int) HtmlParserCtxtPtr - -//go:linkname HtmlParseDocument C.htmlParseDocument -func HtmlParseDocument(ctxt HtmlParserCtxtPtr) c.Int - -// llgo:link (*Char).HtmlSAXParseDoc C.htmlSAXParseDoc -func (recv_ *Char) HtmlSAXParseDoc(encoding *c.Char, sax HtmlSAXHandlerPtr, userData c.Pointer) HtmlDocPtr { - return nil -} - -// llgo:link (*Char).HtmlParseDoc C.htmlParseDoc -func (recv_ *Char) HtmlParseDoc(encoding *c.Char) HtmlDocPtr { - return nil -} - -//go:linkname HtmlCreateFileParserCtxt C.htmlCreateFileParserCtxt -func 
HtmlCreateFileParserCtxt(filename *c.Char, encoding *c.Char) HtmlParserCtxtPtr - -//go:linkname HtmlSAXParseFile C.htmlSAXParseFile -func HtmlSAXParseFile(filename *c.Char, encoding *c.Char, sax HtmlSAXHandlerPtr, userData c.Pointer) HtmlDocPtr - -//go:linkname HtmlParseFile C.htmlParseFile -func HtmlParseFile(filename *c.Char, encoding *c.Char) HtmlDocPtr - -//go:linkname UTF8ToHtml C.UTF8ToHtml -func UTF8ToHtml(out *c.Char, outlen *c.Int, in *c.Char, inlen *c.Int) c.Int - -//go:linkname HtmlEncodeEntities C.htmlEncodeEntities -func HtmlEncodeEntities(out *c.Char, outlen *c.Int, in *c.Char, inlen *c.Int, quoteChar c.Int) c.Int - -// llgo:link (*Char).HtmlIsScriptAttribute C.htmlIsScriptAttribute -func (recv_ *Char) HtmlIsScriptAttribute() c.Int { - return 0 -} - -//go:linkname HtmlHandleOmittedElem C.htmlHandleOmittedElem -func HtmlHandleOmittedElem(val c.Int) c.Int - -/** - * Interfaces for the Push mode. - */ -//go:linkname HtmlCreatePushParserCtxt C.htmlCreatePushParserCtxt -func HtmlCreatePushParserCtxt(sax HtmlSAXHandlerPtr, user_data c.Pointer, chunk *c.Char, size c.Int, filename *c.Char, enc CharEncoding) HtmlParserCtxtPtr - -//go:linkname HtmlParseChunk C.htmlParseChunk -func HtmlParseChunk(ctxt HtmlParserCtxtPtr, chunk *c.Char, size c.Int, terminate c.Int) c.Int - -//go:linkname HtmlFreeParserCtxt C.htmlFreeParserCtxt -func HtmlFreeParserCtxt(ctxt HtmlParserCtxtPtr) - -type HtmlParserOption c.Int - -const ( - HTML_PARSE_RECOVER HtmlParserOption = 1 - HTML_PARSE_NODEFDTD HtmlParserOption = 4 - HTML_PARSE_NOERROR HtmlParserOption = 32 - HTML_PARSE_NOWARNING HtmlParserOption = 64 - HTML_PARSE_PEDANTIC HtmlParserOption = 128 - HTML_PARSE_NOBLANKS HtmlParserOption = 256 - HTML_PARSE_NONET HtmlParserOption = 2048 - HTML_PARSE_NOIMPLIED HtmlParserOption = 8192 - HTML_PARSE_COMPACT HtmlParserOption = 65536 - HTML_PARSE_IGNORE_ENC HtmlParserOption = 2097152 -) - -//go:linkname HtmlCtxtReset C.htmlCtxtReset -func HtmlCtxtReset(ctxt HtmlParserCtxtPtr) - 
-//go:linkname HtmlCtxtUseOptions C.htmlCtxtUseOptions -func HtmlCtxtUseOptions(ctxt HtmlParserCtxtPtr, options c.Int) c.Int - -// llgo:link (*Char).HtmlReadDoc C.htmlReadDoc -func (recv_ *Char) HtmlReadDoc(URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr { - return nil -} - -//go:linkname HtmlReadFile C.htmlReadFile -func HtmlReadFile(URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlReadMemory C.htmlReadMemory -func HtmlReadMemory(buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlReadFd C.htmlReadFd -func HtmlReadFd(fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlReadIO C.htmlReadIO -func HtmlReadIO(ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlCtxtParseDocument C.htmlCtxtParseDocument -func HtmlCtxtParseDocument(ctxt HtmlParserCtxtPtr, input ParserInputPtr) HtmlDocPtr - -//go:linkname HtmlCtxtReadDoc C.htmlCtxtReadDoc -func HtmlCtxtReadDoc(ctxt ParserCtxtPtr, cur *Char, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlCtxtReadFile C.htmlCtxtReadFile -func HtmlCtxtReadFile(ctxt ParserCtxtPtr, filename *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlCtxtReadMemory C.htmlCtxtReadMemory -func HtmlCtxtReadMemory(ctxt ParserCtxtPtr, buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlCtxtReadFd C.htmlCtxtReadFd -func HtmlCtxtReadFd(ctxt ParserCtxtPtr, fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -//go:linkname HtmlCtxtReadIO C.htmlCtxtReadIO -func HtmlCtxtReadIO(ctxt ParserCtxtPtr, ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) HtmlDocPtr - -type HtmlStatus c.Int - -const ( - HTML_NA HtmlStatus = 0 - HTML_INVALID HtmlStatus = 1 - 
HTML_DEPRECATED HtmlStatus = 2 - HTML_VALID HtmlStatus = 4 - HTML_REQUIRED HtmlStatus = 12 -) - -/* Using htmlElemDesc rather than name here, to emphasise the fact - that otherwise there's a lookup overhead -*/ -// llgo:link (*HtmlElemDesc).HtmlAttrAllowed C.htmlAttrAllowed -func (recv_ *HtmlElemDesc) HtmlAttrAllowed(*Char, c.Int) HtmlStatus { - return 0 -} - -// llgo:link (*HtmlElemDesc).HtmlElementAllowedHere C.htmlElementAllowedHere -func (recv_ *HtmlElemDesc) HtmlElementAllowedHere(*Char) c.Int { - return 0 -} - -// llgo:link (*HtmlElemDesc).HtmlElementStatusHere C.htmlElementStatusHere -func (recv_ *HtmlElemDesc) HtmlElementStatusHere(*HtmlElemDesc) HtmlStatus { - return 0 -} - -//go:linkname HtmlNodeStatus C.htmlNodeStatus -func HtmlNodeStatus(HtmlNodePtr, c.Int) HtmlStatus diff --git a/libxml2/HTMLtree.go b/libxml2/HTMLtree.go deleted file mode 100644 index 0e032cec..00000000 --- a/libxml2/HTMLtree.go +++ /dev/null @@ -1,66 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -// llgo:link (*Char).HtmlNewDoc C.htmlNewDoc -func (recv_ *Char) HtmlNewDoc(ExternalID *Char) HtmlDocPtr { - return nil -} - -// llgo:link (*Char).HtmlNewDocNoDtD C.htmlNewDocNoDtD -func (recv_ *Char) HtmlNewDocNoDtD(ExternalID *Char) HtmlDocPtr { - return nil -} - -//go:linkname HtmlGetMetaEncoding C.htmlGetMetaEncoding -func HtmlGetMetaEncoding(doc HtmlDocPtr) *Char - -//go:linkname HtmlSetMetaEncoding C.htmlSetMetaEncoding -func HtmlSetMetaEncoding(doc HtmlDocPtr, encoding *Char) c.Int - -//go:linkname HtmlDocDumpMemory C.htmlDocDumpMemory -func HtmlDocDumpMemory(cur DocPtr, mem **Char, size *c.Int) - -//go:linkname HtmlDocDumpMemoryFormat C.htmlDocDumpMemoryFormat -func HtmlDocDumpMemoryFormat(cur DocPtr, mem **Char, size *c.Int, format c.Int) - -//go:linkname HtmlDocDump C.htmlDocDump -func HtmlDocDump(f *c.FILE, cur DocPtr) c.Int - -//go:linkname HtmlSaveFile C.htmlSaveFile -func HtmlSaveFile(filename *c.Char, cur DocPtr) c.Int - -//go:linkname 
HtmlNodeDump C.htmlNodeDump -func HtmlNodeDump(buf BufferPtr, doc DocPtr, cur NodePtr) c.Int - -//go:linkname HtmlNodeDumpFile C.htmlNodeDumpFile -func HtmlNodeDumpFile(out *c.FILE, doc DocPtr, cur NodePtr) - -//go:linkname HtmlNodeDumpFileFormat C.htmlNodeDumpFileFormat -func HtmlNodeDumpFileFormat(out *c.FILE, doc DocPtr, cur NodePtr, encoding *c.Char, format c.Int) c.Int - -//go:linkname HtmlSaveFileEnc C.htmlSaveFileEnc -func HtmlSaveFileEnc(filename *c.Char, cur DocPtr, encoding *c.Char) c.Int - -//go:linkname HtmlSaveFileFormat C.htmlSaveFileFormat -func HtmlSaveFileFormat(filename *c.Char, cur DocPtr, encoding *c.Char, format c.Int) c.Int - -//go:linkname HtmlNodeDumpFormatOutput C.htmlNodeDumpFormatOutput -func HtmlNodeDumpFormatOutput(buf OutputBufferPtr, doc DocPtr, cur NodePtr, encoding *c.Char, format c.Int) - -//go:linkname HtmlDocContentDumpOutput C.htmlDocContentDumpOutput -func HtmlDocContentDumpOutput(buf OutputBufferPtr, cur DocPtr, encoding *c.Char) - -//go:linkname HtmlDocContentDumpFormatOutput C.htmlDocContentDumpFormatOutput -func HtmlDocContentDumpFormatOutput(buf OutputBufferPtr, cur DocPtr, encoding *c.Char, format c.Int) - -//go:linkname HtmlNodeDumpOutput C.htmlNodeDumpOutput -func HtmlNodeDumpOutput(buf OutputBufferPtr, doc DocPtr, cur NodePtr, encoding *c.Char) - -// llgo:link (*Char).HtmlIsBooleanAttr C.htmlIsBooleanAttr -func (recv_ *Char) HtmlIsBooleanAttr() c.Int { - return 0 -} diff --git a/libxml2/SAX.go b/libxml2/SAX.go deleted file mode 100644 index d61a63c4..00000000 --- a/libxml2/SAX.go +++ /dev/null @@ -1,116 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -//go:linkname GetPublicId C.getPublicId -func GetPublicId(ctx c.Pointer) *Char - -//go:linkname GetSystemId C.getSystemId -func GetSystemId(ctx c.Pointer) *Char - -//go:linkname SetDocumentLocator C.setDocumentLocator -func SetDocumentLocator(ctx c.Pointer, loc SAXLocatorPtr) - -//go:linkname GetLineNumber C.getLineNumber -func 
GetLineNumber(ctx c.Pointer) c.Int - -//go:linkname GetColumnNumber C.getColumnNumber -func GetColumnNumber(ctx c.Pointer) c.Int - -//go:linkname IsStandalone C.isStandalone -func IsStandalone(ctx c.Pointer) c.Int - -//go:linkname HasInternalSubset C.hasInternalSubset -func HasInternalSubset(ctx c.Pointer) c.Int - -//go:linkname HasExternalSubset C.hasExternalSubset -func HasExternalSubset(ctx c.Pointer) c.Int - -//go:linkname InternalSubset C.internalSubset -func InternalSubset(ctx c.Pointer, name *Char, ExternalID *Char, SystemID *Char) - -//go:linkname ExternalSubset C.externalSubset -func ExternalSubset(ctx c.Pointer, name *Char, ExternalID *Char, SystemID *Char) - -//go:linkname GetEntity C.getEntity -func GetEntity(ctx c.Pointer, name *Char) EntityPtr - -//go:linkname GetParameterEntity__1 C.getParameterEntity -func GetParameterEntity__1(ctx c.Pointer, name *Char) EntityPtr - -//go:linkname ResolveEntity C.resolveEntity -func ResolveEntity(ctx c.Pointer, publicId *Char, systemId *Char) ParserInputPtr - -//go:linkname EntityDecl C.entityDecl -func EntityDecl(ctx c.Pointer, name *Char, type_ c.Int, publicId *Char, systemId *Char, content *Char) - -//go:linkname AttributeDecl C.attributeDecl -func AttributeDecl(ctx c.Pointer, elem *Char, fullname *Char, type_ c.Int, def c.Int, defaultValue *Char, tree EnumerationPtr) - -//go:linkname ElementDecl C.elementDecl -func ElementDecl(ctx c.Pointer, name *Char, type_ c.Int, content ElementContentPtr) - -//go:linkname NotationDecl C.notationDecl -func NotationDecl(ctx c.Pointer, name *Char, publicId *Char, systemId *Char) - -//go:linkname UnparsedEntityDecl C.unparsedEntityDecl -func UnparsedEntityDecl(ctx c.Pointer, name *Char, publicId *Char, systemId *Char, notationName *Char) - -//go:linkname StartDocument C.startDocument -func StartDocument(ctx c.Pointer) - -//go:linkname EndDocument C.endDocument -func EndDocument(ctx c.Pointer) - -//go:linkname GetAttribute C.attribute -func GetAttribute(ctx c.Pointer, fullname 
*Char, value *Char) - -//go:linkname StartElement C.startElement -func StartElement(ctx c.Pointer, fullname *Char, atts **Char) - -//go:linkname EndElement C.endElement -func EndElement(ctx c.Pointer, name *Char) - -//go:linkname Reference C.reference -func Reference(ctx c.Pointer, name *Char) - -//go:linkname Characters C.characters -func Characters(ctx c.Pointer, ch *Char, len c.Int) - -//go:linkname IgnorableWhitespace C.ignorableWhitespace -func IgnorableWhitespace(ctx c.Pointer, ch *Char, len c.Int) - -//go:linkname ProcessingInstruction C.processingInstruction -func ProcessingInstruction(ctx c.Pointer, target *Char, data *Char) - -//go:linkname GlobalNamespace C.globalNamespace -func GlobalNamespace(ctx c.Pointer, href *Char, prefix *Char) - -//go:linkname SetNamespace C.setNamespace -func SetNamespace(ctx c.Pointer, name *Char) - -//go:linkname GetNamespace C.getNamespace -func GetNamespace(ctx c.Pointer) NsPtr - -//go:linkname CheckNamespace C.checkNamespace -func CheckNamespace(ctx c.Pointer, nameSpace *Char) c.Int - -//go:linkname NamespaceDecl C.namespaceDecl -func NamespaceDecl(ctx c.Pointer, href *Char, prefix *Char) - -//go:linkname Comment C.comment -func Comment(ctx c.Pointer, value *Char) - -//go:linkname CdataBlock C.cdataBlock -func CdataBlock(ctx c.Pointer, value *Char, len c.Int) - -// llgo:link (*SAXHandlerV1).InitxmlDefaultSAXHandler C.initxmlDefaultSAXHandler -func (recv_ *SAXHandlerV1) InitxmlDefaultSAXHandler(warning c.Int) { -} - -// llgo:link (*SAXHandlerV1).InithtmlDefaultSAXHandler C.inithtmlDefaultSAXHandler -func (recv_ *SAXHandlerV1) InithtmlDefaultSAXHandler() { -} diff --git a/libxml2/SAX2.go b/libxml2/SAX2.go deleted file mode 100644 index 471cf2b0..00000000 --- a/libxml2/SAX2.go +++ /dev/null @@ -1,118 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -//go:linkname SAX2GetPublicId C.xmlSAX2GetPublicId -func SAX2GetPublicId(ctx c.Pointer) *Char - -//go:linkname SAX2GetSystemId 
C.xmlSAX2GetSystemId -func SAX2GetSystemId(ctx c.Pointer) *Char - -//go:linkname SAX2SetDocumentLocator C.xmlSAX2SetDocumentLocator -func SAX2SetDocumentLocator(ctx c.Pointer, loc SAXLocatorPtr) - -//go:linkname SAX2GetLineNumber C.xmlSAX2GetLineNumber -func SAX2GetLineNumber(ctx c.Pointer) c.Int - -//go:linkname SAX2GetColumnNumber C.xmlSAX2GetColumnNumber -func SAX2GetColumnNumber(ctx c.Pointer) c.Int - -//go:linkname SAX2IsStandalone C.xmlSAX2IsStandalone -func SAX2IsStandalone(ctx c.Pointer) c.Int - -//go:linkname SAX2HasInternalSubset C.xmlSAX2HasInternalSubset -func SAX2HasInternalSubset(ctx c.Pointer) c.Int - -//go:linkname SAX2HasExternalSubset C.xmlSAX2HasExternalSubset -func SAX2HasExternalSubset(ctx c.Pointer) c.Int - -//go:linkname SAX2InternalSubset C.xmlSAX2InternalSubset -func SAX2InternalSubset(ctx c.Pointer, name *Char, ExternalID *Char, SystemID *Char) - -//go:linkname SAX2ExternalSubset C.xmlSAX2ExternalSubset -func SAX2ExternalSubset(ctx c.Pointer, name *Char, ExternalID *Char, SystemID *Char) - -//go:linkname SAX2GetEntity C.xmlSAX2GetEntity -func SAX2GetEntity(ctx c.Pointer, name *Char) EntityPtr - -//go:linkname SAX2GetParameterEntity C.xmlSAX2GetParameterEntity -func SAX2GetParameterEntity(ctx c.Pointer, name *Char) EntityPtr - -//go:linkname SAX2ResolveEntity C.xmlSAX2ResolveEntity -func SAX2ResolveEntity(ctx c.Pointer, publicId *Char, systemId *Char) ParserInputPtr - -//go:linkname SAX2EntityDecl C.xmlSAX2EntityDecl -func SAX2EntityDecl(ctx c.Pointer, name *Char, type_ c.Int, publicId *Char, systemId *Char, content *Char) - -//go:linkname SAX2AttributeDecl C.xmlSAX2AttributeDecl -func SAX2AttributeDecl(ctx c.Pointer, elem *Char, fullname *Char, type_ c.Int, def c.Int, defaultValue *Char, tree EnumerationPtr) - -//go:linkname SAX2ElementDecl C.xmlSAX2ElementDecl -func SAX2ElementDecl(ctx c.Pointer, name *Char, type_ c.Int, content ElementContentPtr) - -//go:linkname SAX2NotationDecl C.xmlSAX2NotationDecl -func SAX2NotationDecl(ctx 
c.Pointer, name *Char, publicId *Char, systemId *Char) - -//go:linkname SAX2UnparsedEntityDecl C.xmlSAX2UnparsedEntityDecl -func SAX2UnparsedEntityDecl(ctx c.Pointer, name *Char, publicId *Char, systemId *Char, notationName *Char) - -//go:linkname SAX2StartDocument C.xmlSAX2StartDocument -func SAX2StartDocument(ctx c.Pointer) - -//go:linkname SAX2EndDocument C.xmlSAX2EndDocument -func SAX2EndDocument(ctx c.Pointer) - -//go:linkname SAX2StartElement C.xmlSAX2StartElement -func SAX2StartElement(ctx c.Pointer, fullname *Char, atts **Char) - -//go:linkname SAX2EndElement C.xmlSAX2EndElement -func SAX2EndElement(ctx c.Pointer, name *Char) - -//go:linkname SAX2StartElementNs C.xmlSAX2StartElementNs -func SAX2StartElementNs(ctx c.Pointer, localname *Char, prefix *Char, URI *Char, nb_namespaces c.Int, namespaces **Char, nb_attributes c.Int, nb_defaulted c.Int, attributes **Char) - -//go:linkname SAX2EndElementNs C.xmlSAX2EndElementNs -func SAX2EndElementNs(ctx c.Pointer, localname *Char, prefix *Char, URI *Char) - -//go:linkname SAX2Reference C.xmlSAX2Reference -func SAX2Reference(ctx c.Pointer, name *Char) - -//go:linkname SAX2Characters C.xmlSAX2Characters -func SAX2Characters(ctx c.Pointer, ch *Char, len c.Int) - -//go:linkname SAX2IgnorableWhitespace C.xmlSAX2IgnorableWhitespace -func SAX2IgnorableWhitespace(ctx c.Pointer, ch *Char, len c.Int) - -//go:linkname SAX2ProcessingInstruction C.xmlSAX2ProcessingInstruction -func SAX2ProcessingInstruction(ctx c.Pointer, target *Char, data *Char) - -//go:linkname SAX2Comment C.xmlSAX2Comment -func SAX2Comment(ctx c.Pointer, value *Char) - -//go:linkname SAX2CDataBlock C.xmlSAX2CDataBlock -func SAX2CDataBlock(ctx c.Pointer, value *Char, len c.Int) - -//go:linkname SAXDefaultVersion C.xmlSAXDefaultVersion -func SAXDefaultVersion(version c.Int) c.Int - -// llgo:link (*SAXHandler).SAXVersion C.xmlSAXVersion -func (recv_ *SAXHandler) SAXVersion(version c.Int) c.Int { - return 0 -} - -// llgo:link 
(*SAXHandler).SAX2InitDefaultSAXHandler C.xmlSAX2InitDefaultSAXHandler -func (recv_ *SAXHandler) SAX2InitDefaultSAXHandler(warning c.Int) { -} - -// llgo:link (*SAXHandler).SAX2InitHtmlDefaultSAXHandler C.xmlSAX2InitHtmlDefaultSAXHandler -func (recv_ *SAXHandler) SAX2InitHtmlDefaultSAXHandler() { -} - -//go:linkname HtmlDefaultSAXHandlerInit C.htmlDefaultSAXHandlerInit -func HtmlDefaultSAXHandlerInit() - -//go:linkname DefaultSAXHandlerInit C.xmlDefaultSAXHandlerInit -func DefaultSAXHandlerInit() diff --git a/libxml2/_demo/hello/hello.go b/libxml2/_demo/hello/hello.go deleted file mode 100644 index 0737bb3b..00000000 --- a/libxml2/_demo/hello/hello.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "unsafe" - - "github.com/goplus/llpkg/libxml2" - - "github.com/goplus/lib/c" -) - -func main() { - libxml2.InitParser() - xml := "Alice25" - doc := libxml2.ReadMemory((*int8)(unsafe.Pointer(unsafe.StringData(xml))), c.Int(len(xml)), nil, nil, 0) - if doc == nil { - panic("Failed to parse XML") - } - docPtr := (*libxml2.Doc)(unsafe.Pointer(doc)) - root := docPtr.DocGetRootElement() - c.Printf(c.Str("Root element: %s\n"), root.Name) - libxml2.FreeDoc(doc) - libxml2.CleanupParser() -} diff --git a/libxml2/c14n.go b/libxml2/c14n.go deleted file mode 100644 index 37da7f36..00000000 --- a/libxml2/c14n.go +++ /dev/null @@ -1,29 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type C14NMode c.Int - -const ( - C14N_1_0 C14NMode = 0 - C14N_EXCLUSIVE_1_0 C14NMode = 1 - C14N_1_1 C14NMode = 2 -) - -//go:linkname C14NDocSaveTo C.xmlC14NDocSaveTo -func C14NDocSaveTo(doc DocPtr, nodes NodeSetPtr, mode c.Int, inclusive_ns_prefixes **Char, with_comments c.Int, buf OutputBufferPtr) c.Int - -//go:linkname C14NDocDumpMemory C.xmlC14NDocDumpMemory -func C14NDocDumpMemory(doc DocPtr, nodes NodeSetPtr, mode c.Int, inclusive_ns_prefixes **Char, with_comments c.Int, doc_txt_ptr **Char) c.Int - -//go:linkname C14NDocSave C.xmlC14NDocSave -func 
C14NDocSave(doc DocPtr, nodes NodeSetPtr, mode c.Int, inclusive_ns_prefixes **Char, with_comments c.Int, filename *c.Char, compression c.Int) c.Int - -// llgo:type C -type C14NIsVisibleCallback func(c.Pointer, NodePtr, NodePtr) c.Int - -//go:linkname C14NExecute C.xmlC14NExecute -func C14NExecute(doc DocPtr, is_visible_callback C14NIsVisibleCallback, user_data c.Pointer, mode c.Int, inclusive_ns_prefixes **Char, with_comments c.Int, buf OutputBufferPtr) c.Int diff --git a/libxml2/catalog.go b/libxml2/catalog.go deleted file mode 100644 index 36d1c017..00000000 --- a/libxml2/catalog.go +++ /dev/null @@ -1,170 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type CatalogPrefer c.Int - -const ( - CATA_PREFER_NONE CatalogPrefer = 0 - CATA_PREFER_PUBLIC CatalogPrefer = 1 - CATA_PREFER_SYSTEM CatalogPrefer = 2 -) - -type CatalogAllow c.Int - -const ( - CATA_ALLOW_NONE CatalogAllow = 0 - CATA_ALLOW_GLOBAL CatalogAllow = 1 - CATA_ALLOW_DOCUMENT CatalogAllow = 2 - CATA_ALLOW_ALL CatalogAllow = 3 -) - -type X_xmlCatalog struct { - Unused [8]uint8 -} -type Catalog X_xmlCatalog -type CatalogPtr *Catalog - -/* - * Operations on a given catalog. 
- */ -//go:linkname NewCatalog C.xmlNewCatalog -func NewCatalog(sgml c.Int) CatalogPtr - -//go:linkname LoadACatalog C.xmlLoadACatalog -func LoadACatalog(filename *c.Char) CatalogPtr - -//go:linkname LoadSGMLSuperCatalog C.xmlLoadSGMLSuperCatalog -func LoadSGMLSuperCatalog(filename *c.Char) CatalogPtr - -//go:linkname ConvertSGMLCatalog C.xmlConvertSGMLCatalog -func ConvertSGMLCatalog(catal CatalogPtr) c.Int - -//go:linkname ACatalogAdd C.xmlACatalogAdd -func ACatalogAdd(catal CatalogPtr, type_ *Char, orig *Char, replace *Char) c.Int - -//go:linkname ACatalogRemove C.xmlACatalogRemove -func ACatalogRemove(catal CatalogPtr, value *Char) c.Int - -//go:linkname ACatalogResolve C.xmlACatalogResolve -func ACatalogResolve(catal CatalogPtr, pubID *Char, sysID *Char) *Char - -//go:linkname ACatalogResolveSystem C.xmlACatalogResolveSystem -func ACatalogResolveSystem(catal CatalogPtr, sysID *Char) *Char - -//go:linkname ACatalogResolvePublic C.xmlACatalogResolvePublic -func ACatalogResolvePublic(catal CatalogPtr, pubID *Char) *Char - -//go:linkname ACatalogResolveURI C.xmlACatalogResolveURI -func ACatalogResolveURI(catal CatalogPtr, URI *Char) *Char - -//go:linkname ACatalogDump C.xmlACatalogDump -func ACatalogDump(catal CatalogPtr, out *c.FILE) - -//go:linkname FreeCatalog C.xmlFreeCatalog -func FreeCatalog(catal CatalogPtr) - -//go:linkname CatalogIsEmpty C.xmlCatalogIsEmpty -func CatalogIsEmpty(catal CatalogPtr) c.Int - -/* - * Global operations. 
- */ -//go:linkname InitializeCatalog C.xmlInitializeCatalog -func InitializeCatalog() - -//go:linkname LoadCatalog C.xmlLoadCatalog -func LoadCatalog(filename *c.Char) c.Int - -//go:linkname LoadCatalogs C.xmlLoadCatalogs -func LoadCatalogs(paths *c.Char) - -//go:linkname CatalogCleanup C.xmlCatalogCleanup -func CatalogCleanup() - -//go:linkname CatalogDump C.xmlCatalogDump -func CatalogDump(out *c.FILE) - -// llgo:link (*Char).CatalogResolve C.xmlCatalogResolve -func (recv_ *Char) CatalogResolve(sysID *Char) *Char { - return nil -} - -// llgo:link (*Char).CatalogResolveSystem C.xmlCatalogResolveSystem -func (recv_ *Char) CatalogResolveSystem() *Char { - return nil -} - -// llgo:link (*Char).CatalogResolvePublic C.xmlCatalogResolvePublic -func (recv_ *Char) CatalogResolvePublic() *Char { - return nil -} - -// llgo:link (*Char).CatalogResolveURI C.xmlCatalogResolveURI -func (recv_ *Char) CatalogResolveURI() *Char { - return nil -} - -// llgo:link (*Char).CatalogAdd C.xmlCatalogAdd -func (recv_ *Char) CatalogAdd(orig *Char, replace *Char) c.Int { - return 0 -} - -// llgo:link (*Char).CatalogRemove C.xmlCatalogRemove -func (recv_ *Char) CatalogRemove() c.Int { - return 0 -} - -//go:linkname ParseCatalogFile C.xmlParseCatalogFile -func ParseCatalogFile(filename *c.Char) DocPtr - -//go:linkname CatalogConvert C.xmlCatalogConvert -func CatalogConvert() c.Int - -/* - * Strictly minimal interfaces for per-document catalogs used - * by the parser. - */ -//go:linkname CatalogFreeLocal C.xmlCatalogFreeLocal -func CatalogFreeLocal(catalogs c.Pointer) - -//go:linkname CatalogAddLocal C.xmlCatalogAddLocal -func CatalogAddLocal(catalogs c.Pointer, URL *Char) c.Pointer - -//go:linkname CatalogLocalResolve C.xmlCatalogLocalResolve -func CatalogLocalResolve(catalogs c.Pointer, pubID *Char, sysID *Char) *Char - -//go:linkname CatalogLocalResolveURI C.xmlCatalogLocalResolveURI -func CatalogLocalResolveURI(catalogs c.Pointer, URI *Char) *Char - -/* - * Preference settings. 
- */ -//go:linkname CatalogSetDebug C.xmlCatalogSetDebug -func CatalogSetDebug(level c.Int) c.Int - -// llgo:link CatalogPrefer.CatalogSetDefaultPrefer C.xmlCatalogSetDefaultPrefer -func (recv_ CatalogPrefer) CatalogSetDefaultPrefer() CatalogPrefer { - return 0 -} - -// llgo:link CatalogAllow.CatalogSetDefaults C.xmlCatalogSetDefaults -func (recv_ CatalogAllow) CatalogSetDefaults() { -} - -//go:linkname CatalogGetDefaults C.xmlCatalogGetDefaults -func CatalogGetDefaults() CatalogAllow - -/* DEPRECATED interfaces */ -// llgo:link (*Char).CatalogGetSystem C.xmlCatalogGetSystem -func (recv_ *Char) CatalogGetSystem() *Char { - return nil -} - -// llgo:link (*Char).CatalogGetPublic C.xmlCatalogGetPublic -func (recv_ *Char) CatalogGetPublic() *Char { - return nil -} diff --git a/libxml2/chvalid.go b/libxml2/chvalid.go deleted file mode 100644 index 5c2024c3..00000000 --- a/libxml2/chvalid.go +++ /dev/null @@ -1,59 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlChSRange struct { - Low uint16 - High uint16 -} -type ChSRange X_xmlChSRange -type ChSRangePtr *ChSRange - -type X_xmlChLRange struct { - Low c.Uint - High c.Uint -} -type ChLRange X_xmlChLRange -type ChLRangePtr *ChLRange - -type X_xmlChRangeGroup struct { - NbShortRange c.Int - NbLongRange c.Int - ShortRange *ChSRange - LongRange *ChLRange -} -type ChRangeGroup X_xmlChRangeGroup -type ChRangeGroupPtr *ChRangeGroup - -/** - * Range checking routine - */ -//go:linkname CharInRange C.xmlCharInRange -func CharInRange(val c.Uint, group *ChRangeGroup) c.Int - -//go:linkname IsBaseChar C.xmlIsBaseChar -func IsBaseChar(ch c.Uint) c.Int - -//go:linkname IsBlank C.xmlIsBlank -func IsBlank(ch c.Uint) c.Int - -//go:linkname IsChar C.xmlIsChar -func IsChar(ch c.Uint) c.Int - -//go:linkname IsCombining C.xmlIsCombining -func IsCombining(ch c.Uint) c.Int - -//go:linkname IsDigit C.xmlIsDigit -func IsDigit(ch c.Uint) c.Int - -//go:linkname IsExtender C.xmlIsExtender -func 
IsExtender(ch c.Uint) c.Int - -//go:linkname IsIdeographic C.xmlIsIdeographic -func IsIdeographic(ch c.Uint) c.Int - -//go:linkname IsPubidChar C.xmlIsPubidChar -func IsPubidChar(ch c.Uint) c.Int diff --git a/libxml2/debugXML.go b/libxml2/debugXML.go deleted file mode 100644 index 332c6cad..00000000 --- a/libxml2/debugXML.go +++ /dev/null @@ -1,124 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -/* - * The standard Dump routines. - */ -//go:linkname DebugDumpString C.xmlDebugDumpString -func DebugDumpString(output *c.FILE, str *Char) - -//go:linkname DebugDumpAttr C.xmlDebugDumpAttr -func DebugDumpAttr(output *c.FILE, attr AttrPtr, depth c.Int) - -//go:linkname DebugDumpAttrList C.xmlDebugDumpAttrList -func DebugDumpAttrList(output *c.FILE, attr AttrPtr, depth c.Int) - -//go:linkname DebugDumpOneNode C.xmlDebugDumpOneNode -func DebugDumpOneNode(output *c.FILE, node NodePtr, depth c.Int) - -//go:linkname DebugDumpNode C.xmlDebugDumpNode -func DebugDumpNode(output *c.FILE, node NodePtr, depth c.Int) - -//go:linkname DebugDumpNodeList C.xmlDebugDumpNodeList -func DebugDumpNodeList(output *c.FILE, node NodePtr, depth c.Int) - -//go:linkname DebugDumpDocumentHead C.xmlDebugDumpDocumentHead -func DebugDumpDocumentHead(output *c.FILE, doc DocPtr) - -//go:linkname DebugDumpDocument C.xmlDebugDumpDocument -func DebugDumpDocument(output *c.FILE, doc DocPtr) - -//go:linkname DebugDumpDTD C.xmlDebugDumpDTD -func DebugDumpDTD(output *c.FILE, dtd DtdPtr) - -//go:linkname DebugDumpEntities C.xmlDebugDumpEntities -func DebugDumpEntities(output *c.FILE, doc DocPtr) - -/**************************************************************** - * * - * Checking routines * - * * - ****************************************************************/ -//go:linkname DebugCheckDocument C.xmlDebugCheckDocument -func DebugCheckDocument(output *c.FILE, doc DocPtr) c.Int - -/**************************************************************** - * * - * XML shell helpers * 
- * * - ****************************************************************/ -//go:linkname LsOneNode C.xmlLsOneNode -func LsOneNode(output *c.FILE, node NodePtr) - -//go:linkname LsCountNode C.xmlLsCountNode -func LsCountNode(node NodePtr) c.Int - -//go:linkname BoolToText C.xmlBoolToText -func BoolToText(boolval c.Int) *c.Char - -// llgo:type C -type ShellReadlineFunc func(*c.Char) *c.Char - -type X_xmlShellCtxt struct { - Filename *c.Char - Doc DocPtr - Node NodePtr - Pctxt XPathContextPtr - Loaded c.Int - Output *c.FILE - Input ShellReadlineFunc -} -type ShellCtxt X_xmlShellCtxt -type ShellCtxtPtr *ShellCtxt - -// llgo:type C -type ShellCmd func(ShellCtxtPtr, *c.Char, NodePtr, NodePtr) c.Int - -//go:linkname ShellPrintXPathError C.xmlShellPrintXPathError -func ShellPrintXPathError(errorType c.Int, arg *c.Char) - -//go:linkname ShellPrintXPathResult C.xmlShellPrintXPathResult -func ShellPrintXPathResult(list XPathObjectPtr) - -//go:linkname ShellList C.xmlShellList -func ShellList(ctxt ShellCtxtPtr, arg *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellBase C.xmlShellBase -func ShellBase(ctxt ShellCtxtPtr, arg *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellDir C.xmlShellDir -func ShellDir(ctxt ShellCtxtPtr, arg *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellLoad C.xmlShellLoad -func ShellLoad(ctxt ShellCtxtPtr, filename *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellPrintNode C.xmlShellPrintNode -func ShellPrintNode(node NodePtr) - -//go:linkname ShellCat C.xmlShellCat -func ShellCat(ctxt ShellCtxtPtr, arg *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellWrite C.xmlShellWrite -func ShellWrite(ctxt ShellCtxtPtr, filename *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellSave C.xmlShellSave -func ShellSave(ctxt ShellCtxtPtr, filename *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellValidate C.xmlShellValidate -func ShellValidate(ctxt 
ShellCtxtPtr, dtd *c.Char, node NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellDu C.xmlShellDu -func ShellDu(ctxt ShellCtxtPtr, arg *c.Char, tree NodePtr, node2 NodePtr) c.Int - -//go:linkname ShellPwd C.xmlShellPwd -func ShellPwd(ctxt ShellCtxtPtr, buffer *c.Char, node NodePtr, node2 NodePtr) c.Int - -/* - * The Shell interface. - */ -//go:linkname Shell C.xmlShell -func Shell(doc DocPtr, filename *c.Char, input ShellReadlineFunc, output *c.FILE) diff --git a/libxml2/dict.go b/libxml2/dict.go deleted file mode 100644 index 0c232bad..00000000 --- a/libxml2/dict.go +++ /dev/null @@ -1,60 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type Dict X_xmlDict -type DictPtr *Dict - -/* - * Initializer - */ -//go:linkname InitializeDict C.xmlInitializeDict -func InitializeDict() c.Int - -/* - * Constructor and destructor. - */ -//go:linkname DictCreate C.xmlDictCreate -func DictCreate() DictPtr - -//go:linkname DictSetLimit C.xmlDictSetLimit -func DictSetLimit(dict DictPtr, limit c.SizeT) c.SizeT - -//go:linkname DictGetUsage C.xmlDictGetUsage -func DictGetUsage(dict DictPtr) c.SizeT - -//go:linkname DictCreateSub C.xmlDictCreateSub -func DictCreateSub(sub DictPtr) DictPtr - -//go:linkname DictReference C.xmlDictReference -func DictReference(dict DictPtr) c.Int - -//go:linkname DictFree C.xmlDictFree -func DictFree(dict DictPtr) - -/* - * Lookup of entry in the dictionary. 
- */ -//go:linkname DictLookup C.xmlDictLookup -func DictLookup(dict DictPtr, name *Char, len c.Int) *Char - -//go:linkname DictExists C.xmlDictExists -func DictExists(dict DictPtr, name *Char, len c.Int) *Char - -//go:linkname DictQLookup C.xmlDictQLookup -func DictQLookup(dict DictPtr, prefix *Char, name *Char) *Char - -//go:linkname DictOwns C.xmlDictOwns -func DictOwns(dict DictPtr, str *Char) c.Int - -//go:linkname DictSize C.xmlDictSize -func DictSize(dict DictPtr) c.Int - -/* - * Cleanup function - */ -//go:linkname DictCleanup C.xmlDictCleanup -func DictCleanup() diff --git a/libxml2/encoding.go b/libxml2/encoding.go deleted file mode 100644 index d80cb379..00000000 --- a/libxml2/encoding.go +++ /dev/null @@ -1,150 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type CharEncError c.Int - -const ( - ENC_ERR_SUCCESS CharEncError = 0 - ENC_ERR_SPACE CharEncError = -1 - ENC_ERR_INPUT CharEncError = -2 - ENC_ERR_PARTIAL CharEncError = -3 - ENC_ERR_INTERNAL CharEncError = -4 - ENC_ERR_MEMORY CharEncError = -5 -) - -type CharEncoding c.Int - -const ( - CHAR_ENCODING_ERROR CharEncoding = -1 - CHAR_ENCODING_NONE CharEncoding = 0 - CHAR_ENCODING_UTF8 CharEncoding = 1 - CHAR_ENCODING_UTF16LE CharEncoding = 2 - CHAR_ENCODING_UTF16BE CharEncoding = 3 - CHAR_ENCODING_UCS4LE CharEncoding = 4 - CHAR_ENCODING_UCS4BE CharEncoding = 5 - CHAR_ENCODING_EBCDIC CharEncoding = 6 - CHAR_ENCODING_UCS4_2143 CharEncoding = 7 - CHAR_ENCODING_UCS4_3412 CharEncoding = 8 - CHAR_ENCODING_UCS2 CharEncoding = 9 - CHAR_ENCODING_8859_1 CharEncoding = 10 - CHAR_ENCODING_8859_2 CharEncoding = 11 - CHAR_ENCODING_8859_3 CharEncoding = 12 - CHAR_ENCODING_8859_4 CharEncoding = 13 - CHAR_ENCODING_8859_5 CharEncoding = 14 - CHAR_ENCODING_8859_6 CharEncoding = 15 - CHAR_ENCODING_8859_7 CharEncoding = 16 - CHAR_ENCODING_8859_8 CharEncoding = 17 - CHAR_ENCODING_8859_9 CharEncoding = 18 - CHAR_ENCODING_2022_JP CharEncoding = 19 - CHAR_ENCODING_SHIFT_JIS CharEncoding = 
20 - CHAR_ENCODING_EUC_JP CharEncoding = 21 - CHAR_ENCODING_ASCII CharEncoding = 22 -) - -// llgo:type C -type CharEncodingInputFunc func(*c.Char, *c.Int, *c.Char, *c.Int) c.Int - -// llgo:type C -type CharEncodingOutputFunc func(*c.Char, *c.Int, *c.Char, *c.Int) c.Int - -type X_xmlCharEncodingHandler struct { - Name *c.Char - Input CharEncodingInputFunc - Output CharEncodingOutputFunc -} -type CharEncodingHandler X_xmlCharEncodingHandler -type CharEncodingHandlerPtr *CharEncodingHandler - -/* - * Interfaces for encoding handlers. - */ -//go:linkname InitCharEncodingHandlers C.xmlInitCharEncodingHandlers -func InitCharEncodingHandlers() - -//go:linkname CleanupCharEncodingHandlers C.xmlCleanupCharEncodingHandlers -func CleanupCharEncodingHandlers() - -//go:linkname RegisterCharEncodingHandler C.xmlRegisterCharEncodingHandler -func RegisterCharEncodingHandler(handler CharEncodingHandlerPtr) - -// llgo:link CharEncoding.LookupCharEncodingHandler C.xmlLookupCharEncodingHandler -func (recv_ CharEncoding) LookupCharEncodingHandler(out *CharEncodingHandlerPtr) c.Int { - return 0 -} - -//go:linkname OpenCharEncodingHandler C.xmlOpenCharEncodingHandler -func OpenCharEncodingHandler(name *c.Char, output c.Int, out *CharEncodingHandlerPtr) c.Int - -// llgo:link CharEncoding.GetCharEncodingHandler C.xmlGetCharEncodingHandler -func (recv_ CharEncoding) GetCharEncodingHandler() CharEncodingHandlerPtr { - return nil -} - -//go:linkname FindCharEncodingHandler C.xmlFindCharEncodingHandler -func FindCharEncodingHandler(name *c.Char) CharEncodingHandlerPtr - -//go:linkname NewCharEncodingHandler C.xmlNewCharEncodingHandler -func NewCharEncodingHandler(name *c.Char, input CharEncodingInputFunc, output CharEncodingOutputFunc) CharEncodingHandlerPtr - -/* - * Interfaces for encoding names and aliases. 
- */ -//go:linkname AddEncodingAlias C.xmlAddEncodingAlias -func AddEncodingAlias(name *c.Char, alias *c.Char) c.Int - -//go:linkname DelEncodingAlias C.xmlDelEncodingAlias -func DelEncodingAlias(alias *c.Char) c.Int - -//go:linkname GetEncodingAlias C.xmlGetEncodingAlias -func GetEncodingAlias(alias *c.Char) *c.Char - -//go:linkname CleanupEncodingAliases C.xmlCleanupEncodingAliases -func CleanupEncodingAliases() - -//go:linkname ParseCharEncoding C.xmlParseCharEncoding -func ParseCharEncoding(name *c.Char) CharEncoding - -// llgo:link CharEncoding.GetCharEncodingName C.xmlGetCharEncodingName -func (recv_ CharEncoding) GetCharEncodingName() *c.Char { - return nil -} - -/* - * Interfaces directly used by the parsers. - */ -//go:linkname DetectCharEncoding C.xmlDetectCharEncoding -func DetectCharEncoding(in *c.Char, len c.Int) CharEncoding - -/** DOC_ENABLE */ -// llgo:link (*CharEncodingHandler).CharEncOutFunc C.xmlCharEncOutFunc -func (recv_ *CharEncodingHandler) CharEncOutFunc(out *X_xmlBuffer, in *X_xmlBuffer) c.Int { - return 0 -} - -// llgo:link (*CharEncodingHandler).CharEncInFunc C.xmlCharEncInFunc -func (recv_ *CharEncodingHandler) CharEncInFunc(out *X_xmlBuffer, in *X_xmlBuffer) c.Int { - return 0 -} - -// llgo:link (*CharEncodingHandler).CharEncFirstLine C.xmlCharEncFirstLine -func (recv_ *CharEncodingHandler) CharEncFirstLine(out *X_xmlBuffer, in *X_xmlBuffer) c.Int { - return 0 -} - -// llgo:link (*CharEncodingHandler).CharEncCloseFunc C.xmlCharEncCloseFunc -func (recv_ *CharEncodingHandler) CharEncCloseFunc() c.Int { - return 0 -} - -/* - * Export a few useful functions - */ -//go:linkname UTF8Toisolat1 C.UTF8Toisolat1 -func UTF8Toisolat1(out *c.Char, outlen *c.Int, in *c.Char, inlen *c.Int) c.Int - -//go:linkname Isolat1ToUTF8 C.isolat1ToUTF8 -func Isolat1ToUTF8(out *c.Char, outlen *c.Int, in *c.Char, inlen *c.Int) c.Int diff --git a/libxml2/entities.go b/libxml2/entities.go deleted file mode 100644 index bd28cb59..00000000 --- a/libxml2/entities.go 
+++ /dev/null @@ -1,86 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type EntityType c.Int - -const ( - INTERNAL_GENERAL_ENTITY EntityType = 1 - EXTERNAL_GENERAL_PARSED_ENTITY EntityType = 2 - EXTERNAL_GENERAL_UNPARSED_ENTITY EntityType = 3 - INTERNAL_PARAMETER_ENTITY EntityType = 4 - EXTERNAL_PARAMETER_ENTITY EntityType = 5 - INTERNAL_PREDEFINED_ENTITY EntityType = 6 -) - -type EntitiesTable X_xmlHashTable -type EntitiesTablePtr *EntitiesTable - -/* - * External functions: - */ -//go:linkname InitializePredefinedEntities C.xmlInitializePredefinedEntities -func InitializePredefinedEntities() - -//go:linkname NewEntity C.xmlNewEntity -func NewEntity(doc DocPtr, name *Char, type_ c.Int, ExternalID *Char, SystemID *Char, content *Char) EntityPtr - -//go:linkname FreeEntity C.xmlFreeEntity -func FreeEntity(entity EntityPtr) - -//go:linkname AddEntity C.xmlAddEntity -func AddEntity(doc DocPtr, extSubset c.Int, name *Char, type_ c.Int, ExternalID *Char, SystemID *Char, content *Char, out *EntityPtr) c.Int - -//go:linkname AddDocEntity C.xmlAddDocEntity -func AddDocEntity(doc DocPtr, name *Char, type_ c.Int, ExternalID *Char, SystemID *Char, content *Char) EntityPtr - -//go:linkname AddDtdEntity C.xmlAddDtdEntity -func AddDtdEntity(doc DocPtr, name *Char, type_ c.Int, ExternalID *Char, SystemID *Char, content *Char) EntityPtr - -// llgo:link (*Char).GetPredefinedEntity C.xmlGetPredefinedEntity -func (recv_ *Char) GetPredefinedEntity() EntityPtr { - return nil -} - -// llgo:link (*Doc).GetDocEntity C.xmlGetDocEntity -func (recv_ *Doc) GetDocEntity(name *Char) EntityPtr { - return nil -} - -//go:linkname GetDtdEntity C.xmlGetDtdEntity -func GetDtdEntity(doc DocPtr, name *Char) EntityPtr - -//go:linkname GetParameterEntity C.xmlGetParameterEntity -func GetParameterEntity(doc DocPtr, name *Char) EntityPtr - -//go:linkname EncodeEntities C.xmlEncodeEntities -func EncodeEntities(doc DocPtr, input *Char) *Char - -//go:linkname 
EncodeEntitiesReentrant C.xmlEncodeEntitiesReentrant -func EncodeEntitiesReentrant(doc DocPtr, input *Char) *Char - -// llgo:link (*Doc).EncodeSpecialChars C.xmlEncodeSpecialChars -func (recv_ *Doc) EncodeSpecialChars(input *Char) *Char { - return nil -} - -//go:linkname CreateEntitiesTable C.xmlCreateEntitiesTable -func CreateEntitiesTable() EntitiesTablePtr - -//go:linkname CopyEntitiesTable C.xmlCopyEntitiesTable -func CopyEntitiesTable(table EntitiesTablePtr) EntitiesTablePtr - -//go:linkname FreeEntitiesTable C.xmlFreeEntitiesTable -func FreeEntitiesTable(table EntitiesTablePtr) - -//go:linkname DumpEntitiesTable C.xmlDumpEntitiesTable -func DumpEntitiesTable(buf BufferPtr, table EntitiesTablePtr) - -//go:linkname DumpEntityDecl C.xmlDumpEntityDecl -func DumpEntityDecl(buf BufferPtr, ent EntityPtr) - -//go:linkname CleanupPredefinedEntities C.xmlCleanupPredefinedEntities -func CleanupPredefinedEntities() diff --git a/libxml2/globals.go b/libxml2/globals.go deleted file mode 100644 index d0f6b23f..00000000 --- a/libxml2/globals.go +++ /dev/null @@ -1,15 +0,0 @@ -package libxml2 - -import _ "unsafe" - -type X_xmlGlobalState struct { - Unused [8]uint8 -} -type GlobalState X_xmlGlobalState -type GlobalStatePtr *GlobalState - -//go:linkname InitializeGlobalState C.xmlInitializeGlobalState -func InitializeGlobalState(gs GlobalStatePtr) - -//go:linkname GetGlobalState C.xmlGetGlobalState -func GetGlobalState() GlobalStatePtr diff --git a/libxml2/go.mod b/libxml2/go.mod deleted file mode 100644 index 7227c4f4..00000000 --- a/libxml2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/goplus/llpkg/libxml2 - -go 1.20 - -require github.com/goplus/lib v0.2.0 diff --git a/libxml2/go.sum b/libxml2/go.sum deleted file mode 100644 index 512980a5..00000000 --- a/libxml2/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/goplus/lib v0.2.0 h1:AjqkN1XK5H23wZMMlpaUYAMCDAdSBQ2NMFrLtSh7W4g= -github.com/goplus/lib v0.2.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff 
--git a/libxml2/hash.go b/libxml2/hash.go deleted file mode 100644 index a7045f16..00000000 --- a/libxml2/hash.go +++ /dev/null @@ -1,126 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlHashTable struct { - Unused [8]uint8 -} -type HashTable X_xmlHashTable -type HashTablePtr *HashTable - -// llgo:type C -type HashDeallocator func(c.Pointer, *Char) - -// llgo:type C -type HashCopier func(c.Pointer, *Char) c.Pointer - -// llgo:type C -type HashScanner func(c.Pointer, c.Pointer, *Char) - -// llgo:type C -type HashScannerFull func(c.Pointer, c.Pointer, *Char, *Char, *Char) - -/* - * Constructor and destructor. - */ -//go:linkname HashCreate C.xmlHashCreate -func HashCreate(size c.Int) HashTablePtr - -//go:linkname HashCreateDict C.xmlHashCreateDict -func HashCreateDict(size c.Int, dict DictPtr) HashTablePtr - -//go:linkname HashFree C.xmlHashFree -func HashFree(hash HashTablePtr, dealloc HashDeallocator) - -//go:linkname HashDefaultDeallocator C.xmlHashDefaultDeallocator -func HashDefaultDeallocator(entry c.Pointer, name *Char) - -/* - * Add a new entry to the hash table. 
- */ -//go:linkname HashAdd C.xmlHashAdd -func HashAdd(hash HashTablePtr, name *Char, userdata c.Pointer) c.Int - -//go:linkname HashAddEntry C.xmlHashAddEntry -func HashAddEntry(hash HashTablePtr, name *Char, userdata c.Pointer) c.Int - -//go:linkname HashUpdateEntry C.xmlHashUpdateEntry -func HashUpdateEntry(hash HashTablePtr, name *Char, userdata c.Pointer, dealloc HashDeallocator) c.Int - -//go:linkname HashAdd2 C.xmlHashAdd2 -func HashAdd2(hash HashTablePtr, name *Char, name2 *Char, userdata c.Pointer) c.Int - -//go:linkname HashAddEntry2 C.xmlHashAddEntry2 -func HashAddEntry2(hash HashTablePtr, name *Char, name2 *Char, userdata c.Pointer) c.Int - -//go:linkname HashUpdateEntry2 C.xmlHashUpdateEntry2 -func HashUpdateEntry2(hash HashTablePtr, name *Char, name2 *Char, userdata c.Pointer, dealloc HashDeallocator) c.Int - -//go:linkname HashAdd3 C.xmlHashAdd3 -func HashAdd3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, userdata c.Pointer) c.Int - -//go:linkname HashAddEntry3 C.xmlHashAddEntry3 -func HashAddEntry3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, userdata c.Pointer) c.Int - -//go:linkname HashUpdateEntry3 C.xmlHashUpdateEntry3 -func HashUpdateEntry3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, userdata c.Pointer, dealloc HashDeallocator) c.Int - -/* - * Remove an entry from the hash table. - */ -//go:linkname HashRemoveEntry C.xmlHashRemoveEntry -func HashRemoveEntry(hash HashTablePtr, name *Char, dealloc HashDeallocator) c.Int - -//go:linkname HashRemoveEntry2 C.xmlHashRemoveEntry2 -func HashRemoveEntry2(hash HashTablePtr, name *Char, name2 *Char, dealloc HashDeallocator) c.Int - -//go:linkname HashRemoveEntry3 C.xmlHashRemoveEntry3 -func HashRemoveEntry3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, dealloc HashDeallocator) c.Int - -/* - * Retrieve the payload. 
- */ -//go:linkname HashLookup C.xmlHashLookup -func HashLookup(hash HashTablePtr, name *Char) c.Pointer - -//go:linkname HashLookup2 C.xmlHashLookup2 -func HashLookup2(hash HashTablePtr, name *Char, name2 *Char) c.Pointer - -//go:linkname HashLookup3 C.xmlHashLookup3 -func HashLookup3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char) c.Pointer - -//go:linkname HashQLookup C.xmlHashQLookup -func HashQLookup(hash HashTablePtr, prefix *Char, name *Char) c.Pointer - -//go:linkname HashQLookup2 C.xmlHashQLookup2 -func HashQLookup2(hash HashTablePtr, prefix *Char, name *Char, prefix2 *Char, name2 *Char) c.Pointer - -//go:linkname HashQLookup3 C.xmlHashQLookup3 -func HashQLookup3(hash HashTablePtr, prefix *Char, name *Char, prefix2 *Char, name2 *Char, prefix3 *Char, name3 *Char) c.Pointer - -/* - * Helpers. - */ -//go:linkname HashCopySafe C.xmlHashCopySafe -func HashCopySafe(hash HashTablePtr, copy HashCopier, dealloc HashDeallocator) HashTablePtr - -//go:linkname HashCopy C.xmlHashCopy -func HashCopy(hash HashTablePtr, copy HashCopier) HashTablePtr - -//go:linkname HashSize C.xmlHashSize -func HashSize(hash HashTablePtr) c.Int - -//go:linkname HashScan C.xmlHashScan -func HashScan(hash HashTablePtr, scan HashScanner, data c.Pointer) - -//go:linkname HashScan3 C.xmlHashScan3 -func HashScan3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, scan HashScanner, data c.Pointer) - -//go:linkname HashScanFull C.xmlHashScanFull -func HashScanFull(hash HashTablePtr, scan HashScannerFull, data c.Pointer) - -//go:linkname HashScanFull3 C.xmlHashScanFull3 -func HashScanFull3(hash HashTablePtr, name *Char, name2 *Char, name3 *Char, scan HashScannerFull, data c.Pointer) diff --git a/libxml2/libxml2_autogen_link.go b/libxml2/libxml2_autogen_link.go deleted file mode 100644 index 873db775..00000000 --- a/libxml2/libxml2_autogen_link.go +++ /dev/null @@ -1,8 +0,0 @@ -package libxml2 - -import ( - _ "github.com/goplus/lib/c" - _ "github.com/goplus/lib/c/os" -) - -const 
LLGoPackage string = "link: $(pkg-config --libs libxml-2.0);" diff --git a/libxml2/list.go b/libxml2/list.go deleted file mode 100644 index 90d21f11..00000000 --- a/libxml2/list.go +++ /dev/null @@ -1,109 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlLink struct { - Unused [8]uint8 -} -type Link X_xmlLink -type LinkPtr *Link - -type X_xmlList struct { - Unused [8]uint8 -} -type List X_xmlList -type ListPtr *List - -// llgo:type C -type ListDeallocator func(LinkPtr) - -// llgo:type C -type ListDataCompare func(c.Pointer, c.Pointer) c.Int - -// llgo:type C -type ListWalker func(c.Pointer, c.Pointer) c.Int - -/* Creation/Deletion */ -//go:linkname ListCreate C.xmlListCreate -func ListCreate(deallocator ListDeallocator, compare ListDataCompare) ListPtr - -//go:linkname ListDelete C.xmlListDelete -func ListDelete(l ListPtr) - -/* Basic Operators */ -//go:linkname ListSearch C.xmlListSearch -func ListSearch(l ListPtr, data c.Pointer) c.Pointer - -//go:linkname ListReverseSearch C.xmlListReverseSearch -func ListReverseSearch(l ListPtr, data c.Pointer) c.Pointer - -//go:linkname ListInsert C.xmlListInsert -func ListInsert(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListAppend C.xmlListAppend -func ListAppend(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListRemoveFirst C.xmlListRemoveFirst -func ListRemoveFirst(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListRemoveLast C.xmlListRemoveLast -func ListRemoveLast(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListRemoveAll C.xmlListRemoveAll -func ListRemoveAll(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListClear C.xmlListClear -func ListClear(l ListPtr) - -//go:linkname ListEmpty C.xmlListEmpty -func ListEmpty(l ListPtr) c.Int - -//go:linkname ListFront C.xmlListFront -func ListFront(l ListPtr) LinkPtr - -//go:linkname ListEnd C.xmlListEnd -func ListEnd(l ListPtr) LinkPtr - -//go:linkname ListSize C.xmlListSize -func ListSize(l ListPtr) c.Int - 
-//go:linkname ListPopFront C.xmlListPopFront -func ListPopFront(l ListPtr) - -//go:linkname ListPopBack C.xmlListPopBack -func ListPopBack(l ListPtr) - -//go:linkname ListPushFront C.xmlListPushFront -func ListPushFront(l ListPtr, data c.Pointer) c.Int - -//go:linkname ListPushBack C.xmlListPushBack -func ListPushBack(l ListPtr, data c.Pointer) c.Int - -/* Advanced Operators */ -//go:linkname ListReverse C.xmlListReverse -func ListReverse(l ListPtr) - -//go:linkname ListSort C.xmlListSort -func ListSort(l ListPtr) - -//go:linkname ListWalk C.xmlListWalk -func ListWalk(l ListPtr, walker ListWalker, user c.Pointer) - -//go:linkname ListReverseWalk C.xmlListReverseWalk -func ListReverseWalk(l ListPtr, walker ListWalker, user c.Pointer) - -//go:linkname ListMerge C.xmlListMerge -func ListMerge(l1 ListPtr, l2 ListPtr) - -//go:linkname ListDup C.xmlListDup -func ListDup(old ListPtr) ListPtr - -//go:linkname ListCopy C.xmlListCopy -func ListCopy(cur ListPtr, old ListPtr) c.Int - -/* Link operators */ -//go:linkname LinkGetData C.xmlLinkGetData -func LinkGetData(lk LinkPtr) c.Pointer diff --git a/libxml2/llcppg.cfg b/libxml2/llcppg.cfg deleted file mode 100644 index fd7811db..00000000 --- a/libxml2/llcppg.cfg +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "libxml2", - "cflags": "$(pkg-config --cflags libxml-2.0)", - "libs": "$(pkg-config --libs libxml-2.0)", - "include": [ - "libxml/parserInternals.h", - "libxml/xmlschemastypes.h", - "libxml/globals.h", - "libxml/xmlreader.h", - "libxml/xpointer.h", - "libxml/HTMLtree.h", - "libxml/c14n.h", - "libxml/xpathInternals.h", - "libxml/debugXML.h", - "libxml/xpath.h", - "libxml/pattern.h", - "libxml/xmlsave.h", - "libxml/xlink.h", - "libxml/catalog.h", - "libxml/SAX2.h", - "libxml/xinclude.h", - "libxml/parser.h", - "libxml/SAX.h", - "libxml/xmlschemas.h", - "libxml/relaxng.h", - "libxml/schemasInternals.h", - "libxml/schematron.h", - "libxml/HTMLparser.h", - "libxml/tree.h", - "libxml/valid.h", - "libxml/xmlwriter.h", - 
"libxml/xmlIO.h", - "libxml/entities.h", - "libxml/hash.h", - "libxml/dict.h", - "libxml/xmlautomata.h", - "libxml/uri.h", - "libxml/chvalid.h", - "libxml/xmlregexp.h", - "libxml/threads.h", - "libxml/xmlmodule.h", - "libxml/xmlmemory.h", - "libxml/xmlerror.h", - "libxml/xmlstring.h", - "libxml/xmlunicode.h", - "libxml/nanohttp.h", - "libxml/nanoftp.h", - "libxml/list.h", - "libxml/encoding.h", - "libxml/xmlversion.h", - "libxml/xmlexports.h" - ], - "deps":["c","c/os"], - "trimPrefixes": ["xml","XML_","XML","LIBXML_"], - "cplusplus": false, - "symMap" :{ - "attribute": "GetAttribute" - } -} diff --git a/libxml2/llcppg.pub b/libxml2/llcppg.pub deleted file mode 100644 index 10830fc3..00000000 --- a/libxml2/llcppg.pub +++ /dev/null @@ -1,337 +0,0 @@ -attributeDeclSAXFunc AttributeDeclSAXFunc -attributeSAXFunc AttributeSAXFunc -cdataBlockSAXFunc CdataBlockSAXFunc -charactersSAXFunc CharactersSAXFunc -commentSAXFunc CommentSAXFunc -elementDeclSAXFunc ElementDeclSAXFunc -endDocumentSAXFunc EndDocumentSAXFunc -endElementNsSAX2Func EndElementNsSAX2Func -endElementSAXFunc EndElementSAXFunc -entityDeclSAXFunc EntityDeclSAXFunc -errorSAXFunc ErrorSAXFunc -externalSubsetSAXFunc ExternalSubsetSAXFunc -fatalErrorSAXFunc FatalErrorSAXFunc -ftpDataCallback FtpDataCallback -ftpListCallback FtpListCallback -getEntitySAXFunc GetEntitySAXFunc -getParameterEntitySAXFunc GetParameterEntitySAXFunc -hasExternalSubsetSAXFunc HasExternalSubsetSAXFunc -hasInternalSubsetSAXFunc HasInternalSubsetSAXFunc -htmlDocPtr HtmlDocPtr -htmlElemDesc HtmlElemDesc -htmlElemDescPtr HtmlElemDescPtr -htmlEntityDesc HtmlEntityDesc -htmlEntityDescPtr HtmlEntityDescPtr -htmlNodePtr HtmlNodePtr -htmlParserCtxt HtmlParserCtxt -htmlParserCtxtPtr HtmlParserCtxtPtr -htmlParserInput HtmlParserInput -htmlParserInputPtr HtmlParserInputPtr -htmlParserNodeInfo HtmlParserNodeInfo -htmlParserOption HtmlParserOption -htmlSAXHandler HtmlSAXHandler -htmlSAXHandlerPtr HtmlSAXHandlerPtr -htmlStatus HtmlStatus 
-ignorableWhitespaceSAXFunc IgnorableWhitespaceSAXFunc -internalSubsetSAXFunc InternalSubsetSAXFunc -isStandaloneSAXFunc IsStandaloneSAXFunc -notationDeclSAXFunc NotationDeclSAXFunc -processingInstructionSAXFunc ProcessingInstructionSAXFunc -referenceSAXFunc ReferenceSAXFunc -resolveEntitySAXFunc ResolveEntitySAXFunc -setDocumentLocatorSAXFunc SetDocumentLocatorSAXFunc -startDocumentSAXFunc StartDocumentSAXFunc -startElementNsSAX2Func StartElementNsSAX2Func -startElementSAXFunc StartElementSAXFunc -unparsedEntityDeclSAXFunc UnparsedEntityDeclSAXFunc -warningSAXFunc WarningSAXFunc -xlinkActuate XlinkActuate -xlinkExtendedLinkFunk XlinkExtendedLinkFunk -xlinkExtendedLinkSetFunk XlinkExtendedLinkSetFunk -xlinkHRef XlinkHRef -xlinkHandler XlinkHandler -xlinkHandlerPtr XlinkHandlerPtr -xlinkNodeDetectFunc XlinkNodeDetectFunc -xlinkRole XlinkRole -xlinkShow XlinkShow -xlinkSimpleLinkFunk XlinkSimpleLinkFunk -xlinkTitle XlinkTitle -xlinkType XlinkType -xmlAttr Attr -xmlAttrHashBucket AttrHashBucket -xmlAttrPtr AttrPtr -xmlAttribute Attribute -xmlAttributeDefault AttributeDefault -xmlAttributePtr AttributePtr -xmlAttributeTable AttributeTable -xmlAttributeTablePtr AttributeTablePtr -xmlAttributeType AttributeType -xmlAutomata Automata -xmlAutomataPtr AutomataPtr -xmlAutomataState AutomataState -xmlAutomataStatePtr AutomataStatePtr -xmlBuf Buf -xmlBufPtr BufPtr -xmlBuffer Buffer -xmlBufferAllocationScheme BufferAllocationScheme -xmlBufferPtr BufferPtr -xmlC14NIsVisibleCallback C14NIsVisibleCallback -xmlC14NMode C14NMode -xmlCatalog Catalog -xmlCatalogAllow CatalogAllow -xmlCatalogPrefer CatalogPrefer -xmlCatalogPtr CatalogPtr -xmlChLRange ChLRange -xmlChLRangePtr ChLRangePtr -xmlChRangeGroup ChRangeGroup -xmlChRangeGroupPtr ChRangeGroupPtr -xmlChSRange ChSRange -xmlChSRangePtr ChSRangePtr -xmlChar Char -xmlCharEncError CharEncError -xmlCharEncoding CharEncoding -xmlCharEncodingHandler CharEncodingHandler -xmlCharEncodingHandlerPtr CharEncodingHandlerPtr 
-xmlCharEncodingInputFunc CharEncodingInputFunc -xmlCharEncodingOutputFunc CharEncodingOutputFunc -xmlDOMWrapAcquireNsFunction DOMWrapAcquireNsFunction -xmlDOMWrapCtxt DOMWrapCtxt -xmlDOMWrapCtxtPtr DOMWrapCtxtPtr -xmlDeregisterNodeFunc DeregisterNodeFunc -xmlDict Dict -xmlDictPtr DictPtr -xmlDoc Doc -xmlDocProperties DocProperties -xmlDocPtr DocPtr -xmlDtd Dtd -xmlDtdPtr DtdPtr -xmlElement Element -xmlElementContent ElementContent -xmlElementContentOccur ElementContentOccur -xmlElementContentPtr ElementContentPtr -xmlElementContentType ElementContentType -xmlElementPtr ElementPtr -xmlElementTable ElementTable -xmlElementTablePtr ElementTablePtr -xmlElementType ElementType -xmlElementTypeVal ElementTypeVal -xmlEntitiesTable EntitiesTable -xmlEntitiesTablePtr EntitiesTablePtr -xmlEntity Entity -xmlEntityPtr EntityPtr -xmlEntityReferenceFunc EntityReferenceFunc -xmlEntityType EntityType -xmlEnumeration Enumeration -xmlEnumerationPtr EnumerationPtr -xmlError Error -xmlErrorDomain ErrorDomain -xmlErrorLevel ErrorLevel -xmlErrorPtr ErrorPtr -xmlExternalEntityLoader ExternalEntityLoader -xmlFeature Feature -xmlFreeFunc FreeFunc -xmlGenericErrorFunc GenericErrorFunc -xmlGlobalState GlobalState -xmlGlobalStatePtr GlobalStatePtr -xmlHashCopier HashCopier -xmlHashDeallocator HashDeallocator -xmlHashScanner HashScanner -xmlHashScannerFull HashScannerFull -xmlHashTable HashTable -xmlHashTablePtr HashTablePtr -xmlID ID -xmlIDPtr IDPtr -xmlIDTable IDTable -xmlIDTablePtr IDTablePtr -xmlInputCloseCallback InputCloseCallback -xmlInputMatchCallback InputMatchCallback -xmlInputOpenCallback InputOpenCallback -xmlInputReadCallback InputReadCallback -xmlLink Link -xmlLinkPtr LinkPtr -xmlList List -xmlListDataCompare ListDataCompare -xmlListDeallocator ListDeallocator -xmlListPtr ListPtr -xmlListWalker ListWalker -xmlMallocFunc MallocFunc -xmlModule Module -xmlModuleOption ModuleOption -xmlModulePtr ModulePtr -xmlMutex Mutex -xmlMutexPtr MutexPtr -xmlNode Node -xmlNodePtr NodePtr 
-xmlNodeSet NodeSet -xmlNodeSetPtr NodeSetPtr -xmlNotation Notation -xmlNotationPtr NotationPtr -xmlNotationTable NotationTable -xmlNotationTablePtr NotationTablePtr -xmlNs Ns -xmlNsPtr NsPtr -xmlNsType NsType -xmlOutputBuffer OutputBuffer -xmlOutputBufferCreateFilenameFunc OutputBufferCreateFilenameFunc -xmlOutputBufferPtr OutputBufferPtr -xmlOutputCloseCallback OutputCloseCallback -xmlOutputMatchCallback OutputMatchCallback -xmlOutputOpenCallback OutputOpenCallback -xmlOutputWriteCallback OutputWriteCallback -xmlParserCtxt ParserCtxt -xmlParserCtxtPtr ParserCtxtPtr -xmlParserErrors ParserErrors -xmlParserInput ParserInput -xmlParserInputBuffer ParserInputBuffer -xmlParserInputBufferCreateFilenameFunc ParserInputBufferCreateFilenameFunc -xmlParserInputBufferPtr ParserInputBufferPtr -xmlParserInputDeallocate ParserInputDeallocate -xmlParserInputPtr ParserInputPtr -xmlParserInputState ParserInputState -xmlParserMode ParserMode -xmlParserNodeInfo ParserNodeInfo -xmlParserNodeInfoPtr ParserNodeInfoPtr -xmlParserNodeInfoSeq ParserNodeInfoSeq -xmlParserNodeInfoSeqPtr ParserNodeInfoSeqPtr -xmlParserNsData ParserNsData -xmlParserOption ParserOption -xmlParserProperties ParserProperties -xmlParserSeverities ParserSeverities -xmlPattern Pattern -xmlPatternFlags PatternFlags -xmlPatternPtr PatternPtr -xmlRMutex RMutex -xmlRMutexPtr RMutexPtr -xmlReaderTypes ReaderTypes -xmlReallocFunc ReallocFunc -xmlRef Ref -xmlRefPtr RefPtr -xmlRefTable RefTable -xmlRefTablePtr RefTablePtr -xmlRegExecCallbacks RegExecCallbacks -xmlRegExecCtxt RegExecCtxt -xmlRegExecCtxtPtr RegExecCtxtPtr -xmlRegexp Regexp -xmlRegexpPtr RegexpPtr -xmlRegisterNodeFunc RegisterNodeFunc -xmlRelaxNG RelaxNG -xmlRelaxNGParserCtxt RelaxNGParserCtxt -xmlRelaxNGParserCtxtPtr RelaxNGParserCtxtPtr -xmlRelaxNGParserFlag RelaxNGParserFlag -xmlRelaxNGPtr RelaxNGPtr -xmlRelaxNGValidCtxt RelaxNGValidCtxt -xmlRelaxNGValidCtxtPtr RelaxNGValidCtxtPtr -xmlRelaxNGValidErr RelaxNGValidErr -xmlRelaxNGValidityErrorFunc 
RelaxNGValidityErrorFunc -xmlRelaxNGValidityWarningFunc RelaxNGValidityWarningFunc -xmlSAXHandler SAXHandler -xmlSAXHandlerPtr SAXHandlerPtr -xmlSAXHandlerV1 SAXHandlerV1 -xmlSAXHandlerV1Ptr SAXHandlerV1Ptr -xmlSAXLocator SAXLocator -xmlSAXLocatorPtr SAXLocatorPtr -xmlSaveCtxt SaveCtxt -xmlSaveCtxtPtr SaveCtxtPtr -xmlSaveOption SaveOption -xmlSchema Schema -xmlSchemaAnnot SchemaAnnot -xmlSchemaAnnotPtr SchemaAnnotPtr -xmlSchemaAttribute SchemaAttribute -xmlSchemaAttributeGroup SchemaAttributeGroup -xmlSchemaAttributeGroupPtr SchemaAttributeGroupPtr -xmlSchemaAttributeLink SchemaAttributeLink -xmlSchemaAttributeLinkPtr SchemaAttributeLinkPtr -xmlSchemaAttributePtr SchemaAttributePtr -xmlSchemaContentType SchemaContentType -xmlSchemaElement SchemaElement -xmlSchemaElementPtr SchemaElementPtr -xmlSchemaFacet SchemaFacet -xmlSchemaFacetLink SchemaFacetLink -xmlSchemaFacetLinkPtr SchemaFacetLinkPtr -xmlSchemaFacetPtr SchemaFacetPtr -xmlSchemaNotation SchemaNotation -xmlSchemaNotationPtr SchemaNotationPtr -xmlSchemaParserCtxt SchemaParserCtxt -xmlSchemaParserCtxtPtr SchemaParserCtxtPtr -xmlSchemaPtr SchemaPtr -xmlSchemaSAXPlugPtr SchemaSAXPlugPtr -xmlSchemaSAXPlugStruct SchemaSAXPlugStruct -xmlSchemaType SchemaType -xmlSchemaTypeLink SchemaTypeLink -xmlSchemaTypeLinkPtr SchemaTypeLinkPtr -xmlSchemaTypePtr SchemaTypePtr -xmlSchemaTypeType SchemaTypeType -xmlSchemaVal SchemaVal -xmlSchemaValPtr SchemaValPtr -xmlSchemaValType SchemaValType -xmlSchemaValidCtxt SchemaValidCtxt -xmlSchemaValidCtxtPtr SchemaValidCtxtPtr -xmlSchemaValidError SchemaValidError -xmlSchemaValidOption SchemaValidOption -xmlSchemaValidityErrorFunc SchemaValidityErrorFunc -xmlSchemaValidityLocatorFunc SchemaValidityLocatorFunc -xmlSchemaValidityWarningFunc SchemaValidityWarningFunc -xmlSchemaWhitespaceValueType SchemaWhitespaceValueType -xmlSchemaWildcard SchemaWildcard -xmlSchemaWildcardNs SchemaWildcardNs -xmlSchemaWildcardNsPtr SchemaWildcardNsPtr -xmlSchemaWildcardPtr SchemaWildcardPtr 
-xmlSchematron Schematron -xmlSchematronParserCtxt SchematronParserCtxt -xmlSchematronParserCtxtPtr SchematronParserCtxtPtr -xmlSchematronPtr SchematronPtr -xmlSchematronValidCtxt SchematronValidCtxt -xmlSchematronValidCtxtPtr SchematronValidCtxtPtr -xmlSchematronValidOptions SchematronValidOptions -xmlSchematronValidityErrorFunc SchematronValidityErrorFunc -xmlSchematronValidityWarningFunc SchematronValidityWarningFunc -xmlShellCmd ShellCmd -xmlShellCtxt ShellCtxt -xmlShellCtxtPtr ShellCtxtPtr -xmlShellReadlineFunc ShellReadlineFunc -xmlStartTag StartTag -xmlStrdupFunc StrdupFunc -xmlStreamCtxt StreamCtxt -xmlStreamCtxtPtr StreamCtxtPtr -xmlStructuredErrorFunc StructuredErrorFunc -xmlTextReader TextReader -xmlTextReaderErrorFunc TextReaderErrorFunc -xmlTextReaderLocatorPtr TextReaderLocatorPtr -xmlTextReaderMode TextReaderMode -xmlTextReaderPtr TextReaderPtr -xmlTextWriter TextWriter -xmlTextWriterPtr TextWriterPtr -xmlURI URI -xmlURIPtr URIPtr -xmlValidCtxt ValidCtxt -xmlValidCtxtPtr ValidCtxtPtr -xmlValidState ValidState -xmlValidStatePtr ValidStatePtr -xmlValidityErrorFunc ValidityErrorFunc -xmlValidityWarningFunc ValidityWarningFunc -xmlXIncludeCtxt XIncludeCtxt -xmlXIncludeCtxtPtr XIncludeCtxtPtr -xmlXPathAxis XPathAxis -xmlXPathAxisFunc XPathAxisFunc -xmlXPathAxisPtr XPathAxisPtr -xmlXPathCompExpr XPathCompExpr -xmlXPathCompExprPtr XPathCompExprPtr -xmlXPathContext XPathContext -xmlXPathContextPtr XPathContextPtr -xmlXPathConvertFunc XPathConvertFunc -xmlXPathError XPathError -xmlXPathEvalFunc XPathEvalFunc -xmlXPathFuncLookupFunc XPathFuncLookupFunc -xmlXPathFuncPtr XPathFuncPtr -xmlXPathFunct XPathFunct -xmlXPathFunction XPathFunction -xmlXPathObject XPathObject -xmlXPathObjectPtr XPathObjectPtr -xmlXPathObjectType XPathObjectType -xmlXPathParserContext XPathParserContext -xmlXPathParserContextPtr XPathParserContextPtr -xmlXPathType XPathType -xmlXPathTypePtr XPathTypePtr -xmlXPathVariable XPathVariable -xmlXPathVariableLookupFunc XPathVariableLookupFunc 
-xmlXPathVariablePtr XPathVariablePtr \ No newline at end of file diff --git a/libxml2/llpkg.cfg b/libxml2/llpkg.cfg deleted file mode 100644 index fdcf05ec..00000000 --- a/libxml2/llpkg.cfg +++ /dev/null @@ -1,14 +0,0 @@ -{ - "upstream": { - "package": { - "name": "libxml2", - "version": "2.13.6" - }, - "installer":{ - "name": "conan", - "config" : { - "options": "iconv=False" - } - } - } -} \ No newline at end of file diff --git a/libxml2/nanoftp.go b/libxml2/nanoftp.go deleted file mode 100644 index d2c96b67..00000000 --- a/libxml2/nanoftp.go +++ /dev/null @@ -1,93 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -// llgo:type C -type FtpListCallback func(c.Pointer, *c.Char, *c.Char, *c.Char, *c.Char, c.Ulong, c.Int, c.Int, *c.Char, c.Int, c.Int, c.Int) - -// llgo:type C -type FtpDataCallback func(c.Pointer, *c.Char, c.Int) - -/* - * Init - */ -//go:linkname NanoFTPInit C.xmlNanoFTPInit -func NanoFTPInit() - -//go:linkname NanoFTPCleanup C.xmlNanoFTPCleanup -func NanoFTPCleanup() - -/* - * Creating/freeing contexts. - */ -//go:linkname NanoFTPNewCtxt C.xmlNanoFTPNewCtxt -func NanoFTPNewCtxt(URL *c.Char) c.Pointer - -//go:linkname NanoFTPFreeCtxt C.xmlNanoFTPFreeCtxt -func NanoFTPFreeCtxt(ctx c.Pointer) - -//go:linkname NanoFTPConnectTo C.xmlNanoFTPConnectTo -func NanoFTPConnectTo(server *c.Char, port c.Int) c.Pointer - -/* - * Opening/closing session connections. 
- */ -//go:linkname NanoFTPOpen C.xmlNanoFTPOpen -func NanoFTPOpen(URL *c.Char) c.Pointer - -//go:linkname NanoFTPConnect C.xmlNanoFTPConnect -func NanoFTPConnect(ctx c.Pointer) c.Int - -//go:linkname NanoFTPClose C.xmlNanoFTPClose -func NanoFTPClose(ctx c.Pointer) c.Int - -//go:linkname NanoFTPQuit C.xmlNanoFTPQuit -func NanoFTPQuit(ctx c.Pointer) c.Int - -//go:linkname NanoFTPScanProxy C.xmlNanoFTPScanProxy -func NanoFTPScanProxy(URL *c.Char) - -//go:linkname NanoFTPProxy C.xmlNanoFTPProxy -func NanoFTPProxy(host *c.Char, port c.Int, user *c.Char, passwd *c.Char, type_ c.Int) - -//go:linkname NanoFTPUpdateURL C.xmlNanoFTPUpdateURL -func NanoFTPUpdateURL(ctx c.Pointer, URL *c.Char) c.Int - -/* - * Rather internal commands. - */ -//go:linkname NanoFTPGetResponse C.xmlNanoFTPGetResponse -func NanoFTPGetResponse(ctx c.Pointer) c.Int - -//go:linkname NanoFTPCheckResponse C.xmlNanoFTPCheckResponse -func NanoFTPCheckResponse(ctx c.Pointer) c.Int - -/* - * CD/DIR/GET handlers. - */ -//go:linkname NanoFTPCwd C.xmlNanoFTPCwd -func NanoFTPCwd(ctx c.Pointer, directory *c.Char) c.Int - -//go:linkname NanoFTPDele C.xmlNanoFTPDele -func NanoFTPDele(ctx c.Pointer, file *c.Char) c.Int - -//go:linkname NanoFTPGetConnection C.xmlNanoFTPGetConnection -func NanoFTPGetConnection(ctx c.Pointer) c.Int - -//go:linkname NanoFTPCloseConnection C.xmlNanoFTPCloseConnection -func NanoFTPCloseConnection(ctx c.Pointer) c.Int - -//go:linkname NanoFTPList C.xmlNanoFTPList -func NanoFTPList(ctx c.Pointer, callback FtpListCallback, userData c.Pointer, filename *c.Char) c.Int - -//go:linkname NanoFTPGetSocket C.xmlNanoFTPGetSocket -func NanoFTPGetSocket(ctx c.Pointer, filename *c.Char) c.Int - -//go:linkname NanoFTPGet C.xmlNanoFTPGet -func NanoFTPGet(ctx c.Pointer, callback FtpDataCallback, userData c.Pointer, filename *c.Char) c.Int - -//go:linkname NanoFTPRead C.xmlNanoFTPRead -func NanoFTPRead(ctx c.Pointer, dest c.Pointer, len c.Int) c.Int diff --git a/libxml2/nanohttp.go b/libxml2/nanohttp.go 
deleted file mode 100644 index c59570c1..00000000 --- a/libxml2/nanohttp.go +++ /dev/null @@ -1,57 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -//go:linkname NanoHTTPInit C.xmlNanoHTTPInit -func NanoHTTPInit() - -//go:linkname NanoHTTPCleanup C.xmlNanoHTTPCleanup -func NanoHTTPCleanup() - -//go:linkname NanoHTTPScanProxy C.xmlNanoHTTPScanProxy -func NanoHTTPScanProxy(URL *c.Char) - -//go:linkname NanoHTTPFetch C.xmlNanoHTTPFetch -func NanoHTTPFetch(URL *c.Char, filename *c.Char, contentType **c.Char) c.Int - -//go:linkname NanoHTTPMethod C.xmlNanoHTTPMethod -func NanoHTTPMethod(URL *c.Char, method *c.Char, input *c.Char, contentType **c.Char, headers *c.Char, ilen c.Int) c.Pointer - -//go:linkname NanoHTTPMethodRedir C.xmlNanoHTTPMethodRedir -func NanoHTTPMethodRedir(URL *c.Char, method *c.Char, input *c.Char, contentType **c.Char, redir **c.Char, headers *c.Char, ilen c.Int) c.Pointer - -//go:linkname NanoHTTPOpen C.xmlNanoHTTPOpen -func NanoHTTPOpen(URL *c.Char, contentType **c.Char) c.Pointer - -//go:linkname NanoHTTPOpenRedir C.xmlNanoHTTPOpenRedir -func NanoHTTPOpenRedir(URL *c.Char, contentType **c.Char, redir **c.Char) c.Pointer - -//go:linkname NanoHTTPReturnCode C.xmlNanoHTTPReturnCode -func NanoHTTPReturnCode(ctx c.Pointer) c.Int - -//go:linkname NanoHTTPAuthHeader C.xmlNanoHTTPAuthHeader -func NanoHTTPAuthHeader(ctx c.Pointer) *c.Char - -//go:linkname NanoHTTPRedir C.xmlNanoHTTPRedir -func NanoHTTPRedir(ctx c.Pointer) *c.Char - -//go:linkname NanoHTTPContentLength C.xmlNanoHTTPContentLength -func NanoHTTPContentLength(ctx c.Pointer) c.Int - -//go:linkname NanoHTTPEncoding C.xmlNanoHTTPEncoding -func NanoHTTPEncoding(ctx c.Pointer) *c.Char - -//go:linkname NanoHTTPMimeType C.xmlNanoHTTPMimeType -func NanoHTTPMimeType(ctx c.Pointer) *c.Char - -//go:linkname NanoHTTPRead C.xmlNanoHTTPRead -func NanoHTTPRead(ctx c.Pointer, dest c.Pointer, len c.Int) c.Int - -//go:linkname NanoHTTPSave C.xmlNanoHTTPSave -func 
NanoHTTPSave(ctxt c.Pointer, filename *c.Char) c.Int - -//go:linkname NanoHTTPClose C.xmlNanoHTTPClose -func NanoHTTPClose(ctx c.Pointer) diff --git a/libxml2/parser.go b/libxml2/parser.go deleted file mode 100644 index 91cb9e98..00000000 --- a/libxml2/parser.go +++ /dev/null @@ -1,627 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const DEFAULT_VERSION = "1.0" -const DETECT_IDS = 2 -const COMPLETE_ATTRS = 4 -const SKIP_IDS = 8 -const SAX2_MAGIC = 0xDEEDBEAF - -// llgo:type C -type ParserInputDeallocate func(*Char) - -type X_xmlParserNodeInfo struct { - Node *X_xmlNode - BeginPos c.Ulong - BeginLine c.Ulong - EndPos c.Ulong - EndLine c.Ulong -} -type ParserNodeInfo X_xmlParserNodeInfo -type ParserNodeInfoPtr *ParserNodeInfo - -type X_xmlParserNodeInfoSeq struct { - Maximum c.Ulong - Length c.Ulong - Buffer *ParserNodeInfo -} -type ParserNodeInfoSeq X_xmlParserNodeInfoSeq -type ParserNodeInfoSeqPtr *ParserNodeInfoSeq -type ParserInputState c.Int - -const ( - PARSER_EOF ParserInputState = -1 - PARSER_START ParserInputState = 0 - PARSER_MISC ParserInputState = 1 - PARSER_PI ParserInputState = 2 - PARSER_DTD ParserInputState = 3 - PARSER_PROLOG ParserInputState = 4 - PARSER_COMMENT ParserInputState = 5 - PARSER_START_TAG ParserInputState = 6 - PARSER_CONTENT ParserInputState = 7 - PARSER_CDATA_SECTION ParserInputState = 8 - PARSER_END_TAG ParserInputState = 9 - PARSER_ENTITY_DECL ParserInputState = 10 - PARSER_ENTITY_VALUE ParserInputState = 11 - PARSER_ATTRIBUTE_VALUE ParserInputState = 12 - PARSER_SYSTEM_LITERAL ParserInputState = 13 - PARSER_EPILOG ParserInputState = 14 - PARSER_IGNORE ParserInputState = 15 - PARSER_PUBLIC_LITERAL ParserInputState = 16 - PARSER_XML_DECL ParserInputState = 17 -) - -type ParserMode c.Int - -const ( - PARSE_UNKNOWN ParserMode = 0 - PARSE_DOM ParserMode = 1 - PARSE_SAX ParserMode = 2 - PARSE_PUSH_DOM ParserMode = 3 - PARSE_PUSH_SAX ParserMode = 4 - PARSE_READER ParserMode = 5 -) - -type X_xmlStartTag 
struct { - Unused [8]uint8 -} -type StartTag X_xmlStartTag - -type X_xmlParserNsData struct { - Unused [8]uint8 -} -type ParserNsData X_xmlParserNsData - -type X_xmlAttrHashBucket struct { - Unused [8]uint8 -} -type AttrHashBucket X_xmlAttrHashBucket - -// llgo:type C -type ResolveEntitySAXFunc func(c.Pointer, *Char, *Char) ParserInputPtr - -// llgo:type C -type InternalSubsetSAXFunc func(c.Pointer, *Char, *Char, *Char) - -// llgo:type C -type ExternalSubsetSAXFunc func(c.Pointer, *Char, *Char, *Char) - -// llgo:type C -type GetEntitySAXFunc func(c.Pointer, *Char) EntityPtr - -// llgo:type C -type GetParameterEntitySAXFunc func(c.Pointer, *Char) EntityPtr - -// llgo:type C -type EntityDeclSAXFunc func(c.Pointer, *Char, c.Int, *Char, *Char, *Char) - -// llgo:type C -type NotationDeclSAXFunc func(c.Pointer, *Char, *Char, *Char) - -// llgo:type C -type AttributeDeclSAXFunc func(c.Pointer, *Char, *Char, c.Int, c.Int, *Char, EnumerationPtr) - -// llgo:type C -type ElementDeclSAXFunc func(c.Pointer, *Char, c.Int, ElementContentPtr) - -// llgo:type C -type UnparsedEntityDeclSAXFunc func(c.Pointer, *Char, *Char, *Char, *Char) - -// llgo:type C -type SetDocumentLocatorSAXFunc func(c.Pointer, SAXLocatorPtr) - -// llgo:type C -type StartDocumentSAXFunc func(c.Pointer) - -// llgo:type C -type EndDocumentSAXFunc func(c.Pointer) - -// llgo:type C -type StartElementSAXFunc func(c.Pointer, *Char, **Char) - -// llgo:type C -type EndElementSAXFunc func(c.Pointer, *Char) - -// llgo:type C -type AttributeSAXFunc func(c.Pointer, *Char, *Char) - -// llgo:type C -type ReferenceSAXFunc func(c.Pointer, *Char) - -// llgo:type C -type CharactersSAXFunc func(c.Pointer, *Char, c.Int) - -// llgo:type C -type IgnorableWhitespaceSAXFunc func(c.Pointer, *Char, c.Int) - -// llgo:type C -type ProcessingInstructionSAXFunc func(c.Pointer, *Char, *Char) - -// llgo:type C -type CommentSAXFunc func(c.Pointer, *Char) - -// llgo:type C -type CdataBlockSAXFunc func(c.Pointer, *Char, c.Int) - -// llgo:type C 
-type WarningSAXFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type ErrorSAXFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type FatalErrorSAXFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type IsStandaloneSAXFunc func(c.Pointer) c.Int - -// llgo:type C -type HasInternalSubsetSAXFunc func(c.Pointer) c.Int - -// llgo:type C -type HasExternalSubsetSAXFunc func(c.Pointer) c.Int - -// llgo:type C -type StartElementNsSAX2Func func(c.Pointer, *Char, *Char, *Char, c.Int, **Char, c.Int, c.Int, **Char) - -// llgo:type C -type EndElementNsSAX2Func func(c.Pointer, *Char, *Char, *Char) - -type X_xmlSAXHandlerV1 struct { - InternalSubset InternalSubsetSAXFunc - IsStandalone IsStandaloneSAXFunc - HasInternalSubset HasInternalSubsetSAXFunc - HasExternalSubset HasExternalSubsetSAXFunc - ResolveEntity ResolveEntitySAXFunc - GetEntity GetEntitySAXFunc - EntityDecl EntityDeclSAXFunc - NotationDecl NotationDeclSAXFunc - AttributeDecl AttributeDeclSAXFunc - ElementDecl ElementDeclSAXFunc - UnparsedEntityDecl UnparsedEntityDeclSAXFunc - SetDocumentLocator SetDocumentLocatorSAXFunc - StartDocument StartDocumentSAXFunc - EndDocument EndDocumentSAXFunc - StartElement StartElementSAXFunc - EndElement EndElementSAXFunc - Reference ReferenceSAXFunc - Characters CharactersSAXFunc - IgnorableWhitespace IgnorableWhitespaceSAXFunc - ProcessingInstruction ProcessingInstructionSAXFunc - Comment CommentSAXFunc - Warning WarningSAXFunc - Error ErrorSAXFunc - FatalError FatalErrorSAXFunc - GetParameterEntity GetParameterEntitySAXFunc - CdataBlock CdataBlockSAXFunc - ExternalSubset ExternalSubsetSAXFunc - Initialized c.Uint -} -type SAXHandlerV1 X_xmlSAXHandlerV1 -type SAXHandlerV1Ptr *SAXHandlerV1 - -// llgo:type C -type ExternalEntityLoader func(*c.Char, *c.Char, ParserCtxtPtr) ParserInputPtr - -/* backward compatibility */ 
-//go:linkname X__xmlParserVersion C.__xmlParserVersion -func X__xmlParserVersion() **c.Char - -//go:linkname X__oldXMLWDcompatibility C.__oldXMLWDcompatibility -func X__oldXMLWDcompatibility() *c.Int - -//go:linkname X__xmlParserDebugEntities C.__xmlParserDebugEntities -func X__xmlParserDebugEntities() *c.Int - -//go:linkname X__xmlDefaultSAXLocator C.__xmlDefaultSAXLocator -func X__xmlDefaultSAXLocator() *SAXLocator - -//go:linkname X__xmlDefaultSAXHandler C.__xmlDefaultSAXHandler -func X__xmlDefaultSAXHandler() *SAXHandlerV1 - -//go:linkname X__xmlDoValidityCheckingDefaultValue C.__xmlDoValidityCheckingDefaultValue -func X__xmlDoValidityCheckingDefaultValue() *c.Int - -//go:linkname X__xmlGetWarningsDefaultValue C.__xmlGetWarningsDefaultValue -func X__xmlGetWarningsDefaultValue() *c.Int - -//go:linkname X__xmlKeepBlanksDefaultValue C.__xmlKeepBlanksDefaultValue -func X__xmlKeepBlanksDefaultValue() *c.Int - -//go:linkname X__xmlLineNumbersDefaultValue C.__xmlLineNumbersDefaultValue -func X__xmlLineNumbersDefaultValue() *c.Int - -//go:linkname X__xmlLoadExtDtdDefaultValue C.__xmlLoadExtDtdDefaultValue -func X__xmlLoadExtDtdDefaultValue() *c.Int - -//go:linkname X__xmlPedanticParserDefaultValue C.__xmlPedanticParserDefaultValue -func X__xmlPedanticParserDefaultValue() *c.Int - -//go:linkname X__xmlSubstituteEntitiesDefaultValue C.__xmlSubstituteEntitiesDefaultValue -func X__xmlSubstituteEntitiesDefaultValue() *c.Int - -//go:linkname X__xmlIndentTreeOutput C.__xmlIndentTreeOutput -func X__xmlIndentTreeOutput() *c.Int - -//go:linkname X__xmlTreeIndentString C.__xmlTreeIndentString -func X__xmlTreeIndentString() **c.Char - -//go:linkname X__xmlSaveNoEmptyTags C.__xmlSaveNoEmptyTags -func X__xmlSaveNoEmptyTags() *c.Int - -/* - * Init/Cleanup - */ -//go:linkname InitParser C.xmlInitParser -func InitParser() - -//go:linkname CleanupParser C.xmlCleanupParser -func CleanupParser() - -//go:linkname InitGlobals C.xmlInitGlobals -func InitGlobals() - -//go:linkname 
CleanupGlobals C.xmlCleanupGlobals -func CleanupGlobals() - -/* - * Input functions - */ -//go:linkname ParserInputRead C.xmlParserInputRead -func ParserInputRead(in ParserInputPtr, len c.Int) c.Int - -//go:linkname ParserInputGrow C.xmlParserInputGrow -func ParserInputGrow(in ParserInputPtr, len c.Int) c.Int - -/* - * Basic parsing Interfaces - */ -// llgo:link (*Char).ParseDoc C.xmlParseDoc -func (recv_ *Char) ParseDoc() DocPtr { - return nil -} - -//go:linkname ParseFile C.xmlParseFile -func ParseFile(filename *c.Char) DocPtr - -//go:linkname ParseMemory C.xmlParseMemory -func ParseMemory(buffer *c.Char, size c.Int) DocPtr - -//go:linkname SubstituteEntitiesDefault C.xmlSubstituteEntitiesDefault -func SubstituteEntitiesDefault(val c.Int) c.Int - -//go:linkname ThrDefSubstituteEntitiesDefaultValue C.xmlThrDefSubstituteEntitiesDefaultValue -func ThrDefSubstituteEntitiesDefaultValue(v c.Int) c.Int - -//go:linkname KeepBlanksDefault C.xmlKeepBlanksDefault -func KeepBlanksDefault(val c.Int) c.Int - -//go:linkname ThrDefKeepBlanksDefaultValue C.xmlThrDefKeepBlanksDefaultValue -func ThrDefKeepBlanksDefaultValue(v c.Int) c.Int - -//go:linkname StopParser C.xmlStopParser -func StopParser(ctxt ParserCtxtPtr) - -//go:linkname PedanticParserDefault C.xmlPedanticParserDefault -func PedanticParserDefault(val c.Int) c.Int - -//go:linkname ThrDefPedanticParserDefaultValue C.xmlThrDefPedanticParserDefaultValue -func ThrDefPedanticParserDefaultValue(v c.Int) c.Int - -//go:linkname LineNumbersDefault C.xmlLineNumbersDefault -func LineNumbersDefault(val c.Int) c.Int - -//go:linkname ThrDefLineNumbersDefaultValue C.xmlThrDefLineNumbersDefaultValue -func ThrDefLineNumbersDefaultValue(v c.Int) c.Int - -//go:linkname ThrDefDoValidityCheckingDefaultValue C.xmlThrDefDoValidityCheckingDefaultValue -func ThrDefDoValidityCheckingDefaultValue(v c.Int) c.Int - -//go:linkname ThrDefGetWarningsDefaultValue C.xmlThrDefGetWarningsDefaultValue -func ThrDefGetWarningsDefaultValue(v c.Int) c.Int - 
-//go:linkname ThrDefLoadExtDtdDefaultValue C.xmlThrDefLoadExtDtdDefaultValue -func ThrDefLoadExtDtdDefaultValue(v c.Int) c.Int - -//go:linkname ThrDefParserDebugEntities C.xmlThrDefParserDebugEntities -func ThrDefParserDebugEntities(v c.Int) c.Int - -/* - * Recovery mode - */ -// llgo:link (*Char).RecoverDoc C.xmlRecoverDoc -func (recv_ *Char) RecoverDoc() DocPtr { - return nil -} - -//go:linkname RecoverMemory C.xmlRecoverMemory -func RecoverMemory(buffer *c.Char, size c.Int) DocPtr - -//go:linkname RecoverFile C.xmlRecoverFile -func RecoverFile(filename *c.Char) DocPtr - -/* - * Less common routines and SAX interfaces - */ -//go:linkname ParseDocument C.xmlParseDocument -func ParseDocument(ctxt ParserCtxtPtr) c.Int - -//go:linkname ParseExtParsedEnt C.xmlParseExtParsedEnt -func ParseExtParsedEnt(ctxt ParserCtxtPtr) c.Int - -//go:linkname SAXUserParseFile C.xmlSAXUserParseFile -func SAXUserParseFile(sax SAXHandlerPtr, user_data c.Pointer, filename *c.Char) c.Int - -//go:linkname SAXUserParseMemory C.xmlSAXUserParseMemory -func SAXUserParseMemory(sax SAXHandlerPtr, user_data c.Pointer, buffer *c.Char, size c.Int) c.Int - -//go:linkname SAXParseDoc C.xmlSAXParseDoc -func SAXParseDoc(sax SAXHandlerPtr, cur *Char, recovery c.Int) DocPtr - -//go:linkname SAXParseMemory C.xmlSAXParseMemory -func SAXParseMemory(sax SAXHandlerPtr, buffer *c.Char, size c.Int, recovery c.Int) DocPtr - -//go:linkname SAXParseMemoryWithData C.xmlSAXParseMemoryWithData -func SAXParseMemoryWithData(sax SAXHandlerPtr, buffer *c.Char, size c.Int, recovery c.Int, data c.Pointer) DocPtr - -//go:linkname SAXParseFile C.xmlSAXParseFile -func SAXParseFile(sax SAXHandlerPtr, filename *c.Char, recovery c.Int) DocPtr - -//go:linkname SAXParseFileWithData C.xmlSAXParseFileWithData -func SAXParseFileWithData(sax SAXHandlerPtr, filename *c.Char, recovery c.Int, data c.Pointer) DocPtr - -//go:linkname SAXParseEntity C.xmlSAXParseEntity -func SAXParseEntity(sax SAXHandlerPtr, filename *c.Char) DocPtr - 
-//go:linkname ParseEntity C.xmlParseEntity -func ParseEntity(filename *c.Char) DocPtr - -//go:linkname SAXParseDTD C.xmlSAXParseDTD -func SAXParseDTD(sax SAXHandlerPtr, ExternalID *Char, SystemID *Char) DtdPtr - -// llgo:link (*Char).ParseDTD C.xmlParseDTD -func (recv_ *Char) ParseDTD(SystemID *Char) DtdPtr { - return nil -} - -//go:linkname IOParseDTD C.xmlIOParseDTD -func IOParseDTD(sax SAXHandlerPtr, input ParserInputBufferPtr, enc CharEncoding) DtdPtr - -//go:linkname ParseBalancedChunkMemory C.xmlParseBalancedChunkMemory -func ParseBalancedChunkMemory(doc DocPtr, sax SAXHandlerPtr, user_data c.Pointer, depth c.Int, string *Char, lst *NodePtr) c.Int - -//go:linkname ParseInNodeContext C.xmlParseInNodeContext -func ParseInNodeContext(node NodePtr, data *c.Char, datalen c.Int, options c.Int, lst *NodePtr) ParserErrors - -//go:linkname ParseBalancedChunkMemoryRecover C.xmlParseBalancedChunkMemoryRecover -func ParseBalancedChunkMemoryRecover(doc DocPtr, sax SAXHandlerPtr, user_data c.Pointer, depth c.Int, string *Char, lst *NodePtr, recover c.Int) c.Int - -//go:linkname ParseExternalEntity C.xmlParseExternalEntity -func ParseExternalEntity(doc DocPtr, sax SAXHandlerPtr, user_data c.Pointer, depth c.Int, URL *Char, ID *Char, lst *NodePtr) c.Int - -//go:linkname ParseCtxtExternalEntity C.xmlParseCtxtExternalEntity -func ParseCtxtExternalEntity(ctx ParserCtxtPtr, URL *Char, ID *Char, lst *NodePtr) c.Int - -/* - * Parser contexts handling. 
- */ -//go:linkname NewParserCtxt C.xmlNewParserCtxt -func NewParserCtxt() ParserCtxtPtr - -// llgo:link (*SAXHandler).NewSAXParserCtxt C.xmlNewSAXParserCtxt -func (recv_ *SAXHandler) NewSAXParserCtxt(userData c.Pointer) ParserCtxtPtr { - return nil -} - -//go:linkname InitParserCtxt C.xmlInitParserCtxt -func InitParserCtxt(ctxt ParserCtxtPtr) c.Int - -//go:linkname ClearParserCtxt C.xmlClearParserCtxt -func ClearParserCtxt(ctxt ParserCtxtPtr) - -//go:linkname FreeParserCtxt C.xmlFreeParserCtxt -func FreeParserCtxt(ctxt ParserCtxtPtr) - -//go:linkname SetupParserForBuffer C.xmlSetupParserForBuffer -func SetupParserForBuffer(ctxt ParserCtxtPtr, buffer *Char, filename *c.Char) - -// llgo:link (*Char).CreateDocParserCtxt C.xmlCreateDocParserCtxt -func (recv_ *Char) CreateDocParserCtxt() ParserCtxtPtr { - return nil -} - -/* - * Reading/setting optional parsing features. - */ -//go:linkname GetFeaturesList C.xmlGetFeaturesList -func GetFeaturesList(len *c.Int, result **c.Char) c.Int - -//go:linkname GetFeature C.xmlGetFeature -func GetFeature(ctxt ParserCtxtPtr, name *c.Char, result c.Pointer) c.Int - -//go:linkname SetFeature C.xmlSetFeature -func SetFeature(ctxt ParserCtxtPtr, name *c.Char, value c.Pointer) c.Int - -/* - * Interfaces for the Push mode. - */ -//go:linkname CreatePushParserCtxt C.xmlCreatePushParserCtxt -func CreatePushParserCtxt(sax SAXHandlerPtr, user_data c.Pointer, chunk *c.Char, size c.Int, filename *c.Char) ParserCtxtPtr - -//go:linkname ParseChunk C.xmlParseChunk -func ParseChunk(ctxt ParserCtxtPtr, chunk *c.Char, size c.Int, terminate c.Int) c.Int - -/* - * Special I/O mode. 
- */ -//go:linkname CreateIOParserCtxt C.xmlCreateIOParserCtxt -func CreateIOParserCtxt(sax SAXHandlerPtr, user_data c.Pointer, ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, enc CharEncoding) ParserCtxtPtr - -//go:linkname NewIOInputStream C.xmlNewIOInputStream -func NewIOInputStream(ctxt ParserCtxtPtr, input ParserInputBufferPtr, enc CharEncoding) ParserInputPtr - -/* - * Node infos. - */ -//go:linkname ParserFindNodeInfo C.xmlParserFindNodeInfo -func ParserFindNodeInfo(ctxt ParserCtxtPtr, node NodePtr) *ParserNodeInfo - -//go:linkname InitNodeInfoSeq C.xmlInitNodeInfoSeq -func InitNodeInfoSeq(seq ParserNodeInfoSeqPtr) - -//go:linkname ClearNodeInfoSeq C.xmlClearNodeInfoSeq -func ClearNodeInfoSeq(seq ParserNodeInfoSeqPtr) - -//go:linkname ParserFindNodeInfoIndex C.xmlParserFindNodeInfoIndex -func ParserFindNodeInfoIndex(seq ParserNodeInfoSeqPtr, node NodePtr) c.Ulong - -//go:linkname ParserAddNodeInfo C.xmlParserAddNodeInfo -func ParserAddNodeInfo(ctxt ParserCtxtPtr, info ParserNodeInfoPtr) - -/* - * External entities handling actually implemented in xmlIO. 
- */ -//go:linkname SetExternalEntityLoader C.xmlSetExternalEntityLoader -func SetExternalEntityLoader(f ExternalEntityLoader) - -//go:linkname GetExternalEntityLoader C.xmlGetExternalEntityLoader -func GetExternalEntityLoader() ExternalEntityLoader - -//go:linkname LoadExternalEntity C.xmlLoadExternalEntity -func LoadExternalEntity(URL *c.Char, ID *c.Char, ctxt ParserCtxtPtr) ParserInputPtr - -/* - * Index lookup, actually implemented in the encoding module - */ -//go:linkname ByteConsumed C.xmlByteConsumed -func ByteConsumed(ctxt ParserCtxtPtr) c.Long - -type ParserOption c.Int - -const ( - PARSE_RECOVER ParserOption = 1 - PARSE_NOENT ParserOption = 2 - PARSE_DTDLOAD ParserOption = 4 - PARSE_DTDATTR ParserOption = 8 - PARSE_DTDVALID ParserOption = 16 - PARSE_NOERROR ParserOption = 32 - PARSE_NOWARNING ParserOption = 64 - PARSE_PEDANTIC ParserOption = 128 - PARSE_NOBLANKS ParserOption = 256 - PARSE_SAX1 ParserOption = 512 - PARSE_XINCLUDE ParserOption = 1024 - PARSE_NONET ParserOption = 2048 - PARSE_NODICT ParserOption = 4096 - PARSE_NSCLEAN ParserOption = 8192 - PARSE_NOCDATA ParserOption = 16384 - PARSE_NOXINCNODE ParserOption = 32768 - PARSE_COMPACT ParserOption = 65536 - PARSE_OLD10 ParserOption = 131072 - PARSE_NOBASEFIX ParserOption = 262144 - PARSE_HUGE ParserOption = 524288 - PARSE_OLDSAX ParserOption = 1048576 - PARSE_IGNORE_ENC ParserOption = 2097152 - PARSE_BIG_LINES ParserOption = 4194304 - PARSE_NO_XXE ParserOption = 8388608 -) - -//go:linkname CtxtReset C.xmlCtxtReset -func CtxtReset(ctxt ParserCtxtPtr) - -//go:linkname CtxtResetPush C.xmlCtxtResetPush -func CtxtResetPush(ctxt ParserCtxtPtr, chunk *c.Char, size c.Int, filename *c.Char, encoding *c.Char) c.Int - -//go:linkname CtxtSetOptions C.xmlCtxtSetOptions -func CtxtSetOptions(ctxt ParserCtxtPtr, options c.Int) c.Int - -//go:linkname CtxtUseOptions C.xmlCtxtUseOptions -func CtxtUseOptions(ctxt ParserCtxtPtr, options c.Int) c.Int - -//go:linkname CtxtSetErrorHandler C.xmlCtxtSetErrorHandler -func 
CtxtSetErrorHandler(ctxt ParserCtxtPtr, handler StructuredErrorFunc, data c.Pointer) - -//go:linkname CtxtSetMaxAmplification C.xmlCtxtSetMaxAmplification -func CtxtSetMaxAmplification(ctxt ParserCtxtPtr, maxAmpl c.Uint) - -// llgo:link (*Char).ReadDoc C.xmlReadDoc -func (recv_ *Char) ReadDoc(URL *c.Char, encoding *c.Char, options c.Int) DocPtr { - return nil -} - -//go:linkname ReadFile C.xmlReadFile -func ReadFile(URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname ReadMemory C.xmlReadMemory -func ReadMemory(buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname ReadFd C.xmlReadFd -func ReadFd(fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname ReadIO C.xmlReadIO -func ReadIO(ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname CtxtParseDocument C.xmlCtxtParseDocument -func CtxtParseDocument(ctxt ParserCtxtPtr, input ParserInputPtr) DocPtr - -//go:linkname CtxtReadDoc C.xmlCtxtReadDoc -func CtxtReadDoc(ctxt ParserCtxtPtr, cur *Char, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname CtxtReadFile C.xmlCtxtReadFile -func CtxtReadFile(ctxt ParserCtxtPtr, filename *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname CtxtReadMemory C.xmlCtxtReadMemory -func CtxtReadMemory(ctxt ParserCtxtPtr, buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname CtxtReadFd C.xmlCtxtReadFd -func CtxtReadFd(ctxt ParserCtxtPtr, fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -//go:linkname CtxtReadIO C.xmlCtxtReadIO -func CtxtReadIO(ctxt ParserCtxtPtr, ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) DocPtr - -type Feature c.Int - -const ( - WITH_THREAD Feature = 1 - WITH_TREE Feature = 2 - WITH_OUTPUT Feature = 3 - WITH_PUSH Feature = 4 - WITH_READER 
Feature = 5 - WITH_PATTERN Feature = 6 - WITH_WRITER Feature = 7 - WITH_SAX1 Feature = 8 - WITH_FTP Feature = 9 - WITH_HTTP Feature = 10 - WITH_VALID Feature = 11 - WITH_HTML Feature = 12 - WITH_LEGACY Feature = 13 - WITH_C14N Feature = 14 - WITH_CATALOG Feature = 15 - WITH_XPATH Feature = 16 - WITH_XPTR Feature = 17 - WITH_XINCLUDE Feature = 18 - WITH_ICONV Feature = 19 - WITH_ISO8859X Feature = 20 - WITH_UNICODE Feature = 21 - WITH_REGEXP Feature = 22 - WITH_AUTOMATA Feature = 23 - WITH_EXPR Feature = 24 - WITH_SCHEMAS Feature = 25 - WITH_SCHEMATRON Feature = 26 - WITH_MODULES Feature = 27 - WITH_DEBUG Feature = 28 - WITH_DEBUG_MEM Feature = 29 - WITH_DEBUG_RUN Feature = 30 - WITH_ZLIB Feature = 31 - WITH_ICU Feature = 32 - WITH_LZMA Feature = 33 - WITH_NONE Feature = 99999 -) - -// llgo:link Feature.HasFeature C.xmlHasFeature -func (recv_ Feature) HasFeature() c.Int { - return 0 -} diff --git a/libxml2/parserInternals.go b/libxml2/parserInternals.go deleted file mode 100644 index fa0d184b..00000000 --- a/libxml2/parserInternals.go +++ /dev/null @@ -1,321 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const MAX_TEXT_LENGTH = 10000000 -const MAX_HUGE_LENGTH = 1000000000 -const MAX_NAME_LENGTH = 50000 -const MAX_DICTIONARY_LIMIT = 10000000 -const MAX_LOOKUP_LIMIT = 10000000 -const MAX_NAMELEN = 100 -const INPUT_CHUNK = 250 -const SUBSTITUTE_NONE = 0 -const SUBSTITUTE_REF = 1 -const SUBSTITUTE_PEREF = 2 -const SUBSTITUTE_BOTH = 3 - -/* - * Function to finish the work of the macros where needed. - */ -//go:linkname IsLetter C.xmlIsLetter -func IsLetter(c c.Int) c.Int - -/** - * Parser context. 
- */ -//go:linkname CreateFileParserCtxt C.xmlCreateFileParserCtxt -func CreateFileParserCtxt(filename *c.Char) ParserCtxtPtr - -//go:linkname CreateURLParserCtxt C.xmlCreateURLParserCtxt -func CreateURLParserCtxt(filename *c.Char, options c.Int) ParserCtxtPtr - -//go:linkname CreateMemoryParserCtxt C.xmlCreateMemoryParserCtxt -func CreateMemoryParserCtxt(buffer *c.Char, size c.Int) ParserCtxtPtr - -// llgo:link (*Char).CreateEntityParserCtxt C.xmlCreateEntityParserCtxt -func (recv_ *Char) CreateEntityParserCtxt(ID *Char, base *Char) ParserCtxtPtr { - return nil -} - -//go:linkname CtxtErrMemory C.xmlCtxtErrMemory -func CtxtErrMemory(ctxt ParserCtxtPtr) - -//go:linkname SwitchEncoding C.xmlSwitchEncoding -func SwitchEncoding(ctxt ParserCtxtPtr, enc CharEncoding) c.Int - -//go:linkname SwitchEncodingName C.xmlSwitchEncodingName -func SwitchEncodingName(ctxt ParserCtxtPtr, encoding *c.Char) c.Int - -//go:linkname SwitchToEncoding C.xmlSwitchToEncoding -func SwitchToEncoding(ctxt ParserCtxtPtr, handler CharEncodingHandlerPtr) c.Int - -//go:linkname SwitchInputEncoding C.xmlSwitchInputEncoding -func SwitchInputEncoding(ctxt ParserCtxtPtr, input ParserInputPtr, handler CharEncodingHandlerPtr) c.Int - -/** - * Input Streams. 
- */ -//go:linkname NewStringInputStream C.xmlNewStringInputStream -func NewStringInputStream(ctxt ParserCtxtPtr, buffer *Char) ParserInputPtr - -//go:linkname NewEntityInputStream C.xmlNewEntityInputStream -func NewEntityInputStream(ctxt ParserCtxtPtr, entity EntityPtr) ParserInputPtr - -//go:linkname PushInput C.xmlPushInput -func PushInput(ctxt ParserCtxtPtr, input ParserInputPtr) c.Int - -//go:linkname PopInput C.xmlPopInput -func PopInput(ctxt ParserCtxtPtr) Char - -//go:linkname FreeInputStream C.xmlFreeInputStream -func FreeInputStream(input ParserInputPtr) - -//go:linkname NewInputFromFile C.xmlNewInputFromFile -func NewInputFromFile(ctxt ParserCtxtPtr, filename *c.Char) ParserInputPtr - -//go:linkname NewInputStream C.xmlNewInputStream -func NewInputStream(ctxt ParserCtxtPtr) ParserInputPtr - -/** - * Namespaces. - */ -//go:linkname SplitQName C.xmlSplitQName -func SplitQName(ctxt ParserCtxtPtr, name *Char, prefix **Char) *Char - -/** - * Generic production rules. - */ -//go:linkname ParseName C.xmlParseName -func ParseName(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseNmtoken C.xmlParseNmtoken -func ParseNmtoken(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseEntityValue C.xmlParseEntityValue -func ParseEntityValue(ctxt ParserCtxtPtr, orig **Char) *Char - -//go:linkname ParseAttValue C.xmlParseAttValue -func ParseAttValue(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseSystemLiteral C.xmlParseSystemLiteral -func ParseSystemLiteral(ctxt ParserCtxtPtr) *Char - -//go:linkname ParsePubidLiteral C.xmlParsePubidLiteral -func ParsePubidLiteral(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseCharData C.xmlParseCharData -func ParseCharData(ctxt ParserCtxtPtr, cdata c.Int) - -//go:linkname ParseExternalID C.xmlParseExternalID -func ParseExternalID(ctxt ParserCtxtPtr, publicID **Char, strict c.Int) *Char - -//go:linkname ParseComment C.xmlParseComment -func ParseComment(ctxt ParserCtxtPtr) - -//go:linkname ParsePITarget C.xmlParsePITarget -func ParsePITarget(ctxt 
ParserCtxtPtr) *Char - -//go:linkname ParsePI C.xmlParsePI -func ParsePI(ctxt ParserCtxtPtr) - -//go:linkname ParseNotationDecl C.xmlParseNotationDecl -func ParseNotationDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseEntityDecl C.xmlParseEntityDecl -func ParseEntityDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseDefaultDecl C.xmlParseDefaultDecl -func ParseDefaultDecl(ctxt ParserCtxtPtr, value **Char) c.Int - -//go:linkname ParseNotationType C.xmlParseNotationType -func ParseNotationType(ctxt ParserCtxtPtr) EnumerationPtr - -//go:linkname ParseEnumerationType C.xmlParseEnumerationType -func ParseEnumerationType(ctxt ParserCtxtPtr) EnumerationPtr - -//go:linkname ParseEnumeratedType C.xmlParseEnumeratedType -func ParseEnumeratedType(ctxt ParserCtxtPtr, tree *EnumerationPtr) c.Int - -//go:linkname ParseAttributeType C.xmlParseAttributeType -func ParseAttributeType(ctxt ParserCtxtPtr, tree *EnumerationPtr) c.Int - -//go:linkname ParseAttributeListDecl C.xmlParseAttributeListDecl -func ParseAttributeListDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseElementMixedContentDecl C.xmlParseElementMixedContentDecl -func ParseElementMixedContentDecl(ctxt ParserCtxtPtr, inputchk c.Int) ElementContentPtr - -//go:linkname ParseElementChildrenContentDecl C.xmlParseElementChildrenContentDecl -func ParseElementChildrenContentDecl(ctxt ParserCtxtPtr, inputchk c.Int) ElementContentPtr - -//go:linkname ParseElementContentDecl C.xmlParseElementContentDecl -func ParseElementContentDecl(ctxt ParserCtxtPtr, name *Char, result *ElementContentPtr) c.Int - -//go:linkname ParseElementDecl C.xmlParseElementDecl -func ParseElementDecl(ctxt ParserCtxtPtr) c.Int - -//go:linkname ParseMarkupDecl C.xmlParseMarkupDecl -func ParseMarkupDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseCharRef C.xmlParseCharRef -func ParseCharRef(ctxt ParserCtxtPtr) c.Int - -//go:linkname ParseEntityRef C.xmlParseEntityRef -func ParseEntityRef(ctxt ParserCtxtPtr) EntityPtr - -//go:linkname ParseReference C.xmlParseReference 
-func ParseReference(ctxt ParserCtxtPtr) - -//go:linkname ParsePEReference C.xmlParsePEReference -func ParsePEReference(ctxt ParserCtxtPtr) - -//go:linkname ParseDocTypeDecl C.xmlParseDocTypeDecl -func ParseDocTypeDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseAttribute C.xmlParseAttribute -func ParseAttribute(ctxt ParserCtxtPtr, value **Char) *Char - -//go:linkname ParseStartTag C.xmlParseStartTag -func ParseStartTag(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseEndTag C.xmlParseEndTag -func ParseEndTag(ctxt ParserCtxtPtr) - -//go:linkname ParseCDSect C.xmlParseCDSect -func ParseCDSect(ctxt ParserCtxtPtr) - -//go:linkname ParseContent C.xmlParseContent -func ParseContent(ctxt ParserCtxtPtr) - -//go:linkname ParseElement C.xmlParseElement -func ParseElement(ctxt ParserCtxtPtr) - -//go:linkname ParseVersionNum C.xmlParseVersionNum -func ParseVersionNum(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseVersionInfo C.xmlParseVersionInfo -func ParseVersionInfo(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseEncName C.xmlParseEncName -func ParseEncName(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseEncodingDecl C.xmlParseEncodingDecl -func ParseEncodingDecl(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseSDDecl C.xmlParseSDDecl -func ParseSDDecl(ctxt ParserCtxtPtr) c.Int - -//go:linkname ParseXMLDecl C.xmlParseXMLDecl -func ParseXMLDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseTextDecl C.xmlParseTextDecl -func ParseTextDecl(ctxt ParserCtxtPtr) - -//go:linkname ParseMisc C.xmlParseMisc -func ParseMisc(ctxt ParserCtxtPtr) - -//go:linkname ParseExternalSubset C.xmlParseExternalSubset -func ParseExternalSubset(ctxt ParserCtxtPtr, ExternalID *Char, SystemID *Char) - -//go:linkname StringDecodeEntities C.xmlStringDecodeEntities -func StringDecodeEntities(ctxt ParserCtxtPtr, str *Char, what c.Int, end Char, end2 Char, end3 Char) *Char - -//go:linkname StringLenDecodeEntities C.xmlStringLenDecodeEntities -func StringLenDecodeEntities(ctxt ParserCtxtPtr, str *Char, len c.Int, 
what c.Int, end Char, end2 Char, end3 Char) *Char - -/* - * Generated by MACROS on top of parser.c c.f. PUSH_AND_POP. - */ -//go:linkname NodePush C.nodePush -func NodePush(ctxt ParserCtxtPtr, value NodePtr) c.Int - -//go:linkname NodePop C.nodePop -func NodePop(ctxt ParserCtxtPtr) NodePtr - -//go:linkname InputPush C.inputPush -func InputPush(ctxt ParserCtxtPtr, value ParserInputPtr) c.Int - -//go:linkname InputPop C.inputPop -func InputPop(ctxt ParserCtxtPtr) ParserInputPtr - -//go:linkname NamePop C.namePop -func NamePop(ctxt ParserCtxtPtr) *Char - -//go:linkname NamePush C.namePush -func NamePush(ctxt ParserCtxtPtr, value *Char) c.Int - -/* - * other commodities shared between parser.c and parserInternals. - */ -//go:linkname SkipBlankChars C.xmlSkipBlankChars -func SkipBlankChars(ctxt ParserCtxtPtr) c.Int - -//go:linkname StringCurrentChar C.xmlStringCurrentChar -func StringCurrentChar(ctxt ParserCtxtPtr, cur *Char, len *c.Int) c.Int - -//go:linkname ParserHandlePEReference C.xmlParserHandlePEReference -func ParserHandlePEReference(ctxt ParserCtxtPtr) - -// llgo:link (*Char).CheckLanguageID C.xmlCheckLanguageID -func (recv_ *Char) CheckLanguageID() c.Int { - return 0 -} - -/* - * Really core function shared with HTML parser. 
- */ -//go:linkname CurrentChar C.xmlCurrentChar -func CurrentChar(ctxt ParserCtxtPtr, len *c.Int) c.Int - -// llgo:link (*Char).CopyCharMultiByte C.xmlCopyCharMultiByte -func (recv_ *Char) CopyCharMultiByte(val c.Int) c.Int { - return 0 -} - -//go:linkname CopyChar C.xmlCopyChar -func CopyChar(len c.Int, out *Char, val c.Int) c.Int - -//go:linkname NextChar C.xmlNextChar -func NextChar(ctxt ParserCtxtPtr) - -//go:linkname ParserInputShrink C.xmlParserInputShrink -func ParserInputShrink(in ParserInputPtr) - -// llgo:type C -type EntityReferenceFunc func(EntityPtr, NodePtr, NodePtr) - -//go:linkname SetEntityReferenceFunc C.xmlSetEntityReferenceFunc -func SetEntityReferenceFunc(func_ EntityReferenceFunc) - -//go:linkname ParseQuotedString C.xmlParseQuotedString -func ParseQuotedString(ctxt ParserCtxtPtr) *Char - -//go:linkname ParseNamespace C.xmlParseNamespace -func ParseNamespace(ctxt ParserCtxtPtr) - -//go:linkname NamespaceParseNSDef C.xmlNamespaceParseNSDef -func NamespaceParseNSDef(ctxt ParserCtxtPtr) *Char - -//go:linkname ScanName C.xmlScanName -func ScanName(ctxt ParserCtxtPtr) *Char - -//go:linkname NamespaceParseNCName C.xmlNamespaceParseNCName -func NamespaceParseNCName(ctxt ParserCtxtPtr) *Char - -//go:linkname ParserHandleReference C.xmlParserHandleReference -func ParserHandleReference(ctxt ParserCtxtPtr) - -//go:linkname NamespaceParseQName C.xmlNamespaceParseQName -func NamespaceParseQName(ctxt ParserCtxtPtr, prefix **Char) *Char - -/** - * Entities - */ -//go:linkname DecodeEntities C.xmlDecodeEntities -func DecodeEntities(ctxt ParserCtxtPtr, len c.Int, what c.Int, end Char, end2 Char, end3 Char) *Char - -//go:linkname HandleEntity C.xmlHandleEntity -func HandleEntity(ctxt ParserCtxtPtr, entity EntityPtr) diff --git a/libxml2/pattern.go b/libxml2/pattern.go deleted file mode 100644 index e43848da..00000000 --- a/libxml2/pattern.go +++ /dev/null @@ -1,78 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type 
X_xmlPattern struct { - Unused [8]uint8 -} -type Pattern X_xmlPattern -type PatternPtr *Pattern -type PatternFlags c.Int - -const ( - PATTERN_DEFAULT PatternFlags = 0 - PATTERN_XPATH PatternFlags = 1 - PATTERN_XSSEL PatternFlags = 2 - PATTERN_XSFIELD PatternFlags = 4 -) - -//go:linkname FreePattern C.xmlFreePattern -func FreePattern(comp PatternPtr) - -//go:linkname FreePatternList C.xmlFreePatternList -func FreePatternList(comp PatternPtr) - -// llgo:link (*Char).Patterncompile C.xmlPatterncompile -func (recv_ *Char) Patterncompile(dict *Dict, flags c.Int, namespaces **Char) PatternPtr { - return nil -} - -// llgo:link (*Char).PatternCompileSafe C.xmlPatternCompileSafe -func (recv_ *Char) PatternCompileSafe(dict *Dict, flags c.Int, namespaces **Char, patternOut *PatternPtr) c.Int { - return 0 -} - -//go:linkname PatternMatch C.xmlPatternMatch -func PatternMatch(comp PatternPtr, node NodePtr) c.Int - -type X_xmlStreamCtxt struct { - Unused [8]uint8 -} -type StreamCtxt X_xmlStreamCtxt -type StreamCtxtPtr *StreamCtxt - -//go:linkname PatternStreamable C.xmlPatternStreamable -func PatternStreamable(comp PatternPtr) c.Int - -//go:linkname PatternMaxDepth C.xmlPatternMaxDepth -func PatternMaxDepth(comp PatternPtr) c.Int - -//go:linkname PatternMinDepth C.xmlPatternMinDepth -func PatternMinDepth(comp PatternPtr) c.Int - -//go:linkname PatternFromRoot C.xmlPatternFromRoot -func PatternFromRoot(comp PatternPtr) c.Int - -//go:linkname PatternGetStreamCtxt C.xmlPatternGetStreamCtxt -func PatternGetStreamCtxt(comp PatternPtr) StreamCtxtPtr - -//go:linkname FreeStreamCtxt C.xmlFreeStreamCtxt -func FreeStreamCtxt(stream StreamCtxtPtr) - -//go:linkname StreamPushNode C.xmlStreamPushNode -func StreamPushNode(stream StreamCtxtPtr, name *Char, ns *Char, nodeType c.Int) c.Int - -//go:linkname StreamPush C.xmlStreamPush -func StreamPush(stream StreamCtxtPtr, name *Char, ns *Char) c.Int - -//go:linkname StreamPushAttr C.xmlStreamPushAttr -func StreamPushAttr(stream StreamCtxtPtr, name 
*Char, ns *Char) c.Int - -//go:linkname StreamPop C.xmlStreamPop -func StreamPop(stream StreamCtxtPtr) c.Int - -//go:linkname StreamWantsAnyNode C.xmlStreamWantsAnyNode -func StreamWantsAnyNode(stream StreamCtxtPtr) c.Int diff --git a/libxml2/relaxng.go b/libxml2/relaxng.go deleted file mode 100644 index 4b04c38c..00000000 --- a/libxml2/relaxng.go +++ /dev/null @@ -1,163 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlRelaxNG struct { - Unused [8]uint8 -} -type RelaxNG X_xmlRelaxNG -type RelaxNGPtr *RelaxNG - -// llgo:type C -type RelaxNGValidityErrorFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type RelaxNGValidityWarningFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -type X_xmlRelaxNGParserCtxt struct { - Unused [8]uint8 -} -type RelaxNGParserCtxt X_xmlRelaxNGParserCtxt -type RelaxNGParserCtxtPtr *RelaxNGParserCtxt - -type X_xmlRelaxNGValidCtxt struct { - Unused [8]uint8 -} -type RelaxNGValidCtxt X_xmlRelaxNGValidCtxt -type RelaxNGValidCtxtPtr *RelaxNGValidCtxt -type RelaxNGValidErr c.Int - -const ( - RELAXNG_OK RelaxNGValidErr = 0 - RELAXNG_ERR_MEMORY RelaxNGValidErr = 1 - RELAXNG_ERR_TYPE RelaxNGValidErr = 2 - RELAXNG_ERR_TYPEVAL RelaxNGValidErr = 3 - RELAXNG_ERR_DUPID RelaxNGValidErr = 4 - RELAXNG_ERR_TYPECMP RelaxNGValidErr = 5 - RELAXNG_ERR_NOSTATE RelaxNGValidErr = 6 - RELAXNG_ERR_NODEFINE RelaxNGValidErr = 7 - RELAXNG_ERR_LISTEXTRA RelaxNGValidErr = 8 - RELAXNG_ERR_LISTEMPTY RelaxNGValidErr = 9 - RELAXNG_ERR_INTERNODATA RelaxNGValidErr = 10 - RELAXNG_ERR_INTERSEQ RelaxNGValidErr = 11 - RELAXNG_ERR_INTEREXTRA RelaxNGValidErr = 12 - RELAXNG_ERR_ELEMNAME RelaxNGValidErr = 13 - RELAXNG_ERR_ATTRNAME RelaxNGValidErr = 14 - RELAXNG_ERR_ELEMNONS RelaxNGValidErr = 15 - RELAXNG_ERR_ATTRNONS RelaxNGValidErr = 16 - RELAXNG_ERR_ELEMWRONGNS RelaxNGValidErr = 17 - RELAXNG_ERR_ATTRWRONGNS RelaxNGValidErr = 18 - 
RELAXNG_ERR_ELEMEXTRANS RelaxNGValidErr = 19 - RELAXNG_ERR_ATTREXTRANS RelaxNGValidErr = 20 - RELAXNG_ERR_ELEMNOTEMPTY RelaxNGValidErr = 21 - RELAXNG_ERR_NOELEM RelaxNGValidErr = 22 - RELAXNG_ERR_NOTELEM RelaxNGValidErr = 23 - RELAXNG_ERR_ATTRVALID RelaxNGValidErr = 24 - RELAXNG_ERR_CONTENTVALID RelaxNGValidErr = 25 - RELAXNG_ERR_EXTRACONTENT RelaxNGValidErr = 26 - RELAXNG_ERR_INVALIDATTR RelaxNGValidErr = 27 - RELAXNG_ERR_DATAELEM RelaxNGValidErr = 28 - RELAXNG_ERR_VALELEM RelaxNGValidErr = 29 - RELAXNG_ERR_LISTELEM RelaxNGValidErr = 30 - RELAXNG_ERR_DATATYPE RelaxNGValidErr = 31 - RELAXNG_ERR_VALUE RelaxNGValidErr = 32 - RELAXNG_ERR_LIST RelaxNGValidErr = 33 - RELAXNG_ERR_NOGRAMMAR RelaxNGValidErr = 34 - RELAXNG_ERR_EXTRADATA RelaxNGValidErr = 35 - RELAXNG_ERR_LACKDATA RelaxNGValidErr = 36 - RELAXNG_ERR_INTERNAL RelaxNGValidErr = 37 - RELAXNG_ERR_ELEMWRONG RelaxNGValidErr = 38 - RELAXNG_ERR_TEXTWRONG RelaxNGValidErr = 39 -) - -type RelaxNGParserFlag c.Int - -const ( - RELAXNGP_NONE RelaxNGParserFlag = 0 - RELAXNGP_FREE_DOC RelaxNGParserFlag = 1 - RELAXNGP_CRNG RelaxNGParserFlag = 2 -) - -//go:linkname RelaxNGInitTypes C.xmlRelaxNGInitTypes -func RelaxNGInitTypes() c.Int - -//go:linkname RelaxNGCleanupTypes C.xmlRelaxNGCleanupTypes -func RelaxNGCleanupTypes() - -/* - * Interfaces for parsing. 
- */ -//go:linkname RelaxNGNewParserCtxt C.xmlRelaxNGNewParserCtxt -func RelaxNGNewParserCtxt(URL *c.Char) RelaxNGParserCtxtPtr - -//go:linkname RelaxNGNewMemParserCtxt C.xmlRelaxNGNewMemParserCtxt -func RelaxNGNewMemParserCtxt(buffer *c.Char, size c.Int) RelaxNGParserCtxtPtr - -//go:linkname RelaxNGNewDocParserCtxt C.xmlRelaxNGNewDocParserCtxt -func RelaxNGNewDocParserCtxt(doc DocPtr) RelaxNGParserCtxtPtr - -//go:linkname RelaxParserSetFlag C.xmlRelaxParserSetFlag -func RelaxParserSetFlag(ctxt RelaxNGParserCtxtPtr, flag c.Int) c.Int - -//go:linkname RelaxNGFreeParserCtxt C.xmlRelaxNGFreeParserCtxt -func RelaxNGFreeParserCtxt(ctxt RelaxNGParserCtxtPtr) - -//go:linkname RelaxNGSetParserErrors C.xmlRelaxNGSetParserErrors -func RelaxNGSetParserErrors(ctxt RelaxNGParserCtxtPtr, err RelaxNGValidityErrorFunc, warn RelaxNGValidityWarningFunc, ctx c.Pointer) - -//go:linkname RelaxNGGetParserErrors C.xmlRelaxNGGetParserErrors -func RelaxNGGetParserErrors(ctxt RelaxNGParserCtxtPtr, err RelaxNGValidityErrorFunc, warn RelaxNGValidityWarningFunc, ctx *c.Pointer) c.Int - -//go:linkname RelaxNGSetParserStructuredErrors C.xmlRelaxNGSetParserStructuredErrors -func RelaxNGSetParserStructuredErrors(ctxt RelaxNGParserCtxtPtr, serror StructuredErrorFunc, ctx c.Pointer) - -//go:linkname RelaxNGParse C.xmlRelaxNGParse -func RelaxNGParse(ctxt RelaxNGParserCtxtPtr) RelaxNGPtr - -//go:linkname RelaxNGFree C.xmlRelaxNGFree -func RelaxNGFree(schema RelaxNGPtr) - -//go:linkname RelaxNGDump C.xmlRelaxNGDump -func RelaxNGDump(output *c.FILE, schema RelaxNGPtr) - -//go:linkname RelaxNGDumpTree C.xmlRelaxNGDumpTree -func RelaxNGDumpTree(output *c.FILE, schema RelaxNGPtr) - -/* - * Interfaces for validating - */ -//go:linkname RelaxNGSetValidErrors C.xmlRelaxNGSetValidErrors -func RelaxNGSetValidErrors(ctxt RelaxNGValidCtxtPtr, err RelaxNGValidityErrorFunc, warn RelaxNGValidityWarningFunc, ctx c.Pointer) - -//go:linkname RelaxNGGetValidErrors C.xmlRelaxNGGetValidErrors -func 
RelaxNGGetValidErrors(ctxt RelaxNGValidCtxtPtr, err RelaxNGValidityErrorFunc, warn RelaxNGValidityWarningFunc, ctx *c.Pointer) c.Int - -//go:linkname RelaxNGSetValidStructuredErrors C.xmlRelaxNGSetValidStructuredErrors -func RelaxNGSetValidStructuredErrors(ctxt RelaxNGValidCtxtPtr, serror StructuredErrorFunc, ctx c.Pointer) - -//go:linkname RelaxNGNewValidCtxt C.xmlRelaxNGNewValidCtxt -func RelaxNGNewValidCtxt(schema RelaxNGPtr) RelaxNGValidCtxtPtr - -//go:linkname RelaxNGFreeValidCtxt C.xmlRelaxNGFreeValidCtxt -func RelaxNGFreeValidCtxt(ctxt RelaxNGValidCtxtPtr) - -//go:linkname RelaxNGValidateDoc C.xmlRelaxNGValidateDoc -func RelaxNGValidateDoc(ctxt RelaxNGValidCtxtPtr, doc DocPtr) c.Int - -/* - * Interfaces for progressive validation when possible - */ -//go:linkname RelaxNGValidatePushElement C.xmlRelaxNGValidatePushElement -func RelaxNGValidatePushElement(ctxt RelaxNGValidCtxtPtr, doc DocPtr, elem NodePtr) c.Int - -//go:linkname RelaxNGValidatePushCData C.xmlRelaxNGValidatePushCData -func RelaxNGValidatePushCData(ctxt RelaxNGValidCtxtPtr, data *Char, len c.Int) c.Int - -//go:linkname RelaxNGValidatePopElement C.xmlRelaxNGValidatePopElement -func RelaxNGValidatePopElement(ctxt RelaxNGValidCtxtPtr, doc DocPtr, elem NodePtr) c.Int - -//go:linkname RelaxNGValidateFullElement C.xmlRelaxNGValidateFullElement -func RelaxNGValidateFullElement(ctxt RelaxNGValidCtxtPtr, doc DocPtr, elem NodePtr) c.Int diff --git a/libxml2/schemasInternals.go b/libxml2/schemasInternals.go deleted file mode 100644 index 0a62c676..00000000 --- a/libxml2/schemasInternals.go +++ /dev/null @@ -1,358 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const SCHEMAS_ANYATTR_SKIP = 1 -const SCHEMAS_ANYATTR_LAX = 2 -const SCHEMAS_ANYATTR_STRICT = 3 -const SCHEMAS_ANY_SKIP = 1 -const SCHEMAS_ANY_LAX = 2 -const SCHEMAS_ANY_STRICT = 3 -const SCHEMAS_ATTR_USE_PROHIBITED = 0 -const SCHEMAS_ATTR_USE_REQUIRED = 1 -const SCHEMAS_ATTR_USE_OPTIONAL = 2 -const 
SCHEMAS_FACET_UNKNOWN = 0 -const SCHEMAS_FACET_PRESERVE = 1 -const SCHEMAS_FACET_REPLACE = 2 -const SCHEMAS_FACET_COLLAPSE = 3 - -type SchemaValType c.Int - -const ( - SCHEMAS_UNKNOWN SchemaValType = 0 - SCHEMAS_STRING SchemaValType = 1 - SCHEMAS_NORMSTRING SchemaValType = 2 - SCHEMAS_DECIMAL SchemaValType = 3 - SCHEMAS_TIME SchemaValType = 4 - SCHEMAS_GDAY SchemaValType = 5 - SCHEMAS_GMONTH SchemaValType = 6 - SCHEMAS_GMONTHDAY SchemaValType = 7 - SCHEMAS_GYEAR SchemaValType = 8 - SCHEMAS_GYEARMONTH SchemaValType = 9 - SCHEMAS_DATE SchemaValType = 10 - SCHEMAS_DATETIME SchemaValType = 11 - SCHEMAS_DURATION SchemaValType = 12 - SCHEMAS_FLOAT SchemaValType = 13 - SCHEMAS_DOUBLE SchemaValType = 14 - SCHEMAS_BOOLEAN SchemaValType = 15 - SCHEMAS_TOKEN SchemaValType = 16 - SCHEMAS_LANGUAGE SchemaValType = 17 - SCHEMAS_NMTOKEN SchemaValType = 18 - SCHEMAS_NMTOKENS SchemaValType = 19 - SCHEMAS_NAME SchemaValType = 20 - SCHEMAS_QNAME SchemaValType = 21 - SCHEMAS_NCNAME SchemaValType = 22 - SCHEMAS_ID SchemaValType = 23 - SCHEMAS_IDREF SchemaValType = 24 - SCHEMAS_IDREFS SchemaValType = 25 - SCHEMAS_ENTITY SchemaValType = 26 - SCHEMAS_ENTITIES SchemaValType = 27 - SCHEMAS_NOTATION SchemaValType = 28 - SCHEMAS_ANYURI SchemaValType = 29 - SCHEMAS_INTEGER SchemaValType = 30 - SCHEMAS_NPINTEGER SchemaValType = 31 - SCHEMAS_NINTEGER SchemaValType = 32 - SCHEMAS_NNINTEGER SchemaValType = 33 - SCHEMAS_PINTEGER SchemaValType = 34 - SCHEMAS_INT SchemaValType = 35 - SCHEMAS_UINT SchemaValType = 36 - SCHEMAS_LONG SchemaValType = 37 - SCHEMAS_ULONG SchemaValType = 38 - SCHEMAS_SHORT SchemaValType = 39 - SCHEMAS_USHORT SchemaValType = 40 - SCHEMAS_BYTE SchemaValType = 41 - SCHEMAS_UBYTE SchemaValType = 42 - SCHEMAS_HEXBINARY SchemaValType = 43 - SCHEMAS_BASE64BINARY SchemaValType = 44 - SCHEMAS_ANYTYPE SchemaValType = 45 - SCHEMAS_ANYSIMPLETYPE SchemaValType = 46 -) - -type SchemaTypeType c.Int - -const ( - SCHEMA_TYPE_BASIC SchemaTypeType = 1 - SCHEMA_TYPE_ANY SchemaTypeType = 2 - 
SCHEMA_TYPE_FACET SchemaTypeType = 3 - SCHEMA_TYPE_SIMPLE SchemaTypeType = 4 - SCHEMA_TYPE_COMPLEX SchemaTypeType = 5 - SCHEMA_TYPE_SEQUENCE SchemaTypeType = 6 - SCHEMA_TYPE_CHOICE SchemaTypeType = 7 - SCHEMA_TYPE_ALL SchemaTypeType = 8 - SCHEMA_TYPE_SIMPLE_CONTENT SchemaTypeType = 9 - SCHEMA_TYPE_COMPLEX_CONTENT SchemaTypeType = 10 - SCHEMA_TYPE_UR SchemaTypeType = 11 - SCHEMA_TYPE_RESTRICTION SchemaTypeType = 12 - SCHEMA_TYPE_EXTENSION SchemaTypeType = 13 - SCHEMA_TYPE_ELEMENT SchemaTypeType = 14 - SCHEMA_TYPE_ATTRIBUTE SchemaTypeType = 15 - SCHEMA_TYPE_ATTRIBUTEGROUP SchemaTypeType = 16 - SCHEMA_TYPE_GROUP SchemaTypeType = 17 - SCHEMA_TYPE_NOTATION SchemaTypeType = 18 - SCHEMA_TYPE_LIST SchemaTypeType = 19 - SCHEMA_TYPE_UNION SchemaTypeType = 20 - SCHEMA_TYPE_ANY_ATTRIBUTE SchemaTypeType = 21 - SCHEMA_TYPE_IDC_UNIQUE SchemaTypeType = 22 - SCHEMA_TYPE_IDC_KEY SchemaTypeType = 23 - SCHEMA_TYPE_IDC_KEYREF SchemaTypeType = 24 - SCHEMA_TYPE_PARTICLE SchemaTypeType = 25 - SCHEMA_TYPE_ATTRIBUTE_USE SchemaTypeType = 26 - SCHEMA_FACET_MININCLUSIVE SchemaTypeType = 1000 - SCHEMA_FACET_MINEXCLUSIVE SchemaTypeType = 1001 - SCHEMA_FACET_MAXINCLUSIVE SchemaTypeType = 1002 - SCHEMA_FACET_MAXEXCLUSIVE SchemaTypeType = 1003 - SCHEMA_FACET_TOTALDIGITS SchemaTypeType = 1004 - SCHEMA_FACET_FRACTIONDIGITS SchemaTypeType = 1005 - SCHEMA_FACET_PATTERN SchemaTypeType = 1006 - SCHEMA_FACET_ENUMERATION SchemaTypeType = 1007 - SCHEMA_FACET_WHITESPACE SchemaTypeType = 1008 - SCHEMA_FACET_LENGTH SchemaTypeType = 1009 - SCHEMA_FACET_MAXLENGTH SchemaTypeType = 1010 - SCHEMA_FACET_MINLENGTH SchemaTypeType = 1011 - SCHEMA_EXTRA_QNAMEREF SchemaTypeType = 2000 - SCHEMA_EXTRA_ATTR_USE_PROHIB SchemaTypeType = 2001 -) - -type SchemaContentType c.Int - -const ( - SCHEMA_CONTENT_UNKNOWN SchemaContentType = 0 - SCHEMA_CONTENT_EMPTY SchemaContentType = 1 - SCHEMA_CONTENT_ELEMENTS SchemaContentType = 2 - SCHEMA_CONTENT_MIXED SchemaContentType = 3 - SCHEMA_CONTENT_SIMPLE SchemaContentType = 4 - 
SCHEMA_CONTENT_MIXED_OR_ELEMENTS SchemaContentType = 5 - SCHEMA_CONTENT_BASIC SchemaContentType = 6 - SCHEMA_CONTENT_ANY SchemaContentType = 7 -) - -type X_xmlSchemaVal struct { - Unused [8]uint8 -} -type SchemaVal X_xmlSchemaVal -type SchemaValPtr *SchemaVal - -type X_xmlSchemaType struct { - Type SchemaTypeType - Next *X_xmlSchemaType - Name *Char - Id *Char - Ref *Char - RefNs *Char - Annot SchemaAnnotPtr - Subtypes SchemaTypePtr - Attributes SchemaAttributePtr - Node NodePtr - MinOccurs c.Int - MaxOccurs c.Int - Flags c.Int - ContentType SchemaContentType - Base *Char - BaseNs *Char - BaseType SchemaTypePtr - Facets SchemaFacetPtr - Redef *X_xmlSchemaType - Recurse c.Int - AttributeUses *SchemaAttributeLinkPtr - AttributeWildcard SchemaWildcardPtr - BuiltInType c.Int - MemberTypes SchemaTypeLinkPtr - FacetSet SchemaFacetLinkPtr - RefPrefix *Char - ContentTypeDef SchemaTypePtr - ContModel RegexpPtr - TargetNamespace *Char - AttrUses c.Pointer -} -type SchemaType X_xmlSchemaType -type SchemaTypePtr *SchemaType - -type X_xmlSchemaFacet struct { - Type SchemaTypeType - Next *X_xmlSchemaFacet - Value *Char - Id *Char - Annot SchemaAnnotPtr - Node NodePtr - Fixed c.Int - Whitespace c.Int - Val SchemaValPtr - Regexp RegexpPtr -} -type SchemaFacet X_xmlSchemaFacet -type SchemaFacetPtr *SchemaFacet - -type X_xmlSchemaAnnot struct { - Next *X_xmlSchemaAnnot - Content NodePtr -} -type SchemaAnnot X_xmlSchemaAnnot -type SchemaAnnotPtr *SchemaAnnot - -type X_xmlSchemaAttribute struct { - Type SchemaTypeType - Next *X_xmlSchemaAttribute - Name *Char - Id *Char - Ref *Char - RefNs *Char - TypeName *Char - TypeNs *Char - Annot SchemaAnnotPtr - Base SchemaTypePtr - Occurs c.Int - DefValue *Char - Subtypes SchemaTypePtr - Node NodePtr - TargetNamespace *Char - Flags c.Int - RefPrefix *Char - DefVal SchemaValPtr - RefDecl SchemaAttributePtr -} -type SchemaAttribute X_xmlSchemaAttribute -type SchemaAttributePtr *SchemaAttribute - -type X_xmlSchemaAttributeLink struct { - Next 
*X_xmlSchemaAttributeLink - Attr *X_xmlSchemaAttribute -} -type SchemaAttributeLink X_xmlSchemaAttributeLink -type SchemaAttributeLinkPtr *SchemaAttributeLink - -type X_xmlSchemaWildcardNs struct { - Next *X_xmlSchemaWildcardNs - Value *Char -} -type SchemaWildcardNs X_xmlSchemaWildcardNs -type SchemaWildcardNsPtr *SchemaWildcardNs - -type X_xmlSchemaWildcard struct { - Type SchemaTypeType - Id *Char - Annot SchemaAnnotPtr - Node NodePtr - MinOccurs c.Int - MaxOccurs c.Int - ProcessContents c.Int - Any c.Int - NsSet SchemaWildcardNsPtr - NegNsSet SchemaWildcardNsPtr - Flags c.Int -} -type SchemaWildcard X_xmlSchemaWildcard -type SchemaWildcardPtr *SchemaWildcard - -type X_xmlSchemaAttributeGroup struct { - Type SchemaTypeType - Next *X_xmlSchemaAttribute - Name *Char - Id *Char - Ref *Char - RefNs *Char - Annot SchemaAnnotPtr - Attributes SchemaAttributePtr - Node NodePtr - Flags c.Int - AttributeWildcard SchemaWildcardPtr - RefPrefix *Char - RefItem SchemaAttributeGroupPtr - TargetNamespace *Char - AttrUses c.Pointer -} -type SchemaAttributeGroup X_xmlSchemaAttributeGroup -type SchemaAttributeGroupPtr *SchemaAttributeGroup - -type X_xmlSchemaTypeLink struct { - Next *X_xmlSchemaTypeLink - Type SchemaTypePtr -} -type SchemaTypeLink X_xmlSchemaTypeLink -type SchemaTypeLinkPtr *SchemaTypeLink - -type X_xmlSchemaFacetLink struct { - Next *X_xmlSchemaFacetLink - Facet SchemaFacetPtr -} -type SchemaFacetLink X_xmlSchemaFacetLink -type SchemaFacetLinkPtr *SchemaFacetLink - -type X_xmlSchemaElement struct { - Type SchemaTypeType - Next *X_xmlSchemaType - Name *Char - Id *Char - Ref *Char - RefNs *Char - Annot SchemaAnnotPtr - Subtypes SchemaTypePtr - Attributes SchemaAttributePtr - Node NodePtr - MinOccurs c.Int - MaxOccurs c.Int - Flags c.Int - TargetNamespace *Char - NamedType *Char - NamedTypeNs *Char - SubstGroup *Char - SubstGroupNs *Char - Scope *Char - Value *Char - RefDecl *X_xmlSchemaElement - ContModel RegexpPtr - ContentType SchemaContentType - RefPrefix *Char 
- DefVal SchemaValPtr - Idcs c.Pointer -} -type SchemaElement X_xmlSchemaElement -type SchemaElementPtr *SchemaElement - -type X_xmlSchemaNotation struct { - Type SchemaTypeType - Name *Char - Annot SchemaAnnotPtr - Identifier *Char - TargetNamespace *Char -} -type SchemaNotation X_xmlSchemaNotation -type SchemaNotationPtr *SchemaNotation - -/** - * _xmlSchema: - * - * A Schemas definition - */ - -type X_xmlSchema struct { - Name *Char - TargetNamespace *Char - Version *Char - Id *Char - Doc DocPtr - Annot SchemaAnnotPtr - Flags c.Int - TypeDecl HashTablePtr - AttrDecl HashTablePtr - AttrgrpDecl HashTablePtr - ElemDecl HashTablePtr - NotaDecl HashTablePtr - SchemasImports HashTablePtr - X_private c.Pointer - GroupDecl HashTablePtr - Dict DictPtr - Includes c.Pointer - Preserve c.Int - Counter c.Int - IdcDef HashTablePtr - Volatiles c.Pointer -} - -//go:linkname SchemaFreeType C.xmlSchemaFreeType -func SchemaFreeType(type_ SchemaTypePtr) - -//go:linkname SchemaFreeWildcard C.xmlSchemaFreeWildcard -func SchemaFreeWildcard(wildcard SchemaWildcardPtr) diff --git a/libxml2/schematron.go b/libxml2/schematron.go deleted file mode 100644 index c6ac9cc0..00000000 --- a/libxml2/schematron.go +++ /dev/null @@ -1,112 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type SchematronValidOptions c.Int - -const ( - SCHEMATRON_OUT_QUIET SchematronValidOptions = 1 - SCHEMATRON_OUT_TEXT SchematronValidOptions = 2 - SCHEMATRON_OUT_XML SchematronValidOptions = 4 - SCHEMATRON_OUT_ERROR SchematronValidOptions = 8 - SCHEMATRON_OUT_FILE SchematronValidOptions = 256 - SCHEMATRON_OUT_BUFFER SchematronValidOptions = 512 - SCHEMATRON_OUT_IO SchematronValidOptions = 1024 -) - -type X_xmlSchematron struct { - Unused [8]uint8 -} -type Schematron X_xmlSchematron -type SchematronPtr *Schematron - -// llgo:type C -type SchematronValidityErrorFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type 
SchematronValidityWarningFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -type X_xmlSchematronParserCtxt struct { - Unused [8]uint8 -} -type SchematronParserCtxt X_xmlSchematronParserCtxt -type SchematronParserCtxtPtr *SchematronParserCtxt - -type X_xmlSchematronValidCtxt struct { - Unused [8]uint8 -} -type SchematronValidCtxt X_xmlSchematronValidCtxt -type SchematronValidCtxtPtr *SchematronValidCtxt - -/* - * Interfaces for parsing. - */ -//go:linkname SchematronNewParserCtxt C.xmlSchematronNewParserCtxt -func SchematronNewParserCtxt(URL *c.Char) SchematronParserCtxtPtr - -//go:linkname SchematronNewMemParserCtxt C.xmlSchematronNewMemParserCtxt -func SchematronNewMemParserCtxt(buffer *c.Char, size c.Int) SchematronParserCtxtPtr - -//go:linkname SchematronNewDocParserCtxt C.xmlSchematronNewDocParserCtxt -func SchematronNewDocParserCtxt(doc DocPtr) SchematronParserCtxtPtr - -//go:linkname SchematronFreeParserCtxt C.xmlSchematronFreeParserCtxt -func SchematronFreeParserCtxt(ctxt SchematronParserCtxtPtr) - -/***** -XMLPUBFUN void - xmlSchematronSetParserErrors(xmlSchematronParserCtxtPtr ctxt, - xmlSchematronValidityErrorFunc err, - xmlSchematronValidityWarningFunc warn, - void *ctx); -XMLPUBFUN int - xmlSchematronGetParserErrors(xmlSchematronParserCtxtPtr ctxt, - xmlSchematronValidityErrorFunc * err, - xmlSchematronValidityWarningFunc * warn, - void **ctx); -XMLPUBFUN int - xmlSchematronIsValid (xmlSchematronValidCtxtPtr ctxt); - *****/ -//go:linkname SchematronParse C.xmlSchematronParse -func SchematronParse(ctxt SchematronParserCtxtPtr) SchematronPtr - -//go:linkname SchematronFree C.xmlSchematronFree -func SchematronFree(schema SchematronPtr) - -/* - * Interfaces for validating - */ -//go:linkname SchematronSetValidStructuredErrors C.xmlSchematronSetValidStructuredErrors -func SchematronSetValidStructuredErrors(ctxt SchematronValidCtxtPtr, serror StructuredErrorFunc, ctx c.Pointer) - -/****** -XMLPUBFUN void - 
xmlSchematronSetValidErrors (xmlSchematronValidCtxtPtr ctxt, - xmlSchematronValidityErrorFunc err, - xmlSchematronValidityWarningFunc warn, - void *ctx); -XMLPUBFUN int - xmlSchematronGetValidErrors (xmlSchematronValidCtxtPtr ctxt, - xmlSchematronValidityErrorFunc *err, - xmlSchematronValidityWarningFunc *warn, - void **ctx); -XMLPUBFUN int - xmlSchematronSetValidOptions(xmlSchematronValidCtxtPtr ctxt, - int options); -XMLPUBFUN int - xmlSchematronValidCtxtGetOptions(xmlSchematronValidCtxtPtr ctxt); -XMLPUBFUN int - xmlSchematronValidateOneElement (xmlSchematronValidCtxtPtr ctxt, - xmlNodePtr elem); - *******/ -//go:linkname SchematronNewValidCtxt C.xmlSchematronNewValidCtxt -func SchematronNewValidCtxt(schema SchematronPtr, options c.Int) SchematronValidCtxtPtr - -//go:linkname SchematronFreeValidCtxt C.xmlSchematronFreeValidCtxt -func SchematronFreeValidCtxt(ctxt SchematronValidCtxtPtr) - -//go:linkname SchematronValidateDoc C.xmlSchematronValidateDoc -func SchematronValidateDoc(ctxt SchematronValidCtxtPtr, instance DocPtr) c.Int diff --git a/libxml2/threads.go b/libxml2/threads.go deleted file mode 100644 index 9592dc5f..00000000 --- a/libxml2/threads.go +++ /dev/null @@ -1,66 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlMutex struct { - Unused [8]uint8 -} -type Mutex X_xmlMutex -type MutexPtr *Mutex - -type X_xmlRMutex struct { - Unused [8]uint8 -} -type RMutex X_xmlRMutex -type RMutexPtr *RMutex - -//go:linkname CheckThreadLocalStorage C.xmlCheckThreadLocalStorage -func CheckThreadLocalStorage() c.Int - -//go:linkname NewMutex C.xmlNewMutex -func NewMutex() MutexPtr - -//go:linkname MutexLock C.xmlMutexLock -func MutexLock(tok MutexPtr) - -//go:linkname MutexUnlock C.xmlMutexUnlock -func MutexUnlock(tok MutexPtr) - -//go:linkname FreeMutex C.xmlFreeMutex -func FreeMutex(tok MutexPtr) - -//go:linkname NewRMutex C.xmlNewRMutex -func NewRMutex() RMutexPtr - -//go:linkname RMutexLock C.xmlRMutexLock -func 
RMutexLock(tok RMutexPtr) - -//go:linkname RMutexUnlock C.xmlRMutexUnlock -func RMutexUnlock(tok RMutexPtr) - -//go:linkname FreeRMutex C.xmlFreeRMutex -func FreeRMutex(tok RMutexPtr) - -/* - * Library wide APIs. - */ -//go:linkname InitThreads C.xmlInitThreads -func InitThreads() - -//go:linkname LockLibrary C.xmlLockLibrary -func LockLibrary() - -//go:linkname UnlockLibrary C.xmlUnlockLibrary -func UnlockLibrary() - -//go:linkname GetThreadId C.xmlGetThreadId -func GetThreadId() c.Int - -//go:linkname IsMainThread C.xmlIsMainThread -func IsMainThread() c.Int - -//go:linkname CleanupThreads C.xmlCleanupThreads -func CleanupThreads() diff --git a/libxml2/tree.go b/libxml2/tree.go deleted file mode 100644 index 70aca250..00000000 --- a/libxml2/tree.go +++ /dev/null @@ -1,1227 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const BASE_BUFFER_SIZE = 4096 -const DOCB_DOCUMENT_NODE = 21 - -type X_xmlParserInputBuffer struct { - Context c.Pointer - Readcallback InputReadCallback - Closecallback InputCloseCallback - Encoder CharEncodingHandlerPtr - Buffer BufPtr - Raw BufPtr - Compressed c.Int - Error c.Int - Rawconsumed c.Ulong -} -type ParserInputBuffer X_xmlParserInputBuffer -type ParserInputBufferPtr *ParserInputBuffer - -type X_xmlOutputBuffer struct { - Context c.Pointer - Writecallback OutputWriteCallback - Closecallback OutputCloseCallback - Encoder CharEncodingHandlerPtr - Buffer BufPtr - Conv BufPtr - Written c.Int - Error c.Int -} -type OutputBuffer X_xmlOutputBuffer -type OutputBufferPtr *OutputBuffer - -type X_xmlParserInput struct { - Buf ParserInputBufferPtr - Filename *c.Char - Directory *c.Char - Base *Char - Cur *Char - End *Char - Length c.Int - Line c.Int - Col c.Int - Consumed c.Ulong - Free ParserInputDeallocate - Encoding *Char - Version *Char - Flags c.Int - Id c.Int - ParentConsumed c.Ulong - Entity EntityPtr -} -type ParserInput X_xmlParserInput -type ParserInputPtr *ParserInput - -type X_xmlParserCtxt struct { 
- Sax *X_xmlSAXHandler - UserData c.Pointer - MyDoc DocPtr - WellFormed c.Int - ReplaceEntities c.Int - Version *Char - Encoding *Char - Standalone c.Int - Html c.Int - Input ParserInputPtr - InputNr c.Int - InputMax c.Int - InputTab *ParserInputPtr - Node NodePtr - NodeNr c.Int - NodeMax c.Int - NodeTab *NodePtr - RecordInfo c.Int - NodeSeq ParserNodeInfoSeq - ErrNo c.Int - HasExternalSubset c.Int - HasPErefs c.Int - External c.Int - Valid c.Int - Validate c.Int - Vctxt ValidCtxt - Instate ParserInputState - Token c.Int - Directory *c.Char - Name *Char - NameNr c.Int - NameMax c.Int - NameTab **Char - NbChars c.Long - CheckIndex c.Long - KeepBlanks c.Int - DisableSAX c.Int - InSubset c.Int - IntSubName *Char - ExtSubURI *Char - ExtSubSystem *Char - Space *c.Int - SpaceNr c.Int - SpaceMax c.Int - SpaceTab *c.Int - Depth c.Int - Entity ParserInputPtr - Charset c.Int - Nodelen c.Int - Nodemem c.Int - Pedantic c.Int - X_private c.Pointer - Loadsubset c.Int - Linenumbers c.Int - Catalogs c.Pointer - Recovery c.Int - Progressive c.Int - Dict DictPtr - Atts **Char - Maxatts c.Int - Docdict c.Int - StrXml *Char - StrXmlns *Char - StrXmlNs *Char - Sax2 c.Int - NsNr c.Int - NsMax c.Int - NsTab **Char - Attallocs *c.Uint - PushTab *StartTag - AttsDefault HashTablePtr - AttsSpecial HashTablePtr - NsWellFormed c.Int - Options c.Int - DictNames c.Int - FreeElemsNr c.Int - FreeElems NodePtr - FreeAttrsNr c.Int - FreeAttrs AttrPtr - LastError Error - ParseMode ParserMode - Nbentities c.Ulong - Sizeentities c.Ulong - NodeInfo *ParserNodeInfo - NodeInfoNr c.Int - NodeInfoMax c.Int - NodeInfoTab *ParserNodeInfo - InputId c.Int - Sizeentcopy c.Ulong - EndCheckState c.Int - NbErrors uint16 - NbWarnings uint16 - MaxAmpl c.Uint - Nsdb *ParserNsData - AttrHashMax c.Uint - AttrHash *AttrHashBucket - ErrorHandler StructuredErrorFunc - ErrorCtxt c.Pointer -} -type ParserCtxt X_xmlParserCtxt -type ParserCtxtPtr *ParserCtxt - -type X_xmlSAXLocator struct { - GetPublicId c.Pointer - 
GetSystemId c.Pointer - GetLineNumber c.Pointer - GetColumnNumber c.Pointer -} -type SAXLocator X_xmlSAXLocator -type SAXLocatorPtr *SAXLocator - -type X_xmlSAXHandler struct { - InternalSubset InternalSubsetSAXFunc - IsStandalone IsStandaloneSAXFunc - HasInternalSubset HasInternalSubsetSAXFunc - HasExternalSubset HasExternalSubsetSAXFunc - ResolveEntity ResolveEntitySAXFunc - GetEntity GetEntitySAXFunc - EntityDecl EntityDeclSAXFunc - NotationDecl NotationDeclSAXFunc - AttributeDecl AttributeDeclSAXFunc - ElementDecl ElementDeclSAXFunc - UnparsedEntityDecl UnparsedEntityDeclSAXFunc - SetDocumentLocator SetDocumentLocatorSAXFunc - StartDocument StartDocumentSAXFunc - EndDocument EndDocumentSAXFunc - StartElement StartElementSAXFunc - EndElement EndElementSAXFunc - Reference ReferenceSAXFunc - Characters CharactersSAXFunc - IgnorableWhitespace IgnorableWhitespaceSAXFunc - ProcessingInstruction ProcessingInstructionSAXFunc - Comment CommentSAXFunc - Warning WarningSAXFunc - Error ErrorSAXFunc - FatalError FatalErrorSAXFunc - GetParameterEntity GetParameterEntitySAXFunc - CdataBlock CdataBlockSAXFunc - ExternalSubset ExternalSubsetSAXFunc - Initialized c.Uint - X_private c.Pointer - StartElementNs StartElementNsSAX2Func - EndElementNs EndElementNsSAX2Func - Serror StructuredErrorFunc -} -type SAXHandler X_xmlSAXHandler -type SAXHandlerPtr *SAXHandler - -type X_xmlEntity struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlDtd - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Orig *Char - Content *Char - Length c.Int - Etype EntityType - ExternalID *Char - SystemID *Char - Nexte *X_xmlEntity - URI *Char - Owner c.Int - Flags c.Int - ExpandedSize c.Ulong -} -type Entity X_xmlEntity -type EntityPtr *Entity -type BufferAllocationScheme c.Int - -const ( - BUFFER_ALLOC_DOUBLEIT BufferAllocationScheme = 0 - BUFFER_ALLOC_EXACT BufferAllocationScheme = 1 - BUFFER_ALLOC_IMMUTABLE BufferAllocationScheme = 2 - 
BUFFER_ALLOC_IO BufferAllocationScheme = 3 - BUFFER_ALLOC_HYBRID BufferAllocationScheme = 4 - BUFFER_ALLOC_BOUNDED BufferAllocationScheme = 5 -) - -type X_xmlBuffer struct { - Content *Char - Use c.Uint - Size c.Uint - Alloc BufferAllocationScheme - ContentIO *Char -} -type Buffer X_xmlBuffer -type BufferPtr *Buffer - -type X_xmlBuf struct { - Unused [8]uint8 -} -type Buf X_xmlBuf -type BufPtr *Buf - -/* - * A few public routines for xmlBuf. As those are expected to be used - * mostly internally the bulk of the routines are internal in buf.h - */ -// llgo:link (*Buf).BufContent C.xmlBufContent -func (recv_ *Buf) BufContent() *Char { - return nil -} - -//go:linkname BufEnd C.xmlBufEnd -func BufEnd(buf BufPtr) *Char - -//go:linkname BufUse C.xmlBufUse -func BufUse(buf BufPtr) c.SizeT - -//go:linkname BufShrink C.xmlBufShrink -func BufShrink(buf BufPtr, len c.SizeT) c.SizeT - -type ElementType c.Int - -const ( - ELEMENT_NODE ElementType = 1 - ATTRIBUTE_NODE ElementType = 2 - TEXT_NODE ElementType = 3 - CDATA_SECTION_NODE ElementType = 4 - ENTITY_REF_NODE ElementType = 5 - ENTITY_NODE ElementType = 6 - PI_NODE ElementType = 7 - COMMENT_NODE ElementType = 8 - DOCUMENT_NODE ElementType = 9 - DOCUMENT_TYPE_NODE ElementType = 10 - DOCUMENT_FRAG_NODE ElementType = 11 - NOTATION_NODE ElementType = 12 - HTML_DOCUMENT_NODE ElementType = 13 - DTD_NODE ElementType = 14 - ELEMENT_DECL ElementType = 15 - ATTRIBUTE_DECL ElementType = 16 - ENTITY_DECL ElementType = 17 - NAMESPACE_DECL ElementType = 18 - XINCLUDE_START ElementType = 19 - XINCLUDE_END ElementType = 20 -) - -type X_xmlNotation struct { - Name *Char - PublicID *Char - SystemID *Char -} -type Notation X_xmlNotation -type NotationPtr *Notation -type AttributeType c.Int - -const ( - ATTRIBUTE_CDATA AttributeType = 1 - ATTRIBUTE_ID AttributeType = 2 - ATTRIBUTE_IDREF AttributeType = 3 - ATTRIBUTE_IDREFS AttributeType = 4 - ATTRIBUTE_ENTITY AttributeType = 5 - ATTRIBUTE_ENTITIES AttributeType = 6 - ATTRIBUTE_NMTOKEN 
AttributeType = 7 - ATTRIBUTE_NMTOKENS AttributeType = 8 - ATTRIBUTE_ENUMERATION AttributeType = 9 - ATTRIBUTE_NOTATION AttributeType = 10 -) - -type AttributeDefault c.Int - -const ( - ATTRIBUTE_NONE AttributeDefault = 1 - ATTRIBUTE_REQUIRED AttributeDefault = 2 - ATTRIBUTE_IMPLIED AttributeDefault = 3 - ATTRIBUTE_FIXED AttributeDefault = 4 -) - -type X_xmlEnumeration struct { - Next *X_xmlEnumeration - Name *Char -} -type Enumeration X_xmlEnumeration -type EnumerationPtr *Enumeration - -type X_xmlAttribute struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlDtd - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Nexth *X_xmlAttribute - Atype AttributeType - Def AttributeDefault - DefaultValue *Char - Tree EnumerationPtr - Prefix *Char - Elem *Char -} -type Attribute X_xmlAttribute -type AttributePtr *Attribute - -type X_xmlNode struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlNode - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Ns *Ns - Content *Char - Properties *X_xmlAttr - NsDef *Ns - Psvi c.Pointer - Line uint16 - Extra uint16 -} - -type X_xmlDtd struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlDoc - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Notations c.Pointer - Elements c.Pointer - Attributes c.Pointer - Entities c.Pointer - ExternalID *Char - SystemID *Char - Pentities c.Pointer -} - -type X_xmlDoc struct { - X_private c.Pointer - Type ElementType - Name *c.Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlNode - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Compression c.Int - Standalone c.Int - IntSubset *X_xmlDtd - ExtSubset *X_xmlDtd - OldNs *X_xmlNs - Version *Char - Encoding *Char - Ids c.Pointer - Refs c.Pointer - URL *Char - Charset c.Int - Dict *X_xmlDict - Psvi c.Pointer - ParseFlags c.Int - Properties c.Int -} 
-type ElementContentType c.Int - -const ( - ELEMENT_CONTENT_PCDATA ElementContentType = 1 - ELEMENT_CONTENT_ELEMENT ElementContentType = 2 - ELEMENT_CONTENT_SEQ ElementContentType = 3 - ELEMENT_CONTENT_OR ElementContentType = 4 -) - -type ElementContentOccur c.Int - -const ( - ELEMENT_CONTENT_ONCE ElementContentOccur = 1 - ELEMENT_CONTENT_OPT ElementContentOccur = 2 - ELEMENT_CONTENT_MULT ElementContentOccur = 3 - ELEMENT_CONTENT_PLUS ElementContentOccur = 4 -) - -type X_xmlElementContent struct { - Type ElementContentType - Ocur ElementContentOccur - Name *Char - C1 *X_xmlElementContent - C2 *X_xmlElementContent - Parent *X_xmlElementContent - Prefix *Char -} -type ElementContent X_xmlElementContent -type ElementContentPtr *ElementContent -type ElementTypeVal c.Int - -const ( - ELEMENT_TYPE_UNDEFINED ElementTypeVal = 0 - ELEMENT_TYPE_EMPTY ElementTypeVal = 1 - ELEMENT_TYPE_ANY ElementTypeVal = 2 - ELEMENT_TYPE_MIXED ElementTypeVal = 3 - ELEMENT_TYPE_ELEMENT ElementTypeVal = 4 -) - -type X_xmlElement struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlDtd - Next *X_xmlNode - Prev *X_xmlNode - Doc *X_xmlDoc - Etype ElementTypeVal - Content ElementContentPtr - Attributes AttributePtr - Prefix *Char - ContModel RegexpPtr -} -type Element X_xmlElement -type ElementPtr *Element -type NsType ElementType - -type X_xmlNs struct { - Next *X_xmlNs - Type NsType - Href *Char - Prefix *Char - X_private c.Pointer - Context *X_xmlDoc -} -type Ns X_xmlNs -type NsPtr *Ns -type Dtd X_xmlDtd -type DtdPtr *Dtd - -type X_xmlAttr struct { - X_private c.Pointer - Type ElementType - Name *Char - Children *X_xmlNode - Last *X_xmlNode - Parent *X_xmlNode - Next *X_xmlAttr - Prev *X_xmlAttr - Doc *X_xmlDoc - Ns *Ns - Atype AttributeType - Psvi c.Pointer - Id *X_xmlID -} -type Attr X_xmlAttr -type AttrPtr *Attr - -type X_xmlID struct { - Next *X_xmlID - Value *Char - Attr AttrPtr - Name *Char - Lineno c.Int - Doc *X_xmlDoc -} 
-type ID X_xmlID -type IDPtr *ID - -type X_xmlRef struct { - Next *X_xmlRef - Value *Char - Attr AttrPtr - Name *Char - Lineno c.Int -} -type Ref X_xmlRef -type RefPtr *Ref -type Node X_xmlNode -type NodePtr *Node -type DocProperties c.Int - -const ( - DOC_WELLFORMED DocProperties = 1 - DOC_NSVALID DocProperties = 2 - DOC_OLD10 DocProperties = 4 - DOC_DTDVALID DocProperties = 8 - DOC_XINCLUDE DocProperties = 16 - DOC_USERBUILT DocProperties = 32 - DOC_INTERNAL DocProperties = 64 - DOC_HTML DocProperties = 128 -) - -type Doc X_xmlDoc -type DocPtr *Doc - -type X_xmlDict struct { - Unused [8]uint8 -} - -type X_xmlDOMWrapCtxt struct { - X_private c.Pointer - Type c.Int - NamespaceMap c.Pointer - GetNsForNodeFunc DOMWrapAcquireNsFunction -} -type DOMWrapCtxt X_xmlDOMWrapCtxt -type DOMWrapCtxtPtr *DOMWrapCtxt - -// llgo:type C -type DOMWrapAcquireNsFunction func(DOMWrapCtxtPtr, NodePtr, *Char, *Char) NsPtr - -// llgo:type C -type RegisterNodeFunc func(NodePtr) - -// llgo:type C -type DeregisterNodeFunc func(NodePtr) - -//go:linkname X__xmlBufferAllocScheme C.__xmlBufferAllocScheme -func X__xmlBufferAllocScheme() *BufferAllocationScheme - -//go:linkname X__xmlDefaultBufferSize C.__xmlDefaultBufferSize -func X__xmlDefaultBufferSize() *c.Int - -//go:linkname X__xmlRegisterNodeDefaultValue C.__xmlRegisterNodeDefaultValue -func X__xmlRegisterNodeDefaultValue() RegisterNodeFunc - -//go:linkname X__xmlDeregisterNodeDefaultValue C.__xmlDeregisterNodeDefaultValue -func X__xmlDeregisterNodeDefaultValue() DeregisterNodeFunc - -/* - * Some helper functions - */ -// llgo:link (*Char).ValidateNCName C.xmlValidateNCName -func (recv_ *Char) ValidateNCName(space c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).ValidateQName C.xmlValidateQName -func (recv_ *Char) ValidateQName(space c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).ValidateName C.xmlValidateName -func (recv_ *Char) ValidateName(space c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).ValidateNMToken 
C.xmlValidateNMToken -func (recv_ *Char) ValidateNMToken(space c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).BuildQName C.xmlBuildQName -func (recv_ *Char) BuildQName(prefix *Char, memory *Char, len c.Int) *Char { - return nil -} - -// llgo:link (*Char).SplitQName2 C.xmlSplitQName2 -func (recv_ *Char) SplitQName2(prefix **Char) *Char { - return nil -} - -// llgo:link (*Char).SplitQName3 C.xmlSplitQName3 -func (recv_ *Char) SplitQName3(len *c.Int) *Char { - return nil -} - -/* - * Handling Buffers, the old ones see @xmlBuf for the new ones. - */ -// llgo:link BufferAllocationScheme.SetBufferAllocationScheme C.xmlSetBufferAllocationScheme -func (recv_ BufferAllocationScheme) SetBufferAllocationScheme() { -} - -//go:linkname GetBufferAllocationScheme C.xmlGetBufferAllocationScheme -func GetBufferAllocationScheme() BufferAllocationScheme - -//go:linkname BufferCreate C.xmlBufferCreate -func BufferCreate() BufferPtr - -//go:linkname BufferCreateSize C.xmlBufferCreateSize -func BufferCreateSize(size c.SizeT) BufferPtr - -//go:linkname BufferCreateStatic C.xmlBufferCreateStatic -func BufferCreateStatic(mem c.Pointer, size c.SizeT) BufferPtr - -//go:linkname BufferResize C.xmlBufferResize -func BufferResize(buf BufferPtr, size c.Uint) c.Int - -//go:linkname BufferFree C.xmlBufferFree -func BufferFree(buf BufferPtr) - -//go:linkname BufferDump C.xmlBufferDump -func BufferDump(file *c.FILE, buf BufferPtr) c.Int - -//go:linkname BufferAdd C.xmlBufferAdd -func BufferAdd(buf BufferPtr, str *Char, len c.Int) c.Int - -//go:linkname BufferAddHead C.xmlBufferAddHead -func BufferAddHead(buf BufferPtr, str *Char, len c.Int) c.Int - -//go:linkname BufferCat C.xmlBufferCat -func BufferCat(buf BufferPtr, str *Char) c.Int - -//go:linkname BufferCCat C.xmlBufferCCat -func BufferCCat(buf BufferPtr, str *c.Char) c.Int - -//go:linkname BufferShrink C.xmlBufferShrink -func BufferShrink(buf BufferPtr, len c.Uint) c.Int - -//go:linkname BufferGrow C.xmlBufferGrow -func BufferGrow(buf 
BufferPtr, len c.Uint) c.Int - -//go:linkname BufferEmpty C.xmlBufferEmpty -func BufferEmpty(buf BufferPtr) - -// llgo:link (*Buffer).BufferContent C.xmlBufferContent -func (recv_ *Buffer) BufferContent() *Char { - return nil -} - -//go:linkname BufferDetach C.xmlBufferDetach -func BufferDetach(buf BufferPtr) *Char - -//go:linkname BufferSetAllocationScheme C.xmlBufferSetAllocationScheme -func BufferSetAllocationScheme(buf BufferPtr, scheme BufferAllocationScheme) - -// llgo:link (*Buffer).BufferLength C.xmlBufferLength -func (recv_ *Buffer) BufferLength() c.Int { - return 0 -} - -/* - * Creating/freeing new structures. - */ -//go:linkname CreateIntSubset C.xmlCreateIntSubset -func CreateIntSubset(doc DocPtr, name *Char, ExternalID *Char, SystemID *Char) DtdPtr - -//go:linkname NewDtd C.xmlNewDtd -func NewDtd(doc DocPtr, name *Char, ExternalID *Char, SystemID *Char) DtdPtr - -// llgo:link (*Doc).GetIntSubset C.xmlGetIntSubset -func (recv_ *Doc) GetIntSubset() DtdPtr { - return nil -} - -//go:linkname FreeDtd C.xmlFreeDtd -func FreeDtd(cur DtdPtr) - -//go:linkname NewGlobalNs C.xmlNewGlobalNs -func NewGlobalNs(doc DocPtr, href *Char, prefix *Char) NsPtr - -//go:linkname NewNs C.xmlNewNs -func NewNs(node NodePtr, href *Char, prefix *Char) NsPtr - -//go:linkname FreeNs C.xmlFreeNs -func FreeNs(cur NsPtr) - -//go:linkname FreeNsList C.xmlFreeNsList -func FreeNsList(cur NsPtr) - -// llgo:link (*Char).NewDoc C.xmlNewDoc -func (recv_ *Char) NewDoc() DocPtr { - return nil -} - -//go:linkname FreeDoc C.xmlFreeDoc -func FreeDoc(cur DocPtr) - -//go:linkname NewDocProp C.xmlNewDocProp -func NewDocProp(doc DocPtr, name *Char, value *Char) AttrPtr - -//go:linkname NewProp C.xmlNewProp -func NewProp(node NodePtr, name *Char, value *Char) AttrPtr - -//go:linkname NewNsProp C.xmlNewNsProp -func NewNsProp(node NodePtr, ns NsPtr, name *Char, value *Char) AttrPtr - -//go:linkname NewNsPropEatName C.xmlNewNsPropEatName -func NewNsPropEatName(node NodePtr, ns NsPtr, name *Char, value 
*Char) AttrPtr - -//go:linkname FreePropList C.xmlFreePropList -func FreePropList(cur AttrPtr) - -//go:linkname FreeProp C.xmlFreeProp -func FreeProp(cur AttrPtr) - -//go:linkname CopyProp C.xmlCopyProp -func CopyProp(target NodePtr, cur AttrPtr) AttrPtr - -//go:linkname CopyPropList C.xmlCopyPropList -func CopyPropList(target NodePtr, cur AttrPtr) AttrPtr - -//go:linkname CopyDtd C.xmlCopyDtd -func CopyDtd(dtd DtdPtr) DtdPtr - -//go:linkname CopyDoc C.xmlCopyDoc -func CopyDoc(doc DocPtr, recursive c.Int) DocPtr - -/* - * Creating new nodes. - */ -//go:linkname NewDocNode C.xmlNewDocNode -func NewDocNode(doc DocPtr, ns NsPtr, name *Char, content *Char) NodePtr - -//go:linkname NewDocNodeEatName C.xmlNewDocNodeEatName -func NewDocNodeEatName(doc DocPtr, ns NsPtr, name *Char, content *Char) NodePtr - -//go:linkname NewNode C.xmlNewNode -func NewNode(ns NsPtr, name *Char) NodePtr - -//go:linkname NewNodeEatName C.xmlNewNodeEatName -func NewNodeEatName(ns NsPtr, name *Char) NodePtr - -//go:linkname NewChild C.xmlNewChild -func NewChild(parent NodePtr, ns NsPtr, name *Char, content *Char) NodePtr - -// llgo:link (*Doc).NewDocText C.xmlNewDocText -func (recv_ *Doc) NewDocText(content *Char) NodePtr { - return nil -} - -// llgo:link (*Char).NewText C.xmlNewText -func (recv_ *Char) NewText() NodePtr { - return nil -} - -//go:linkname NewDocPI C.xmlNewDocPI -func NewDocPI(doc DocPtr, name *Char, content *Char) NodePtr - -// llgo:link (*Char).NewPI C.xmlNewPI -func (recv_ *Char) NewPI(content *Char) NodePtr { - return nil -} - -//go:linkname NewDocTextLen C.xmlNewDocTextLen -func NewDocTextLen(doc DocPtr, content *Char, len c.Int) NodePtr - -// llgo:link (*Char).NewTextLen C.xmlNewTextLen -func (recv_ *Char) NewTextLen(len c.Int) NodePtr { - return nil -} - -//go:linkname NewDocComment C.xmlNewDocComment -func NewDocComment(doc DocPtr, content *Char) NodePtr - -// llgo:link (*Char).NewComment C.xmlNewComment -func (recv_ *Char) NewComment() NodePtr { - return nil -} - 
-//go:linkname NewCDataBlock C.xmlNewCDataBlock -func NewCDataBlock(doc DocPtr, content *Char, len c.Int) NodePtr - -//go:linkname NewCharRef C.xmlNewCharRef -func NewCharRef(doc DocPtr, name *Char) NodePtr - -// llgo:link (*Doc).NewReference C.xmlNewReference -func (recv_ *Doc) NewReference(name *Char) NodePtr { - return nil -} - -//go:linkname CopyNode C.xmlCopyNode -func CopyNode(node NodePtr, recursive c.Int) NodePtr - -//go:linkname DocCopyNode C.xmlDocCopyNode -func DocCopyNode(node NodePtr, doc DocPtr, recursive c.Int) NodePtr - -//go:linkname DocCopyNodeList C.xmlDocCopyNodeList -func DocCopyNodeList(doc DocPtr, node NodePtr) NodePtr - -//go:linkname CopyNodeList C.xmlCopyNodeList -func CopyNodeList(node NodePtr) NodePtr - -//go:linkname NewTextChild C.xmlNewTextChild -func NewTextChild(parent NodePtr, ns NsPtr, name *Char, content *Char) NodePtr - -//go:linkname NewDocRawNode C.xmlNewDocRawNode -func NewDocRawNode(doc DocPtr, ns NsPtr, name *Char, content *Char) NodePtr - -//go:linkname NewDocFragment C.xmlNewDocFragment -func NewDocFragment(doc DocPtr) NodePtr - -/* - * Navigating. - */ -// llgo:link (*Node).GetLineNo C.xmlGetLineNo -func (recv_ *Node) GetLineNo() c.Long { - return 0 -} - -// llgo:link (*Node).GetNodePath C.xmlGetNodePath -func (recv_ *Node) GetNodePath() *Char { - return nil -} - -// llgo:link (*Doc).DocGetRootElement C.xmlDocGetRootElement -func (recv_ *Doc) DocGetRootElement() NodePtr { - return nil -} - -// llgo:link (*Node).GetLastChild C.xmlGetLastChild -func (recv_ *Node) GetLastChild() NodePtr { - return nil -} - -// llgo:link (*Node).NodeIsText C.xmlNodeIsText -func (recv_ *Node) NodeIsText() c.Int { - return 0 -} - -// llgo:link (*Node).IsBlankNode C.xmlIsBlankNode -func (recv_ *Node) IsBlankNode() c.Int { - return 0 -} - -/* - * Changing the structure. 
- */ -//go:linkname DocSetRootElement C.xmlDocSetRootElement -func DocSetRootElement(doc DocPtr, root NodePtr) NodePtr - -//go:linkname NodeSetName C.xmlNodeSetName -func NodeSetName(cur NodePtr, name *Char) - -//go:linkname AddChild C.xmlAddChild -func AddChild(parent NodePtr, cur NodePtr) NodePtr - -//go:linkname AddChildList C.xmlAddChildList -func AddChildList(parent NodePtr, cur NodePtr) NodePtr - -//go:linkname ReplaceNode C.xmlReplaceNode -func ReplaceNode(old NodePtr, cur NodePtr) NodePtr - -//go:linkname AddPrevSibling C.xmlAddPrevSibling -func AddPrevSibling(cur NodePtr, elem NodePtr) NodePtr - -//go:linkname AddSibling C.xmlAddSibling -func AddSibling(cur NodePtr, elem NodePtr) NodePtr - -//go:linkname AddNextSibling C.xmlAddNextSibling -func AddNextSibling(cur NodePtr, elem NodePtr) NodePtr - -//go:linkname UnlinkNode C.xmlUnlinkNode -func UnlinkNode(cur NodePtr) - -//go:linkname TextMerge C.xmlTextMerge -func TextMerge(first NodePtr, second NodePtr) NodePtr - -//go:linkname TextConcat C.xmlTextConcat -func TextConcat(node NodePtr, content *Char, len c.Int) c.Int - -//go:linkname FreeNodeList C.xmlFreeNodeList -func FreeNodeList(cur NodePtr) - -//go:linkname FreeNode C.xmlFreeNode -func FreeNode(cur NodePtr) - -//go:linkname SetTreeDoc C.xmlSetTreeDoc -func SetTreeDoc(tree NodePtr, doc DocPtr) c.Int - -//go:linkname SetListDoc C.xmlSetListDoc -func SetListDoc(list NodePtr, doc DocPtr) c.Int - -/* - * Namespaces. 
- */ -//go:linkname SearchNs C.xmlSearchNs -func SearchNs(doc DocPtr, node NodePtr, nameSpace *Char) NsPtr - -//go:linkname SearchNsByHref C.xmlSearchNsByHref -func SearchNsByHref(doc DocPtr, node NodePtr, href *Char) NsPtr - -// llgo:link (*Doc).GetNsListSafe C.xmlGetNsListSafe -func (recv_ *Doc) GetNsListSafe(node *Node, out **NsPtr) c.Int { - return 0 -} - -// llgo:link (*Doc).GetNsList C.xmlGetNsList -func (recv_ *Doc) GetNsList(node *Node) *NsPtr { - return nil -} - -//go:linkname SetNs C.xmlSetNs -func SetNs(node NodePtr, ns NsPtr) - -//go:linkname CopyNamespace C.xmlCopyNamespace -func CopyNamespace(cur NsPtr) NsPtr - -//go:linkname CopyNamespaceList C.xmlCopyNamespaceList -func CopyNamespaceList(cur NsPtr) NsPtr - -/* - * Changing the content. - */ -//go:linkname SetProp C.xmlSetProp -func SetProp(node NodePtr, name *Char, value *Char) AttrPtr - -//go:linkname SetNsProp C.xmlSetNsProp -func SetNsProp(node NodePtr, ns NsPtr, name *Char, value *Char) AttrPtr - -// llgo:link (*Node).NodeGetAttrValue C.xmlNodeGetAttrValue -func (recv_ *Node) NodeGetAttrValue(name *Char, nsUri *Char, out **Char) c.Int { - return 0 -} - -// llgo:link (*Node).GetNoNsProp C.xmlGetNoNsProp -func (recv_ *Node) GetNoNsProp(name *Char) *Char { - return nil -} - -// llgo:link (*Node).GetProp C.xmlGetProp -func (recv_ *Node) GetProp(name *Char) *Char { - return nil -} - -// llgo:link (*Node).HasProp C.xmlHasProp -func (recv_ *Node) HasProp(name *Char) AttrPtr { - return nil -} - -// llgo:link (*Node).HasNsProp C.xmlHasNsProp -func (recv_ *Node) HasNsProp(name *Char, nameSpace *Char) AttrPtr { - return nil -} - -// llgo:link (*Node).GetNsProp C.xmlGetNsProp -func (recv_ *Node) GetNsProp(name *Char, nameSpace *Char) *Char { - return nil -} - -// llgo:link (*Doc).StringGetNodeList C.xmlStringGetNodeList -func (recv_ *Doc) StringGetNodeList(value *Char) NodePtr { - return nil -} - -// llgo:link (*Doc).StringLenGetNodeList C.xmlStringLenGetNodeList -func (recv_ *Doc) 
StringLenGetNodeList(value *Char, len c.Int) NodePtr { - return nil -} - -//go:linkname NodeListGetString C.xmlNodeListGetString -func NodeListGetString(doc DocPtr, list *Node, inLine c.Int) *Char - -// llgo:link (*Doc).NodeListGetRawString C.xmlNodeListGetRawString -func (recv_ *Doc) NodeListGetRawString(list *Node, inLine c.Int) *Char { - return nil -} - -//go:linkname NodeSetContent C.xmlNodeSetContent -func NodeSetContent(cur NodePtr, content *Char) c.Int - -//go:linkname NodeSetContentLen C.xmlNodeSetContentLen -func NodeSetContentLen(cur NodePtr, content *Char, len c.Int) c.Int - -//go:linkname NodeAddContent C.xmlNodeAddContent -func NodeAddContent(cur NodePtr, content *Char) c.Int - -//go:linkname NodeAddContentLen C.xmlNodeAddContentLen -func NodeAddContentLen(cur NodePtr, content *Char, len c.Int) c.Int - -// llgo:link (*Node).NodeGetContent C.xmlNodeGetContent -func (recv_ *Node) NodeGetContent() *Char { - return nil -} - -//go:linkname NodeBufGetContent C.xmlNodeBufGetContent -func NodeBufGetContent(buffer BufferPtr, cur *Node) c.Int - -//go:linkname BufGetNodeContent C.xmlBufGetNodeContent -func BufGetNodeContent(buf BufPtr, cur *Node) c.Int - -// llgo:link (*Node).NodeGetLang C.xmlNodeGetLang -func (recv_ *Node) NodeGetLang() *Char { - return nil -} - -// llgo:link (*Node).NodeGetSpacePreserve C.xmlNodeGetSpacePreserve -func (recv_ *Node) NodeGetSpacePreserve() c.Int { - return 0 -} - -//go:linkname NodeSetLang C.xmlNodeSetLang -func NodeSetLang(cur NodePtr, lang *Char) c.Int - -//go:linkname NodeSetSpacePreserve C.xmlNodeSetSpacePreserve -func NodeSetSpacePreserve(cur NodePtr, val c.Int) c.Int - -// llgo:link (*Doc).NodeGetBaseSafe C.xmlNodeGetBaseSafe -func (recv_ *Doc) NodeGetBaseSafe(cur *Node, baseOut **Char) c.Int { - return 0 -} - -// llgo:link (*Doc).NodeGetBase C.xmlNodeGetBase -func (recv_ *Doc) NodeGetBase(cur *Node) *Char { - return nil -} - -//go:linkname NodeSetBase C.xmlNodeSetBase -func NodeSetBase(cur NodePtr, uri *Char) c.Int - -/* - 
* Removing content. - */ -//go:linkname RemoveProp C.xmlRemoveProp -func RemoveProp(cur AttrPtr) c.Int - -//go:linkname UnsetNsProp C.xmlUnsetNsProp -func UnsetNsProp(node NodePtr, ns NsPtr, name *Char) c.Int - -//go:linkname UnsetProp C.xmlUnsetProp -func UnsetProp(node NodePtr, name *Char) c.Int - -/* - * Internal, don't use. - */ -//go:linkname BufferWriteCHAR C.xmlBufferWriteCHAR -func BufferWriteCHAR(buf BufferPtr, string *Char) - -//go:linkname BufferWriteChar C.xmlBufferWriteChar -func BufferWriteChar(buf BufferPtr, string *c.Char) - -//go:linkname BufferWriteQuotedString C.xmlBufferWriteQuotedString -func BufferWriteQuotedString(buf BufferPtr, string *Char) - -//go:linkname AttrSerializeTxtContent C.xmlAttrSerializeTxtContent -func AttrSerializeTxtContent(buf BufferPtr, doc DocPtr, attr AttrPtr, string *Char) - -/* - * Namespace handling. - */ -//go:linkname ReconciliateNs C.xmlReconciliateNs -func ReconciliateNs(doc DocPtr, tree NodePtr) c.Int - -/* - * Saving. - */ -//go:linkname DocDumpFormatMemory C.xmlDocDumpFormatMemory -func DocDumpFormatMemory(cur DocPtr, mem **Char, size *c.Int, format c.Int) - -//go:linkname DocDumpMemory C.xmlDocDumpMemory -func DocDumpMemory(cur DocPtr, mem **Char, size *c.Int) - -//go:linkname DocDumpMemoryEnc C.xmlDocDumpMemoryEnc -func DocDumpMemoryEnc(out_doc DocPtr, doc_txt_ptr **Char, doc_txt_len *c.Int, txt_encoding *c.Char) - -//go:linkname DocDumpFormatMemoryEnc C.xmlDocDumpFormatMemoryEnc -func DocDumpFormatMemoryEnc(out_doc DocPtr, doc_txt_ptr **Char, doc_txt_len *c.Int, txt_encoding *c.Char, format c.Int) - -//go:linkname DocFormatDump C.xmlDocFormatDump -func DocFormatDump(f *c.FILE, cur DocPtr, format c.Int) c.Int - -//go:linkname DocDump C.xmlDocDump -func DocDump(f *c.FILE, cur DocPtr) c.Int - -//go:linkname ElemDump C.xmlElemDump -func ElemDump(f *c.FILE, doc DocPtr, cur NodePtr) - -//go:linkname SaveFile C.xmlSaveFile -func SaveFile(filename *c.Char, cur DocPtr) c.Int - -//go:linkname SaveFormatFile 
C.xmlSaveFormatFile -func SaveFormatFile(filename *c.Char, cur DocPtr, format c.Int) c.Int - -//go:linkname BufNodeDump C.xmlBufNodeDump -func BufNodeDump(buf BufPtr, doc DocPtr, cur NodePtr, level c.Int, format c.Int) c.SizeT - -//go:linkname NodeDump C.xmlNodeDump -func NodeDump(buf BufferPtr, doc DocPtr, cur NodePtr, level c.Int, format c.Int) c.Int - -//go:linkname SaveFileTo C.xmlSaveFileTo -func SaveFileTo(buf OutputBufferPtr, cur DocPtr, encoding *c.Char) c.Int - -//go:linkname SaveFormatFileTo C.xmlSaveFormatFileTo -func SaveFormatFileTo(buf OutputBufferPtr, cur DocPtr, encoding *c.Char, format c.Int) c.Int - -//go:linkname NodeDumpOutput C.xmlNodeDumpOutput -func NodeDumpOutput(buf OutputBufferPtr, doc DocPtr, cur NodePtr, level c.Int, format c.Int, encoding *c.Char) - -//go:linkname SaveFormatFileEnc C.xmlSaveFormatFileEnc -func SaveFormatFileEnc(filename *c.Char, cur DocPtr, encoding *c.Char, format c.Int) c.Int - -//go:linkname SaveFileEnc C.xmlSaveFileEnc -func SaveFileEnc(filename *c.Char, cur DocPtr, encoding *c.Char) c.Int - -/* - * XHTML - */ -// llgo:link (*Char).IsXHTML C.xmlIsXHTML -func (recv_ *Char) IsXHTML(publicID *Char) c.Int { - return 0 -} - -/* - * Compression. - */ -// llgo:link (*Doc).GetDocCompressMode C.xmlGetDocCompressMode -func (recv_ *Doc) GetDocCompressMode() c.Int { - return 0 -} - -//go:linkname SetDocCompressMode C.xmlSetDocCompressMode -func SetDocCompressMode(doc DocPtr, mode c.Int) - -//go:linkname GetCompressMode C.xmlGetCompressMode -func GetCompressMode() c.Int - -//go:linkname SetCompressMode C.xmlSetCompressMode -func SetCompressMode(mode c.Int) - -/* -* DOM-wrapper helper functions. 
- */ -//go:linkname DOMWrapNewCtxt C.xmlDOMWrapNewCtxt -func DOMWrapNewCtxt() DOMWrapCtxtPtr - -//go:linkname DOMWrapFreeCtxt C.xmlDOMWrapFreeCtxt -func DOMWrapFreeCtxt(ctxt DOMWrapCtxtPtr) - -//go:linkname DOMWrapReconcileNamespaces C.xmlDOMWrapReconcileNamespaces -func DOMWrapReconcileNamespaces(ctxt DOMWrapCtxtPtr, elem NodePtr, options c.Int) c.Int - -//go:linkname DOMWrapAdoptNode C.xmlDOMWrapAdoptNode -func DOMWrapAdoptNode(ctxt DOMWrapCtxtPtr, sourceDoc DocPtr, node NodePtr, destDoc DocPtr, destParent NodePtr, options c.Int) c.Int - -//go:linkname DOMWrapRemoveNode C.xmlDOMWrapRemoveNode -func DOMWrapRemoveNode(ctxt DOMWrapCtxtPtr, doc DocPtr, node NodePtr, options c.Int) c.Int - -//go:linkname DOMWrapCloneNode C.xmlDOMWrapCloneNode -func DOMWrapCloneNode(ctxt DOMWrapCtxtPtr, sourceDoc DocPtr, node NodePtr, clonedNode *NodePtr, destDoc DocPtr, destParent NodePtr, deep c.Int, options c.Int) c.Int - -/* - * 5 interfaces from DOM ElementTraversal, but different in entities - * traversal. 
- */ -//go:linkname ChildElementCount C.xmlChildElementCount -func ChildElementCount(parent NodePtr) c.Ulong - -//go:linkname NextElementSibling C.xmlNextElementSibling -func NextElementSibling(node NodePtr) NodePtr - -//go:linkname FirstElementChild C.xmlFirstElementChild -func FirstElementChild(parent NodePtr) NodePtr - -//go:linkname LastElementChild C.xmlLastElementChild -func LastElementChild(parent NodePtr) NodePtr - -//go:linkname PreviousElementSibling C.xmlPreviousElementSibling -func PreviousElementSibling(node NodePtr) NodePtr - -//go:linkname RegisterNodeDefault C.xmlRegisterNodeDefault -func RegisterNodeDefault(func_ RegisterNodeFunc) RegisterNodeFunc - -//go:linkname DeregisterNodeDefault C.xmlDeregisterNodeDefault -func DeregisterNodeDefault(func_ DeregisterNodeFunc) DeregisterNodeFunc - -//go:linkname ThrDefRegisterNodeDefault C.xmlThrDefRegisterNodeDefault -func ThrDefRegisterNodeDefault(func_ RegisterNodeFunc) RegisterNodeFunc - -//go:linkname ThrDefDeregisterNodeDefault C.xmlThrDefDeregisterNodeDefault -func ThrDefDeregisterNodeDefault(func_ DeregisterNodeFunc) DeregisterNodeFunc - -// llgo:link BufferAllocationScheme.ThrDefBufferAllocScheme C.xmlThrDefBufferAllocScheme -func (recv_ BufferAllocationScheme) ThrDefBufferAllocScheme() BufferAllocationScheme { - return 0 -} - -//go:linkname ThrDefDefaultBufferSize C.xmlThrDefDefaultBufferSize -func ThrDefDefaultBufferSize(v c.Int) c.Int diff --git a/libxml2/uri.go b/libxml2/uri.go deleted file mode 100644 index 39c06856..00000000 --- a/libxml2/uri.go +++ /dev/null @@ -1,97 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlURI struct { - Scheme *c.Char - Opaque *c.Char - Authority *c.Char - Server *c.Char - User *c.Char - Port c.Int - Path *c.Char - Query *c.Char - Fragment *c.Char - Cleanup c.Int - QueryRaw *c.Char -} -type URI X_xmlURI -type URIPtr *URI - -/* - * This function is in tree.h: - * xmlChar * xmlNodeGetBase (xmlDocPtr doc, - * xmlNodePtr cur); 
- */ -//go:linkname CreateURI C.xmlCreateURI -func CreateURI() URIPtr - -// llgo:link (*Char).BuildURISafe C.xmlBuildURISafe -func (recv_ *Char) BuildURISafe(base *Char, out **Char) c.Int { - return 0 -} - -// llgo:link (*Char).BuildURI C.xmlBuildURI -func (recv_ *Char) BuildURI(base *Char) *Char { - return nil -} - -// llgo:link (*Char).BuildRelativeURISafe C.xmlBuildRelativeURISafe -func (recv_ *Char) BuildRelativeURISafe(base *Char, out **Char) c.Int { - return 0 -} - -// llgo:link (*Char).BuildRelativeURI C.xmlBuildRelativeURI -func (recv_ *Char) BuildRelativeURI(base *Char) *Char { - return nil -} - -//go:linkname ParseURI C.xmlParseURI -func ParseURI(str *c.Char) URIPtr - -//go:linkname ParseURISafe C.xmlParseURISafe -func ParseURISafe(str *c.Char, uri *URIPtr) c.Int - -//go:linkname ParseURIRaw C.xmlParseURIRaw -func ParseURIRaw(str *c.Char, raw c.Int) URIPtr - -//go:linkname ParseURIReference C.xmlParseURIReference -func ParseURIReference(uri URIPtr, str *c.Char) c.Int - -//go:linkname SaveUri C.xmlSaveUri -func SaveUri(uri URIPtr) *Char - -//go:linkname PrintURI C.xmlPrintURI -func PrintURI(stream *c.FILE, uri URIPtr) - -// llgo:link (*Char).URIEscapeStr C.xmlURIEscapeStr -func (recv_ *Char) URIEscapeStr(list *Char) *Char { - return nil -} - -//go:linkname URIUnescapeString C.xmlURIUnescapeString -func URIUnescapeString(str *c.Char, len c.Int, target *c.Char) *c.Char - -//go:linkname NormalizeURIPath C.xmlNormalizeURIPath -func NormalizeURIPath(path *c.Char) c.Int - -// llgo:link (*Char).URIEscape C.xmlURIEscape -func (recv_ *Char) URIEscape() *Char { - return nil -} - -//go:linkname FreeURI C.xmlFreeURI -func FreeURI(uri URIPtr) - -// llgo:link (*Char).CanonicPath C.xmlCanonicPath -func (recv_ *Char) CanonicPath() *Char { - return nil -} - -// llgo:link (*Char).PathToURI C.xmlPathToURI -func (recv_ *Char) PathToURI() *Char { - return nil -} diff --git a/libxml2/valid.go b/libxml2/valid.go deleted file mode 100644 index 2ca85a79..00000000 --- 
a/libxml2/valid.go +++ /dev/null @@ -1,294 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlValidState struct { - Unused [8]uint8 -} -type ValidState X_xmlValidState -type ValidStatePtr *ValidState - -// llgo:type C -type ValidityErrorFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type ValidityWarningFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -type X_xmlValidCtxt struct { - UserData c.Pointer - Error ValidityErrorFunc - Warning ValidityWarningFunc - Node NodePtr - NodeNr c.Int - NodeMax c.Int - NodeTab *NodePtr - Flags c.Uint - Doc DocPtr - Valid c.Int - Vstate *ValidState - VstateNr c.Int - VstateMax c.Int - VstateTab *ValidState - Am AutomataPtr - State AutomataStatePtr -} -type ValidCtxt X_xmlValidCtxt -type ValidCtxtPtr *ValidCtxt -type NotationTable X_xmlHashTable -type NotationTablePtr *NotationTable -type ElementTable X_xmlHashTable -type ElementTablePtr *ElementTable -type AttributeTable X_xmlHashTable -type AttributeTablePtr *AttributeTable -type IDTable X_xmlHashTable -type IDTablePtr *IDTable -type RefTable X_xmlHashTable -type RefTablePtr *RefTable - -/* Notation */ -//go:linkname AddNotationDecl C.xmlAddNotationDecl -func AddNotationDecl(ctxt ValidCtxtPtr, dtd DtdPtr, name *Char, PublicID *Char, SystemID *Char) NotationPtr - -//go:linkname CopyNotationTable C.xmlCopyNotationTable -func CopyNotationTable(table NotationTablePtr) NotationTablePtr - -//go:linkname FreeNotationTable C.xmlFreeNotationTable -func FreeNotationTable(table NotationTablePtr) - -//go:linkname DumpNotationDecl C.xmlDumpNotationDecl -func DumpNotationDecl(buf BufferPtr, nota NotationPtr) - -/* XML_DEPRECATED, still used in lxml */ -//go:linkname DumpNotationTable C.xmlDumpNotationTable -func DumpNotationTable(buf BufferPtr, table NotationTablePtr) - -/* Element Content */ -/* the non Doc version are being deprecated */ -// llgo:link 
(*Char).NewElementContent C.xmlNewElementContent -func (recv_ *Char) NewElementContent(type_ ElementContentType) ElementContentPtr { - return nil -} - -//go:linkname CopyElementContent C.xmlCopyElementContent -func CopyElementContent(content ElementContentPtr) ElementContentPtr - -//go:linkname FreeElementContent C.xmlFreeElementContent -func FreeElementContent(cur ElementContentPtr) - -/* the new versions with doc argument */ -//go:linkname NewDocElementContent C.xmlNewDocElementContent -func NewDocElementContent(doc DocPtr, name *Char, type_ ElementContentType) ElementContentPtr - -//go:linkname CopyDocElementContent C.xmlCopyDocElementContent -func CopyDocElementContent(doc DocPtr, content ElementContentPtr) ElementContentPtr - -//go:linkname FreeDocElementContent C.xmlFreeDocElementContent -func FreeDocElementContent(doc DocPtr, cur ElementContentPtr) - -//go:linkname SnprintfElementContent C.xmlSnprintfElementContent -func SnprintfElementContent(buf *c.Char, size c.Int, content ElementContentPtr, englob c.Int) - -//go:linkname SprintfElementContent C.xmlSprintfElementContent -func SprintfElementContent(buf *c.Char, content ElementContentPtr, englob c.Int) - -/* Element */ -//go:linkname AddElementDecl C.xmlAddElementDecl -func AddElementDecl(ctxt ValidCtxtPtr, dtd DtdPtr, name *Char, type_ ElementTypeVal, content ElementContentPtr) ElementPtr - -//go:linkname CopyElementTable C.xmlCopyElementTable -func CopyElementTable(table ElementTablePtr) ElementTablePtr - -//go:linkname FreeElementTable C.xmlFreeElementTable -func FreeElementTable(table ElementTablePtr) - -//go:linkname DumpElementTable C.xmlDumpElementTable -func DumpElementTable(buf BufferPtr, table ElementTablePtr) - -//go:linkname DumpElementDecl C.xmlDumpElementDecl -func DumpElementDecl(buf BufferPtr, elem ElementPtr) - -/* Enumeration */ -// llgo:link (*Char).CreateEnumeration C.xmlCreateEnumeration -func (recv_ *Char) CreateEnumeration() EnumerationPtr { - return nil -} - -//go:linkname 
FreeEnumeration C.xmlFreeEnumeration -func FreeEnumeration(cur EnumerationPtr) - -//go:linkname CopyEnumeration C.xmlCopyEnumeration -func CopyEnumeration(cur EnumerationPtr) EnumerationPtr - -/* Attribute */ -//go:linkname AddAttributeDecl C.xmlAddAttributeDecl -func AddAttributeDecl(ctxt ValidCtxtPtr, dtd DtdPtr, elem *Char, name *Char, ns *Char, type_ AttributeType, def AttributeDefault, defaultValue *Char, tree EnumerationPtr) AttributePtr - -//go:linkname CopyAttributeTable C.xmlCopyAttributeTable -func CopyAttributeTable(table AttributeTablePtr) AttributeTablePtr - -//go:linkname FreeAttributeTable C.xmlFreeAttributeTable -func FreeAttributeTable(table AttributeTablePtr) - -//go:linkname DumpAttributeTable C.xmlDumpAttributeTable -func DumpAttributeTable(buf BufferPtr, table AttributeTablePtr) - -//go:linkname DumpAttributeDecl C.xmlDumpAttributeDecl -func DumpAttributeDecl(buf BufferPtr, attr AttributePtr) - -/* IDs */ -//go:linkname AddIDSafe C.xmlAddIDSafe -func AddIDSafe(attr AttrPtr, value *Char) c.Int - -//go:linkname AddID C.xmlAddID -func AddID(ctxt ValidCtxtPtr, doc DocPtr, value *Char, attr AttrPtr) IDPtr - -//go:linkname FreeIDTable C.xmlFreeIDTable -func FreeIDTable(table IDTablePtr) - -//go:linkname GetID C.xmlGetID -func GetID(doc DocPtr, ID *Char) AttrPtr - -//go:linkname IsID C.xmlIsID -func IsID(doc DocPtr, elem NodePtr, attr AttrPtr) c.Int - -//go:linkname RemoveID C.xmlRemoveID -func RemoveID(doc DocPtr, attr AttrPtr) c.Int - -/* IDREFs */ -//go:linkname AddRef C.xmlAddRef -func AddRef(ctxt ValidCtxtPtr, doc DocPtr, value *Char, attr AttrPtr) RefPtr - -//go:linkname FreeRefTable C.xmlFreeRefTable -func FreeRefTable(table RefTablePtr) - -//go:linkname IsRef C.xmlIsRef -func IsRef(doc DocPtr, elem NodePtr, attr AttrPtr) c.Int - -//go:linkname RemoveRef C.xmlRemoveRef -func RemoveRef(doc DocPtr, attr AttrPtr) c.Int - -//go:linkname GetRefs C.xmlGetRefs -func GetRefs(doc DocPtr, ID *Char) ListPtr - -/* Allocate/Release Validation Contexts */ 
-//go:linkname NewValidCtxt C.xmlNewValidCtxt -func NewValidCtxt() ValidCtxtPtr - -//go:linkname FreeValidCtxt C.xmlFreeValidCtxt -func FreeValidCtxt(ValidCtxtPtr) - -//go:linkname ValidateRoot C.xmlValidateRoot -func ValidateRoot(ctxt ValidCtxtPtr, doc DocPtr) c.Int - -//go:linkname ValidateElementDecl C.xmlValidateElementDecl -func ValidateElementDecl(ctxt ValidCtxtPtr, doc DocPtr, elem ElementPtr) c.Int - -//go:linkname ValidNormalizeAttributeValue C.xmlValidNormalizeAttributeValue -func ValidNormalizeAttributeValue(doc DocPtr, elem NodePtr, name *Char, value *Char) *Char - -//go:linkname ValidCtxtNormalizeAttributeValue C.xmlValidCtxtNormalizeAttributeValue -func ValidCtxtNormalizeAttributeValue(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr, name *Char, value *Char) *Char - -//go:linkname ValidateAttributeDecl C.xmlValidateAttributeDecl -func ValidateAttributeDecl(ctxt ValidCtxtPtr, doc DocPtr, attr AttributePtr) c.Int - -// llgo:link AttributeType.ValidateAttributeValue C.xmlValidateAttributeValue -func (recv_ AttributeType) ValidateAttributeValue(value *Char) c.Int { - return 0 -} - -//go:linkname ValidateNotationDecl C.xmlValidateNotationDecl -func ValidateNotationDecl(ctxt ValidCtxtPtr, doc DocPtr, nota NotationPtr) c.Int - -//go:linkname ValidateDtd C.xmlValidateDtd -func ValidateDtd(ctxt ValidCtxtPtr, doc DocPtr, dtd DtdPtr) c.Int - -//go:linkname ValidateDtdFinal C.xmlValidateDtdFinal -func ValidateDtdFinal(ctxt ValidCtxtPtr, doc DocPtr) c.Int - -//go:linkname ValidateDocument C.xmlValidateDocument -func ValidateDocument(ctxt ValidCtxtPtr, doc DocPtr) c.Int - -//go:linkname ValidateElement C.xmlValidateElement -func ValidateElement(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr) c.Int - -//go:linkname ValidateOneElement C.xmlValidateOneElement -func ValidateOneElement(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr) c.Int - -//go:linkname ValidateOneAttribute C.xmlValidateOneAttribute -func ValidateOneAttribute(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr, attr 
AttrPtr, value *Char) c.Int - -//go:linkname ValidateOneNamespace C.xmlValidateOneNamespace -func ValidateOneNamespace(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr, prefix *Char, ns NsPtr, value *Char) c.Int - -//go:linkname ValidateDocumentFinal C.xmlValidateDocumentFinal -func ValidateDocumentFinal(ctxt ValidCtxtPtr, doc DocPtr) c.Int - -//go:linkname ValidateNotationUse C.xmlValidateNotationUse -func ValidateNotationUse(ctxt ValidCtxtPtr, doc DocPtr, notationName *Char) c.Int - -//go:linkname IsMixedElement C.xmlIsMixedElement -func IsMixedElement(doc DocPtr, name *Char) c.Int - -//go:linkname GetDtdAttrDesc C.xmlGetDtdAttrDesc -func GetDtdAttrDesc(dtd DtdPtr, elem *Char, name *Char) AttributePtr - -//go:linkname GetDtdQAttrDesc C.xmlGetDtdQAttrDesc -func GetDtdQAttrDesc(dtd DtdPtr, elem *Char, name *Char, prefix *Char) AttributePtr - -//go:linkname GetDtdNotationDesc C.xmlGetDtdNotationDesc -func GetDtdNotationDesc(dtd DtdPtr, name *Char) NotationPtr - -//go:linkname GetDtdQElementDesc C.xmlGetDtdQElementDesc -func GetDtdQElementDesc(dtd DtdPtr, name *Char, prefix *Char) ElementPtr - -//go:linkname GetDtdElementDesc C.xmlGetDtdElementDesc -func GetDtdElementDesc(dtd DtdPtr, name *Char) ElementPtr - -// llgo:link (*ElementContent).ValidGetPotentialChildren C.xmlValidGetPotentialChildren -func (recv_ *ElementContent) ValidGetPotentialChildren(names **Char, len *c.Int, max c.Int) c.Int { - return 0 -} - -// llgo:link (*Node).ValidGetValidElements C.xmlValidGetValidElements -func (recv_ *Node) ValidGetValidElements(next *Node, names **Char, max c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).ValidateNameValue C.xmlValidateNameValue -func (recv_ *Char) ValidateNameValue() c.Int { - return 0 -} - -// llgo:link (*Char).ValidateNamesValue C.xmlValidateNamesValue -func (recv_ *Char) ValidateNamesValue() c.Int { - return 0 -} - -// llgo:link (*Char).ValidateNmtokenValue C.xmlValidateNmtokenValue -func (recv_ *Char) ValidateNmtokenValue() c.Int { - return 0 -} - -// 
llgo:link (*Char).ValidateNmtokensValue C.xmlValidateNmtokensValue -func (recv_ *Char) ValidateNmtokensValue() c.Int { - return 0 -} - -/* - * Validation based on the regexp support - */ -//go:linkname ValidBuildContentModel C.xmlValidBuildContentModel -func ValidBuildContentModel(ctxt ValidCtxtPtr, elem ElementPtr) c.Int - -//go:linkname ValidatePushElement C.xmlValidatePushElement -func ValidatePushElement(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr, qname *Char) c.Int - -//go:linkname ValidatePushCData C.xmlValidatePushCData -func ValidatePushCData(ctxt ValidCtxtPtr, data *Char, len c.Int) c.Int - -//go:linkname ValidatePopElement C.xmlValidatePopElement -func ValidatePopElement(ctxt ValidCtxtPtr, doc DocPtr, elem NodePtr, qname *Char) c.Int diff --git a/libxml2/xinclude.go b/libxml2/xinclude.go deleted file mode 100644 index fa7a014f..00000000 --- a/libxml2/xinclude.go +++ /dev/null @@ -1,54 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlXIncludeCtxt struct { - Unused [8]uint8 -} -type XIncludeCtxt X_xmlXIncludeCtxt -type XIncludeCtxtPtr *XIncludeCtxt - -/* - * standalone processing - */ -//go:linkname XIncludeProcess C.xmlXIncludeProcess -func XIncludeProcess(doc DocPtr) c.Int - -//go:linkname XIncludeProcessFlags C.xmlXIncludeProcessFlags -func XIncludeProcessFlags(doc DocPtr, flags c.Int) c.Int - -//go:linkname XIncludeProcessFlagsData C.xmlXIncludeProcessFlagsData -func XIncludeProcessFlagsData(doc DocPtr, flags c.Int, data c.Pointer) c.Int - -//go:linkname XIncludeProcessTreeFlagsData C.xmlXIncludeProcessTreeFlagsData -func XIncludeProcessTreeFlagsData(tree NodePtr, flags c.Int, data c.Pointer) c.Int - -//go:linkname XIncludeProcessTree C.xmlXIncludeProcessTree -func XIncludeProcessTree(tree NodePtr) c.Int - -//go:linkname XIncludeProcessTreeFlags C.xmlXIncludeProcessTreeFlags -func XIncludeProcessTreeFlags(tree NodePtr, flags c.Int) c.Int - -/* - * contextual processing - */ -//go:linkname XIncludeNewContext 
C.xmlXIncludeNewContext -func XIncludeNewContext(doc DocPtr) XIncludeCtxtPtr - -//go:linkname XIncludeSetFlags C.xmlXIncludeSetFlags -func XIncludeSetFlags(ctxt XIncludeCtxtPtr, flags c.Int) c.Int - -//go:linkname XIncludeSetErrorHandler C.xmlXIncludeSetErrorHandler -func XIncludeSetErrorHandler(ctxt XIncludeCtxtPtr, handler StructuredErrorFunc, data c.Pointer) - -//go:linkname XIncludeGetLastError C.xmlXIncludeGetLastError -func XIncludeGetLastError(ctxt XIncludeCtxtPtr) c.Int - -//go:linkname XIncludeFreeContext C.xmlXIncludeFreeContext -func XIncludeFreeContext(ctxt XIncludeCtxtPtr) - -//go:linkname XIncludeProcessNode C.xmlXIncludeProcessNode -func XIncludeProcessNode(ctxt XIncludeCtxtPtr, tree NodePtr) c.Int diff --git a/libxml2/xlink.go b/libxml2/xlink.go deleted file mode 100644 index 638fc3d5..00000000 --- a/libxml2/xlink.go +++ /dev/null @@ -1,80 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type XlinkHRef *Char -type XlinkRole *Char -type XlinkTitle *Char -type XlinkType c.Int - -const ( - XLINK_TYPE_NONE XlinkType = 0 - XLINK_TYPE_SIMPLE XlinkType = 1 - XLINK_TYPE_EXTENDED XlinkType = 2 - XLINK_TYPE_EXTENDED_SET XlinkType = 3 -) - -type XlinkShow c.Int - -const ( - XLINK_SHOW_NONE XlinkShow = 0 - XLINK_SHOW_NEW XlinkShow = 1 - XLINK_SHOW_EMBED XlinkShow = 2 - XLINK_SHOW_REPLACE XlinkShow = 3 -) - -type XlinkActuate c.Int - -const ( - XLINK_ACTUATE_NONE XlinkActuate = 0 - XLINK_ACTUATE_AUTO XlinkActuate = 1 - XLINK_ACTUATE_ONREQUEST XlinkActuate = 2 -) - -// llgo:type C -type XlinkNodeDetectFunc func(c.Pointer, NodePtr) - -// llgo:type C -type XlinkSimpleLinkFunk func(c.Pointer, NodePtr, XlinkHRef, XlinkRole, XlinkTitle) - -// llgo:type C -type XlinkExtendedLinkFunk func(c.Pointer, NodePtr, c.Int, *XlinkHRef, *XlinkRole, c.Int, *XlinkRole, *XlinkRole, *XlinkShow, *XlinkActuate, c.Int, *XlinkTitle, **Char) - -// llgo:type C -type XlinkExtendedLinkSetFunk func(c.Pointer, NodePtr, c.Int, *XlinkHRef, *XlinkRole, c.Int, 
*XlinkTitle, **Char) - -type X_xlinkHandler struct { - Simple XlinkSimpleLinkFunk - Extended XlinkExtendedLinkFunk - Set XlinkExtendedLinkSetFunk -} -type XlinkHandler X_xlinkHandler -type XlinkHandlerPtr *XlinkHandler - -/* - * The default detection routine, can be overridden, they call the default - * detection callbacks. - */ -//go:linkname XlinkGetDefaultDetect C.xlinkGetDefaultDetect -func XlinkGetDefaultDetect() XlinkNodeDetectFunc - -//go:linkname XlinkSetDefaultDetect C.xlinkSetDefaultDetect -func XlinkSetDefaultDetect(func_ XlinkNodeDetectFunc) - -/* - * Routines to set/get the default handlers. - */ -//go:linkname XlinkGetDefaultHandler C.xlinkGetDefaultHandler -func XlinkGetDefaultHandler() XlinkHandlerPtr - -//go:linkname XlinkSetDefaultHandler C.xlinkSetDefaultHandler -func XlinkSetDefaultHandler(handler XlinkHandlerPtr) - -/* - * Link detection module itself. - */ -//go:linkname XlinkIsLink C.xlinkIsLink -func XlinkIsLink(doc DocPtr, node NodePtr) XlinkType diff --git a/libxml2/xmlIO.go b/libxml2/xmlIO.go deleted file mode 100644 index 3d392e78..00000000 --- a/libxml2/xmlIO.go +++ /dev/null @@ -1,237 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -// llgo:type C -type InputMatchCallback func(*c.Char) c.Int - -// llgo:type C -type InputOpenCallback func(*c.Char) c.Pointer - -// llgo:type C -type InputReadCallback func(c.Pointer, *c.Char, c.Int) c.Int - -// llgo:type C -type InputCloseCallback func(c.Pointer) c.Int - -// llgo:type C -type OutputMatchCallback func(*c.Char) c.Int - -// llgo:type C -type OutputOpenCallback func(*c.Char) c.Pointer - -// llgo:type C -type OutputWriteCallback func(c.Pointer, *c.Char, c.Int) c.Int - -// llgo:type C -type OutputCloseCallback func(c.Pointer) c.Int - -// llgo:type C -type ParserInputBufferCreateFilenameFunc func(*c.Char, CharEncoding) ParserInputBufferPtr - -// llgo:type C -type OutputBufferCreateFilenameFunc func(*c.Char, CharEncodingHandlerPtr, c.Int) OutputBufferPtr - 
-//go:linkname X__xmlParserInputBufferCreateFilenameValue C.__xmlParserInputBufferCreateFilenameValue -func X__xmlParserInputBufferCreateFilenameValue() ParserInputBufferCreateFilenameFunc - -//go:linkname X__xmlOutputBufferCreateFilenameValue C.__xmlOutputBufferCreateFilenameValue -func X__xmlOutputBufferCreateFilenameValue() OutputBufferCreateFilenameFunc - -/* - * Interfaces for input - */ -//go:linkname CleanupInputCallbacks C.xmlCleanupInputCallbacks -func CleanupInputCallbacks() - -//go:linkname PopInputCallbacks C.xmlPopInputCallbacks -func PopInputCallbacks() c.Int - -//go:linkname RegisterDefaultInputCallbacks C.xmlRegisterDefaultInputCallbacks -func RegisterDefaultInputCallbacks() - -// llgo:link CharEncoding.AllocParserInputBuffer C.xmlAllocParserInputBuffer -func (recv_ CharEncoding) AllocParserInputBuffer() ParserInputBufferPtr { - return nil -} - -//go:linkname ParserInputBufferCreateFilename C.xmlParserInputBufferCreateFilename -func ParserInputBufferCreateFilename(URI *c.Char, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferCreateFile C.xmlParserInputBufferCreateFile -func ParserInputBufferCreateFile(file *c.FILE, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferCreateFd C.xmlParserInputBufferCreateFd -func ParserInputBufferCreateFd(fd c.Int, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferCreateMem C.xmlParserInputBufferCreateMem -func ParserInputBufferCreateMem(mem *c.Char, size c.Int, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferCreateStatic C.xmlParserInputBufferCreateStatic -func ParserInputBufferCreateStatic(mem *c.Char, size c.Int, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferCreateIO C.xmlParserInputBufferCreateIO -func ParserInputBufferCreateIO(ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, enc CharEncoding) ParserInputBufferPtr - -//go:linkname ParserInputBufferRead 
C.xmlParserInputBufferRead -func ParserInputBufferRead(in ParserInputBufferPtr, len c.Int) c.Int - -//go:linkname ParserInputBufferGrow C.xmlParserInputBufferGrow -func ParserInputBufferGrow(in ParserInputBufferPtr, len c.Int) c.Int - -//go:linkname ParserInputBufferPush C.xmlParserInputBufferPush -func ParserInputBufferPush(in ParserInputBufferPtr, len c.Int, buf *c.Char) c.Int - -//go:linkname FreeParserInputBuffer C.xmlFreeParserInputBuffer -func FreeParserInputBuffer(in ParserInputBufferPtr) - -//go:linkname ParserGetDirectory C.xmlParserGetDirectory -func ParserGetDirectory(filename *c.Char) *c.Char - -//go:linkname RegisterInputCallbacks C.xmlRegisterInputCallbacks -func RegisterInputCallbacks(matchFunc InputMatchCallback, openFunc InputOpenCallback, readFunc InputReadCallback, closeFunc InputCloseCallback) c.Int - -//go:linkname X__xmlParserInputBufferCreateFilename C.__xmlParserInputBufferCreateFilename -func X__xmlParserInputBufferCreateFilename(URI *c.Char, enc CharEncoding) ParserInputBufferPtr - -/* - * Interfaces for output - */ -//go:linkname CleanupOutputCallbacks C.xmlCleanupOutputCallbacks -func CleanupOutputCallbacks() - -//go:linkname PopOutputCallbacks C.xmlPopOutputCallbacks -func PopOutputCallbacks() c.Int - -//go:linkname RegisterDefaultOutputCallbacks C.xmlRegisterDefaultOutputCallbacks -func RegisterDefaultOutputCallbacks() - -//go:linkname AllocOutputBuffer C.xmlAllocOutputBuffer -func AllocOutputBuffer(encoder CharEncodingHandlerPtr) OutputBufferPtr - -//go:linkname OutputBufferCreateFilename C.xmlOutputBufferCreateFilename -func OutputBufferCreateFilename(URI *c.Char, encoder CharEncodingHandlerPtr, compression c.Int) OutputBufferPtr - -//go:linkname OutputBufferCreateFile C.xmlOutputBufferCreateFile -func OutputBufferCreateFile(file *c.FILE, encoder CharEncodingHandlerPtr) OutputBufferPtr - -//go:linkname OutputBufferCreateBuffer C.xmlOutputBufferCreateBuffer -func OutputBufferCreateBuffer(buffer BufferPtr, encoder 
CharEncodingHandlerPtr) OutputBufferPtr - -//go:linkname OutputBufferCreateFd C.xmlOutputBufferCreateFd -func OutputBufferCreateFd(fd c.Int, encoder CharEncodingHandlerPtr) OutputBufferPtr - -//go:linkname OutputBufferCreateIO C.xmlOutputBufferCreateIO -func OutputBufferCreateIO(iowrite OutputWriteCallback, ioclose OutputCloseCallback, ioctx c.Pointer, encoder CharEncodingHandlerPtr) OutputBufferPtr - -/* Couple of APIs to get the output without digging into the buffers */ -//go:linkname OutputBufferGetContent C.xmlOutputBufferGetContent -func OutputBufferGetContent(out OutputBufferPtr) *Char - -//go:linkname OutputBufferGetSize C.xmlOutputBufferGetSize -func OutputBufferGetSize(out OutputBufferPtr) c.SizeT - -//go:linkname OutputBufferWrite C.xmlOutputBufferWrite -func OutputBufferWrite(out OutputBufferPtr, len c.Int, buf *c.Char) c.Int - -//go:linkname OutputBufferWriteString C.xmlOutputBufferWriteString -func OutputBufferWriteString(out OutputBufferPtr, str *c.Char) c.Int - -//go:linkname OutputBufferWriteEscape C.xmlOutputBufferWriteEscape -func OutputBufferWriteEscape(out OutputBufferPtr, str *Char, escaping CharEncodingOutputFunc) c.Int - -//go:linkname OutputBufferFlush C.xmlOutputBufferFlush -func OutputBufferFlush(out OutputBufferPtr) c.Int - -//go:linkname OutputBufferClose C.xmlOutputBufferClose -func OutputBufferClose(out OutputBufferPtr) c.Int - -//go:linkname RegisterOutputCallbacks C.xmlRegisterOutputCallbacks -func RegisterOutputCallbacks(matchFunc OutputMatchCallback, openFunc OutputOpenCallback, writeFunc OutputWriteCallback, closeFunc OutputCloseCallback) c.Int - -//go:linkname X__xmlOutputBufferCreateFilename C.__xmlOutputBufferCreateFilename -func X__xmlOutputBufferCreateFilename(URI *c.Char, encoder CharEncodingHandlerPtr, compression c.Int) OutputBufferPtr - -/* This function only exists if HTTP support built into the library */ -//go:linkname RegisterHTTPPostCallbacks C.xmlRegisterHTTPPostCallbacks -func RegisterHTTPPostCallbacks() - 
-//go:linkname CheckHTTPInput C.xmlCheckHTTPInput -func CheckHTTPInput(ctxt ParserCtxtPtr, ret ParserInputPtr) ParserInputPtr - -/* - * A predefined entity loader disabling network accesses - */ -//go:linkname NoNetExternalEntityLoader C.xmlNoNetExternalEntityLoader -func NoNetExternalEntityLoader(URL *c.Char, ID *c.Char, ctxt ParserCtxtPtr) ParserInputPtr - -// llgo:link (*Char).NormalizeWindowsPath C.xmlNormalizeWindowsPath -func (recv_ *Char) NormalizeWindowsPath() *Char { - return nil -} - -//go:linkname CheckFilename C.xmlCheckFilename -func CheckFilename(path *c.Char) c.Int - -/** - * Default 'file://' protocol callbacks - */ -//go:linkname FileMatch C.xmlFileMatch -func FileMatch(filename *c.Char) c.Int - -//go:linkname FileOpen C.xmlFileOpen -func FileOpen(filename *c.Char) c.Pointer - -//go:linkname FileRead C.xmlFileRead -func FileRead(context c.Pointer, buffer *c.Char, len c.Int) c.Int - -//go:linkname FileClose C.xmlFileClose -func FileClose(context c.Pointer) c.Int - -/** - * Default 'http://' protocol callbacks - */ -//go:linkname IOHTTPMatch C.xmlIOHTTPMatch -func IOHTTPMatch(filename *c.Char) c.Int - -//go:linkname IOHTTPOpen C.xmlIOHTTPOpen -func IOHTTPOpen(filename *c.Char) c.Pointer - -//go:linkname IOHTTPOpenW C.xmlIOHTTPOpenW -func IOHTTPOpenW(post_uri *c.Char, compression c.Int) c.Pointer - -//go:linkname IOHTTPRead C.xmlIOHTTPRead -func IOHTTPRead(context c.Pointer, buffer *c.Char, len c.Int) c.Int - -//go:linkname IOHTTPClose C.xmlIOHTTPClose -func IOHTTPClose(context c.Pointer) c.Int - -/** - * Default 'ftp://' protocol callbacks - */ -//go:linkname IOFTPMatch C.xmlIOFTPMatch -func IOFTPMatch(filename *c.Char) c.Int - -//go:linkname IOFTPOpen C.xmlIOFTPOpen -func IOFTPOpen(filename *c.Char) c.Pointer - -//go:linkname IOFTPRead C.xmlIOFTPRead -func IOFTPRead(context c.Pointer, buffer *c.Char, len c.Int) c.Int - -//go:linkname IOFTPClose C.xmlIOFTPClose -func IOFTPClose(context c.Pointer) c.Int - -//go:linkname 
ParserInputBufferCreateFilenameDefault C.xmlParserInputBufferCreateFilenameDefault -func ParserInputBufferCreateFilenameDefault(func_ ParserInputBufferCreateFilenameFunc) ParserInputBufferCreateFilenameFunc - -//go:linkname OutputBufferCreateFilenameDefault C.xmlOutputBufferCreateFilenameDefault -func OutputBufferCreateFilenameDefault(func_ OutputBufferCreateFilenameFunc) OutputBufferCreateFilenameFunc - -//go:linkname ThrDefOutputBufferCreateFilenameDefault C.xmlThrDefOutputBufferCreateFilenameDefault -func ThrDefOutputBufferCreateFilenameDefault(func_ OutputBufferCreateFilenameFunc) OutputBufferCreateFilenameFunc - -//go:linkname ThrDefParserInputBufferCreateFilenameDefault C.xmlThrDefParserInputBufferCreateFilenameDefault -func ThrDefParserInputBufferCreateFilenameDefault(func_ ParserInputBufferCreateFilenameFunc) ParserInputBufferCreateFilenameFunc diff --git a/libxml2/xmlautomata.go b/libxml2/xmlautomata.go deleted file mode 100644 index c4c6a6a1..00000000 --- a/libxml2/xmlautomata.go +++ /dev/null @@ -1,78 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlAutomata struct { - Unused [8]uint8 -} -type Automata X_xmlAutomata -type AutomataPtr *Automata - -type X_xmlAutomataState struct { - Unused [8]uint8 -} -type AutomataState X_xmlAutomataState -type AutomataStatePtr *AutomataState - -/* - * Building API - */ -//go:linkname NewAutomata C.xmlNewAutomata -func NewAutomata() AutomataPtr - -//go:linkname FreeAutomata C.xmlFreeAutomata -func FreeAutomata(am AutomataPtr) - -//go:linkname AutomataGetInitState C.xmlAutomataGetInitState -func AutomataGetInitState(am AutomataPtr) AutomataStatePtr - -//go:linkname AutomataSetFinalState C.xmlAutomataSetFinalState -func AutomataSetFinalState(am AutomataPtr, state AutomataStatePtr) c.Int - -//go:linkname AutomataNewState C.xmlAutomataNewState -func AutomataNewState(am AutomataPtr) AutomataStatePtr - -//go:linkname AutomataNewTransition C.xmlAutomataNewTransition -func 
AutomataNewTransition(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewTransition2 C.xmlAutomataNewTransition2 -func AutomataNewTransition2(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, token2 *Char, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewNegTrans C.xmlAutomataNewNegTrans -func AutomataNewNegTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, token2 *Char, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewCountTrans C.xmlAutomataNewCountTrans -func AutomataNewCountTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, min c.Int, max c.Int, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewCountTrans2 C.xmlAutomataNewCountTrans2 -func AutomataNewCountTrans2(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, token2 *Char, min c.Int, max c.Int, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewOnceTrans C.xmlAutomataNewOnceTrans -func AutomataNewOnceTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, min c.Int, max c.Int, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewOnceTrans2 C.xmlAutomataNewOnceTrans2 -func AutomataNewOnceTrans2(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, token *Char, token2 *Char, min c.Int, max c.Int, data c.Pointer) AutomataStatePtr - -//go:linkname AutomataNewAllTrans C.xmlAutomataNewAllTrans -func AutomataNewAllTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, lax c.Int) AutomataStatePtr - -//go:linkname AutomataNewEpsilon C.xmlAutomataNewEpsilon -func AutomataNewEpsilon(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr) AutomataStatePtr - -//go:linkname AutomataNewCountedTrans C.xmlAutomataNewCountedTrans -func AutomataNewCountedTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, counter c.Int) AutomataStatePtr 
- -//go:linkname AutomataNewCounterTrans C.xmlAutomataNewCounterTrans -func AutomataNewCounterTrans(am AutomataPtr, from AutomataStatePtr, to AutomataStatePtr, counter c.Int) AutomataStatePtr - -//go:linkname AutomataNewCounter C.xmlAutomataNewCounter -func AutomataNewCounter(am AutomataPtr, min c.Int, max c.Int) c.Int - -//go:linkname AutomataCompile C.xmlAutomataCompile -func AutomataCompile(am AutomataPtr) *X_xmlRegexp - -//go:linkname AutomataIsDeterminist C.xmlAutomataIsDeterminist -func AutomataIsDeterminist(am AutomataPtr) c.Int diff --git a/libxml2/xmlerror.go b/libxml2/xmlerror.go deleted file mode 100644 index 0699bbca..00000000 --- a/libxml2/xmlerror.go +++ /dev/null @@ -1,908 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type ErrorLevel c.Int - -const ( - ERR_NONE ErrorLevel = 0 - ERR_WARNING ErrorLevel = 1 - ERR_ERROR ErrorLevel = 2 - ERR_FATAL ErrorLevel = 3 -) - -type ErrorDomain c.Int - -const ( - FROM_NONE ErrorDomain = 0 - FROM_PARSER ErrorDomain = 1 - FROM_TREE ErrorDomain = 2 - FROM_NAMESPACE ErrorDomain = 3 - FROM_DTD ErrorDomain = 4 - FROM_HTML ErrorDomain = 5 - FROM_MEMORY ErrorDomain = 6 - FROM_OUTPUT ErrorDomain = 7 - FROM_IO ErrorDomain = 8 - FROM_FTP ErrorDomain = 9 - FROM_HTTP ErrorDomain = 10 - FROM_XINCLUDE ErrorDomain = 11 - FROM_XPATH ErrorDomain = 12 - FROM_XPOINTER ErrorDomain = 13 - FROM_REGEXP ErrorDomain = 14 - FROM_DATATYPE ErrorDomain = 15 - FROM_SCHEMASP ErrorDomain = 16 - FROM_SCHEMASV ErrorDomain = 17 - FROM_RELAXNGP ErrorDomain = 18 - FROM_RELAXNGV ErrorDomain = 19 - FROM_CATALOG ErrorDomain = 20 - FROM_C14N ErrorDomain = 21 - FROM_XSLT ErrorDomain = 22 - FROM_VALID ErrorDomain = 23 - FROM_CHECK ErrorDomain = 24 - FROM_WRITER ErrorDomain = 25 - FROM_MODULE ErrorDomain = 26 - FROM_I18N ErrorDomain = 27 - FROM_SCHEMATRONV ErrorDomain = 28 - FROM_BUFFER ErrorDomain = 29 - FROM_URI ErrorDomain = 30 -) - -type X_xmlError struct { - Domain c.Int - Code c.Int - Message *c.Char - Level 
ErrorLevel - File *c.Char - Line c.Int - Str1 *c.Char - Str2 *c.Char - Str3 *c.Char - Int1 c.Int - Int2 c.Int - Ctxt c.Pointer - Node c.Pointer -} -type Error X_xmlError -type ErrorPtr *Error -type ParserErrors c.Int - -const ( - ERR_OK ParserErrors = 0 - ERR_INTERNAL_ERROR ParserErrors = 1 - ERR_NO_MEMORY ParserErrors = 2 - ERR_DOCUMENT_START ParserErrors = 3 - ERR_DOCUMENT_EMPTY ParserErrors = 4 - ERR_DOCUMENT_END ParserErrors = 5 - ERR_INVALID_HEX_CHARREF ParserErrors = 6 - ERR_INVALID_DEC_CHARREF ParserErrors = 7 - ERR_INVALID_CHARREF ParserErrors = 8 - ERR_INVALID_CHAR ParserErrors = 9 - ERR_CHARREF_AT_EOF ParserErrors = 10 - ERR_CHARREF_IN_PROLOG ParserErrors = 11 - ERR_CHARREF_IN_EPILOG ParserErrors = 12 - ERR_CHARREF_IN_DTD ParserErrors = 13 - ERR_ENTITYREF_AT_EOF ParserErrors = 14 - ERR_ENTITYREF_IN_PROLOG ParserErrors = 15 - ERR_ENTITYREF_IN_EPILOG ParserErrors = 16 - ERR_ENTITYREF_IN_DTD ParserErrors = 17 - ERR_PEREF_AT_EOF ParserErrors = 18 - ERR_PEREF_IN_PROLOG ParserErrors = 19 - ERR_PEREF_IN_EPILOG ParserErrors = 20 - ERR_PEREF_IN_INT_SUBSET ParserErrors = 21 - ERR_ENTITYREF_NO_NAME ParserErrors = 22 - ERR_ENTITYREF_SEMICOL_MISSING ParserErrors = 23 - ERR_PEREF_NO_NAME ParserErrors = 24 - ERR_PEREF_SEMICOL_MISSING ParserErrors = 25 - ERR_UNDECLARED_ENTITY ParserErrors = 26 - WAR_UNDECLARED_ENTITY ParserErrors = 27 - ERR_UNPARSED_ENTITY ParserErrors = 28 - ERR_ENTITY_IS_EXTERNAL ParserErrors = 29 - ERR_ENTITY_IS_PARAMETER ParserErrors = 30 - ERR_UNKNOWN_ENCODING ParserErrors = 31 - ERR_UNSUPPORTED_ENCODING ParserErrors = 32 - ERR_STRING_NOT_STARTED ParserErrors = 33 - ERR_STRING_NOT_CLOSED ParserErrors = 34 - ERR_NS_DECL_ERROR ParserErrors = 35 - ERR_ENTITY_NOT_STARTED ParserErrors = 36 - ERR_ENTITY_NOT_FINISHED ParserErrors = 37 - ERR_LT_IN_ATTRIBUTE ParserErrors = 38 - ERR_ATTRIBUTE_NOT_STARTED ParserErrors = 39 - ERR_ATTRIBUTE_NOT_FINISHED ParserErrors = 40 - ERR_ATTRIBUTE_WITHOUT_VALUE ParserErrors = 41 - ERR_ATTRIBUTE_REDEFINED ParserErrors = 42 
- ERR_LITERAL_NOT_STARTED ParserErrors = 43 - ERR_LITERAL_NOT_FINISHED ParserErrors = 44 - ERR_COMMENT_NOT_FINISHED ParserErrors = 45 - ERR_PI_NOT_STARTED ParserErrors = 46 - ERR_PI_NOT_FINISHED ParserErrors = 47 - ERR_NOTATION_NOT_STARTED ParserErrors = 48 - ERR_NOTATION_NOT_FINISHED ParserErrors = 49 - ERR_ATTLIST_NOT_STARTED ParserErrors = 50 - ERR_ATTLIST_NOT_FINISHED ParserErrors = 51 - ERR_MIXED_NOT_STARTED ParserErrors = 52 - ERR_MIXED_NOT_FINISHED ParserErrors = 53 - ERR_ELEMCONTENT_NOT_STARTED ParserErrors = 54 - ERR_ELEMCONTENT_NOT_FINISHED ParserErrors = 55 - ERR_XMLDECL_NOT_STARTED ParserErrors = 56 - ERR_XMLDECL_NOT_FINISHED ParserErrors = 57 - ERR_CONDSEC_NOT_STARTED ParserErrors = 58 - ERR_CONDSEC_NOT_FINISHED ParserErrors = 59 - ERR_EXT_SUBSET_NOT_FINISHED ParserErrors = 60 - ERR_DOCTYPE_NOT_FINISHED ParserErrors = 61 - ERR_MISPLACED_CDATA_END ParserErrors = 62 - ERR_CDATA_NOT_FINISHED ParserErrors = 63 - ERR_RESERVED_XML_NAME ParserErrors = 64 - ERR_SPACE_REQUIRED ParserErrors = 65 - ERR_SEPARATOR_REQUIRED ParserErrors = 66 - ERR_NMTOKEN_REQUIRED ParserErrors = 67 - ERR_NAME_REQUIRED ParserErrors = 68 - ERR_PCDATA_REQUIRED ParserErrors = 69 - ERR_URI_REQUIRED ParserErrors = 70 - ERR_PUBID_REQUIRED ParserErrors = 71 - ERR_LT_REQUIRED ParserErrors = 72 - ERR_GT_REQUIRED ParserErrors = 73 - ERR_LTSLASH_REQUIRED ParserErrors = 74 - ERR_EQUAL_REQUIRED ParserErrors = 75 - ERR_TAG_NAME_MISMATCH ParserErrors = 76 - ERR_TAG_NOT_FINISHED ParserErrors = 77 - ERR_STANDALONE_VALUE ParserErrors = 78 - ERR_ENCODING_NAME ParserErrors = 79 - ERR_HYPHEN_IN_COMMENT ParserErrors = 80 - ERR_INVALID_ENCODING ParserErrors = 81 - ERR_EXT_ENTITY_STANDALONE ParserErrors = 82 - ERR_CONDSEC_INVALID ParserErrors = 83 - ERR_VALUE_REQUIRED ParserErrors = 84 - ERR_NOT_WELL_BALANCED ParserErrors = 85 - ERR_EXTRA_CONTENT ParserErrors = 86 - ERR_ENTITY_CHAR_ERROR ParserErrors = 87 - ERR_ENTITY_PE_INTERNAL ParserErrors = 88 - ERR_ENTITY_LOOP ParserErrors = 89 - ERR_ENTITY_BOUNDARY 
ParserErrors = 90 - ERR_INVALID_URI ParserErrors = 91 - ERR_URI_FRAGMENT ParserErrors = 92 - WAR_CATALOG_PI ParserErrors = 93 - ERR_NO_DTD ParserErrors = 94 - ERR_CONDSEC_INVALID_KEYWORD ParserErrors = 95 - ERR_VERSION_MISSING ParserErrors = 96 - WAR_UNKNOWN_VERSION ParserErrors = 97 - WAR_LANG_VALUE ParserErrors = 98 - WAR_NS_URI ParserErrors = 99 - WAR_NS_URI_RELATIVE ParserErrors = 100 - ERR_MISSING_ENCODING ParserErrors = 101 - WAR_SPACE_VALUE ParserErrors = 102 - ERR_NOT_STANDALONE ParserErrors = 103 - ERR_ENTITY_PROCESSING ParserErrors = 104 - ERR_NOTATION_PROCESSING ParserErrors = 105 - WAR_NS_COLUMN ParserErrors = 106 - WAR_ENTITY_REDEFINED ParserErrors = 107 - ERR_UNKNOWN_VERSION ParserErrors = 108 - ERR_VERSION_MISMATCH ParserErrors = 109 - ERR_NAME_TOO_LONG ParserErrors = 110 - ERR_USER_STOP ParserErrors = 111 - ERR_COMMENT_ABRUPTLY_ENDED ParserErrors = 112 - WAR_ENCODING_MISMATCH ParserErrors = 113 - ERR_RESOURCE_LIMIT ParserErrors = 114 - ERR_ARGUMENT ParserErrors = 115 - ERR_SYSTEM ParserErrors = 116 - ERR_REDECL_PREDEF_ENTITY ParserErrors = 117 - ERR_INT_SUBSET_NOT_FINISHED ParserErrors = 118 - NS_ERR_XML_NAMESPACE ParserErrors = 200 - NS_ERR_UNDEFINED_NAMESPACE ParserErrors = 201 - NS_ERR_QNAME ParserErrors = 202 - NS_ERR_ATTRIBUTE_REDEFINED ParserErrors = 203 - NS_ERR_EMPTY ParserErrors = 204 - NS_ERR_COLON ParserErrors = 205 - DTD_ATTRIBUTE_DEFAULT ParserErrors = 500 - DTD_ATTRIBUTE_REDEFINED ParserErrors = 501 - DTD_ATTRIBUTE_VALUE ParserErrors = 502 - DTD_CONTENT_ERROR ParserErrors = 503 - DTD_CONTENT_MODEL ParserErrors = 504 - DTD_CONTENT_NOT_DETERMINIST ParserErrors = 505 - DTD_DIFFERENT_PREFIX ParserErrors = 506 - DTD_ELEM_DEFAULT_NAMESPACE ParserErrors = 507 - DTD_ELEM_NAMESPACE ParserErrors = 508 - DTD_ELEM_REDEFINED ParserErrors = 509 - DTD_EMPTY_NOTATION ParserErrors = 510 - DTD_ENTITY_TYPE ParserErrors = 511 - DTD_ID_FIXED ParserErrors = 512 - DTD_ID_REDEFINED ParserErrors = 513 - DTD_ID_SUBSET ParserErrors = 514 - DTD_INVALID_CHILD 
ParserErrors = 515 - DTD_INVALID_DEFAULT ParserErrors = 516 - DTD_LOAD_ERROR ParserErrors = 517 - DTD_MISSING_ATTRIBUTE ParserErrors = 518 - DTD_MIXED_CORRUPT ParserErrors = 519 - DTD_MULTIPLE_ID ParserErrors = 520 - DTD_NO_DOC ParserErrors = 521 - DTD_NO_DTD ParserErrors = 522 - DTD_NO_ELEM_NAME ParserErrors = 523 - DTD_NO_PREFIX ParserErrors = 524 - DTD_NO_ROOT ParserErrors = 525 - DTD_NOTATION_REDEFINED ParserErrors = 526 - DTD_NOTATION_VALUE ParserErrors = 527 - DTD_NOT_EMPTY ParserErrors = 528 - DTD_NOT_PCDATA ParserErrors = 529 - DTD_NOT_STANDALONE ParserErrors = 530 - DTD_ROOT_NAME ParserErrors = 531 - DTD_STANDALONE_WHITE_SPACE ParserErrors = 532 - DTD_UNKNOWN_ATTRIBUTE ParserErrors = 533 - DTD_UNKNOWN_ELEM ParserErrors = 534 - DTD_UNKNOWN_ENTITY ParserErrors = 535 - DTD_UNKNOWN_ID ParserErrors = 536 - DTD_UNKNOWN_NOTATION ParserErrors = 537 - DTD_STANDALONE_DEFAULTED ParserErrors = 538 - DTD_XMLID_VALUE ParserErrors = 539 - DTD_XMLID_TYPE ParserErrors = 540 - DTD_DUP_TOKEN ParserErrors = 541 - HTML_STRUCURE_ERROR ParserErrors = 800 - HTML_UNKNOWN_TAG ParserErrors = 801 - HTML_INCORRECTLY_OPENED_COMMENT ParserErrors = 802 - RNGP_ANYNAME_ATTR_ANCESTOR ParserErrors = 1000 - RNGP_ATTR_CONFLICT ParserErrors = 1001 - RNGP_ATTRIBUTE_CHILDREN ParserErrors = 1002 - RNGP_ATTRIBUTE_CONTENT ParserErrors = 1003 - RNGP_ATTRIBUTE_EMPTY ParserErrors = 1004 - RNGP_ATTRIBUTE_NOOP ParserErrors = 1005 - RNGP_CHOICE_CONTENT ParserErrors = 1006 - RNGP_CHOICE_EMPTY ParserErrors = 1007 - RNGP_CREATE_FAILURE ParserErrors = 1008 - RNGP_DATA_CONTENT ParserErrors = 1009 - RNGP_DEF_CHOICE_AND_INTERLEAVE ParserErrors = 1010 - RNGP_DEFINE_CREATE_FAILED ParserErrors = 1011 - RNGP_DEFINE_EMPTY ParserErrors = 1012 - RNGP_DEFINE_MISSING ParserErrors = 1013 - RNGP_DEFINE_NAME_MISSING ParserErrors = 1014 - RNGP_ELEM_CONTENT_EMPTY ParserErrors = 1015 - RNGP_ELEM_CONTENT_ERROR ParserErrors = 1016 - RNGP_ELEMENT_EMPTY ParserErrors = 1017 - RNGP_ELEMENT_CONTENT ParserErrors = 1018 - 
RNGP_ELEMENT_NAME ParserErrors = 1019 - RNGP_ELEMENT_NO_CONTENT ParserErrors = 1020 - RNGP_ELEM_TEXT_CONFLICT ParserErrors = 1021 - RNGP_EMPTY ParserErrors = 1022 - RNGP_EMPTY_CONSTRUCT ParserErrors = 1023 - RNGP_EMPTY_CONTENT ParserErrors = 1024 - RNGP_EMPTY_NOT_EMPTY ParserErrors = 1025 - RNGP_ERROR_TYPE_LIB ParserErrors = 1026 - RNGP_EXCEPT_EMPTY ParserErrors = 1027 - RNGP_EXCEPT_MISSING ParserErrors = 1028 - RNGP_EXCEPT_MULTIPLE ParserErrors = 1029 - RNGP_EXCEPT_NO_CONTENT ParserErrors = 1030 - RNGP_EXTERNALREF_EMTPY ParserErrors = 1031 - RNGP_EXTERNAL_REF_FAILURE ParserErrors = 1032 - RNGP_EXTERNALREF_RECURSE ParserErrors = 1033 - RNGP_FORBIDDEN_ATTRIBUTE ParserErrors = 1034 - RNGP_FOREIGN_ELEMENT ParserErrors = 1035 - RNGP_GRAMMAR_CONTENT ParserErrors = 1036 - RNGP_GRAMMAR_EMPTY ParserErrors = 1037 - RNGP_GRAMMAR_MISSING ParserErrors = 1038 - RNGP_GRAMMAR_NO_START ParserErrors = 1039 - RNGP_GROUP_ATTR_CONFLICT ParserErrors = 1040 - RNGP_HREF_ERROR ParserErrors = 1041 - RNGP_INCLUDE_EMPTY ParserErrors = 1042 - RNGP_INCLUDE_FAILURE ParserErrors = 1043 - RNGP_INCLUDE_RECURSE ParserErrors = 1044 - RNGP_INTERLEAVE_ADD ParserErrors = 1045 - RNGP_INTERLEAVE_CREATE_FAILED ParserErrors = 1046 - RNGP_INTERLEAVE_EMPTY ParserErrors = 1047 - RNGP_INTERLEAVE_NO_CONTENT ParserErrors = 1048 - RNGP_INVALID_DEFINE_NAME ParserErrors = 1049 - RNGP_INVALID_URI ParserErrors = 1050 - RNGP_INVALID_VALUE ParserErrors = 1051 - RNGP_MISSING_HREF ParserErrors = 1052 - RNGP_NAME_MISSING ParserErrors = 1053 - RNGP_NEED_COMBINE ParserErrors = 1054 - RNGP_NOTALLOWED_NOT_EMPTY ParserErrors = 1055 - RNGP_NSNAME_ATTR_ANCESTOR ParserErrors = 1056 - RNGP_NSNAME_NO_NS ParserErrors = 1057 - RNGP_PARAM_FORBIDDEN ParserErrors = 1058 - RNGP_PARAM_NAME_MISSING ParserErrors = 1059 - RNGP_PARENTREF_CREATE_FAILED ParserErrors = 1060 - RNGP_PARENTREF_NAME_INVALID ParserErrors = 1061 - RNGP_PARENTREF_NO_NAME ParserErrors = 1062 - RNGP_PARENTREF_NO_PARENT ParserErrors = 1063 - RNGP_PARENTREF_NOT_EMPTY 
ParserErrors = 1064 - RNGP_PARSE_ERROR ParserErrors = 1065 - RNGP_PAT_ANYNAME_EXCEPT_ANYNAME ParserErrors = 1066 - RNGP_PAT_ATTR_ATTR ParserErrors = 1067 - RNGP_PAT_ATTR_ELEM ParserErrors = 1068 - RNGP_PAT_DATA_EXCEPT_ATTR ParserErrors = 1069 - RNGP_PAT_DATA_EXCEPT_ELEM ParserErrors = 1070 - RNGP_PAT_DATA_EXCEPT_EMPTY ParserErrors = 1071 - RNGP_PAT_DATA_EXCEPT_GROUP ParserErrors = 1072 - RNGP_PAT_DATA_EXCEPT_INTERLEAVE ParserErrors = 1073 - RNGP_PAT_DATA_EXCEPT_LIST ParserErrors = 1074 - RNGP_PAT_DATA_EXCEPT_ONEMORE ParserErrors = 1075 - RNGP_PAT_DATA_EXCEPT_REF ParserErrors = 1076 - RNGP_PAT_DATA_EXCEPT_TEXT ParserErrors = 1077 - RNGP_PAT_LIST_ATTR ParserErrors = 1078 - RNGP_PAT_LIST_ELEM ParserErrors = 1079 - RNGP_PAT_LIST_INTERLEAVE ParserErrors = 1080 - RNGP_PAT_LIST_LIST ParserErrors = 1081 - RNGP_PAT_LIST_REF ParserErrors = 1082 - RNGP_PAT_LIST_TEXT ParserErrors = 1083 - RNGP_PAT_NSNAME_EXCEPT_ANYNAME ParserErrors = 1084 - RNGP_PAT_NSNAME_EXCEPT_NSNAME ParserErrors = 1085 - RNGP_PAT_ONEMORE_GROUP_ATTR ParserErrors = 1086 - RNGP_PAT_ONEMORE_INTERLEAVE_ATTR ParserErrors = 1087 - RNGP_PAT_START_ATTR ParserErrors = 1088 - RNGP_PAT_START_DATA ParserErrors = 1089 - RNGP_PAT_START_EMPTY ParserErrors = 1090 - RNGP_PAT_START_GROUP ParserErrors = 1091 - RNGP_PAT_START_INTERLEAVE ParserErrors = 1092 - RNGP_PAT_START_LIST ParserErrors = 1093 - RNGP_PAT_START_ONEMORE ParserErrors = 1094 - RNGP_PAT_START_TEXT ParserErrors = 1095 - RNGP_PAT_START_VALUE ParserErrors = 1096 - RNGP_PREFIX_UNDEFINED ParserErrors = 1097 - RNGP_REF_CREATE_FAILED ParserErrors = 1098 - RNGP_REF_CYCLE ParserErrors = 1099 - RNGP_REF_NAME_INVALID ParserErrors = 1100 - RNGP_REF_NO_DEF ParserErrors = 1101 - RNGP_REF_NO_NAME ParserErrors = 1102 - RNGP_REF_NOT_EMPTY ParserErrors = 1103 - RNGP_START_CHOICE_AND_INTERLEAVE ParserErrors = 1104 - RNGP_START_CONTENT ParserErrors = 1105 - RNGP_START_EMPTY ParserErrors = 1106 - RNGP_START_MISSING ParserErrors = 1107 - RNGP_TEXT_EXPECTED ParserErrors = 1108 - 
RNGP_TEXT_HAS_CHILD ParserErrors = 1109 - RNGP_TYPE_MISSING ParserErrors = 1110 - RNGP_TYPE_NOT_FOUND ParserErrors = 1111 - RNGP_TYPE_VALUE ParserErrors = 1112 - RNGP_UNKNOWN_ATTRIBUTE ParserErrors = 1113 - RNGP_UNKNOWN_COMBINE ParserErrors = 1114 - RNGP_UNKNOWN_CONSTRUCT ParserErrors = 1115 - RNGP_UNKNOWN_TYPE_LIB ParserErrors = 1116 - RNGP_URI_FRAGMENT ParserErrors = 1117 - RNGP_URI_NOT_ABSOLUTE ParserErrors = 1118 - RNGP_VALUE_EMPTY ParserErrors = 1119 - RNGP_VALUE_NO_CONTENT ParserErrors = 1120 - RNGP_XMLNS_NAME ParserErrors = 1121 - RNGP_XML_NS ParserErrors = 1122 - XPATH_EXPRESSION_OK ParserErrors = 1200 - XPATH_NUMBER_ERROR ParserErrors = 1201 - XPATH_UNFINISHED_LITERAL_ERROR ParserErrors = 1202 - XPATH_START_LITERAL_ERROR ParserErrors = 1203 - XPATH_VARIABLE_REF_ERROR ParserErrors = 1204 - XPATH_UNDEF_VARIABLE_ERROR ParserErrors = 1205 - XPATH_INVALID_PREDICATE_ERROR ParserErrors = 1206 - XPATH_EXPR_ERROR ParserErrors = 1207 - XPATH_UNCLOSED_ERROR ParserErrors = 1208 - XPATH_UNKNOWN_FUNC_ERROR ParserErrors = 1209 - XPATH_INVALID_OPERAND ParserErrors = 1210 - XPATH_INVALID_TYPE ParserErrors = 1211 - XPATH_INVALID_ARITY ParserErrors = 1212 - XPATH_INVALID_CTXT_SIZE ParserErrors = 1213 - XPATH_INVALID_CTXT_POSITION ParserErrors = 1214 - XPATH_MEMORY_ERROR ParserErrors = 1215 - XPTR_SYNTAX_ERROR ParserErrors = 1216 - XPTR_RESOURCE_ERROR ParserErrors = 1217 - XPTR_SUB_RESOURCE_ERROR ParserErrors = 1218 - XPATH_UNDEF_PREFIX_ERROR ParserErrors = 1219 - XPATH_ENCODING_ERROR ParserErrors = 1220 - XPATH_INVALID_CHAR_ERROR ParserErrors = 1221 - TREE_INVALID_HEX ParserErrors = 1300 - TREE_INVALID_DEC ParserErrors = 1301 - TREE_UNTERMINATED_ENTITY ParserErrors = 1302 - TREE_NOT_UTF8 ParserErrors = 1303 - SAVE_NOT_UTF8 ParserErrors = 1400 - SAVE_CHAR_INVALID ParserErrors = 1401 - SAVE_NO_DOCTYPE ParserErrors = 1402 - SAVE_UNKNOWN_ENCODING ParserErrors = 1403 - REGEXP_COMPILE_ERROR ParserErrors = 1450 - IO_UNKNOWN ParserErrors = 1500 - IO_EACCES ParserErrors = 1501 - 
IO_EAGAIN ParserErrors = 1502 - IO_EBADF ParserErrors = 1503 - IO_EBADMSG ParserErrors = 1504 - IO_EBUSY ParserErrors = 1505 - IO_ECANCELED ParserErrors = 1506 - IO_ECHILD ParserErrors = 1507 - IO_EDEADLK ParserErrors = 1508 - IO_EDOM ParserErrors = 1509 - IO_EEXIST ParserErrors = 1510 - IO_EFAULT ParserErrors = 1511 - IO_EFBIG ParserErrors = 1512 - IO_EINPROGRESS ParserErrors = 1513 - IO_EINTR ParserErrors = 1514 - IO_EINVAL ParserErrors = 1515 - IO_EIO ParserErrors = 1516 - IO_EISDIR ParserErrors = 1517 - IO_EMFILE ParserErrors = 1518 - IO_EMLINK ParserErrors = 1519 - IO_EMSGSIZE ParserErrors = 1520 - IO_ENAMETOOLONG ParserErrors = 1521 - IO_ENFILE ParserErrors = 1522 - IO_ENODEV ParserErrors = 1523 - IO_ENOENT ParserErrors = 1524 - IO_ENOEXEC ParserErrors = 1525 - IO_ENOLCK ParserErrors = 1526 - IO_ENOMEM ParserErrors = 1527 - IO_ENOSPC ParserErrors = 1528 - IO_ENOSYS ParserErrors = 1529 - IO_ENOTDIR ParserErrors = 1530 - IO_ENOTEMPTY ParserErrors = 1531 - IO_ENOTSUP ParserErrors = 1532 - IO_ENOTTY ParserErrors = 1533 - IO_ENXIO ParserErrors = 1534 - IO_EPERM ParserErrors = 1535 - IO_EPIPE ParserErrors = 1536 - IO_ERANGE ParserErrors = 1537 - IO_EROFS ParserErrors = 1538 - IO_ESPIPE ParserErrors = 1539 - IO_ESRCH ParserErrors = 1540 - IO_ETIMEDOUT ParserErrors = 1541 - IO_EXDEV ParserErrors = 1542 - IO_NETWORK_ATTEMPT ParserErrors = 1543 - IO_ENCODER ParserErrors = 1544 - IO_FLUSH ParserErrors = 1545 - IO_WRITE ParserErrors = 1546 - IO_NO_INPUT ParserErrors = 1547 - IO_BUFFER_FULL ParserErrors = 1548 - IO_LOAD_ERROR ParserErrors = 1549 - IO_ENOTSOCK ParserErrors = 1550 - IO_EISCONN ParserErrors = 1551 - IO_ECONNREFUSED ParserErrors = 1552 - IO_ENETUNREACH ParserErrors = 1553 - IO_EADDRINUSE ParserErrors = 1554 - IO_EALREADY ParserErrors = 1555 - IO_EAFNOSUPPORT ParserErrors = 1556 - IO_UNSUPPORTED_PROTOCOL ParserErrors = 1557 - XINCLUDE_RECURSION ParserErrors = 1600 - XINCLUDE_PARSE_VALUE ParserErrors = 1601 - XINCLUDE_ENTITY_DEF_MISMATCH ParserErrors = 1602 - 
XINCLUDE_NO_HREF ParserErrors = 1603 - XINCLUDE_NO_FALLBACK ParserErrors = 1604 - XINCLUDE_HREF_URI ParserErrors = 1605 - XINCLUDE_TEXT_FRAGMENT ParserErrors = 1606 - XINCLUDE_TEXT_DOCUMENT ParserErrors = 1607 - XINCLUDE_INVALID_CHAR ParserErrors = 1608 - XINCLUDE_BUILD_FAILED ParserErrors = 1609 - XINCLUDE_UNKNOWN_ENCODING ParserErrors = 1610 - XINCLUDE_MULTIPLE_ROOT ParserErrors = 1611 - XINCLUDE_XPTR_FAILED ParserErrors = 1612 - XINCLUDE_XPTR_RESULT ParserErrors = 1613 - XINCLUDE_INCLUDE_IN_INCLUDE ParserErrors = 1614 - XINCLUDE_FALLBACKS_IN_INCLUDE ParserErrors = 1615 - XINCLUDE_FALLBACK_NOT_IN_INCLUDE ParserErrors = 1616 - XINCLUDE_DEPRECATED_NS ParserErrors = 1617 - XINCLUDE_FRAGMENT_ID ParserErrors = 1618 - CATALOG_MISSING_ATTR ParserErrors = 1650 - CATALOG_ENTRY_BROKEN ParserErrors = 1651 - CATALOG_PREFER_VALUE ParserErrors = 1652 - CATALOG_NOT_CATALOG ParserErrors = 1653 - CATALOG_RECURSION ParserErrors = 1654 - SCHEMAP_PREFIX_UNDEFINED ParserErrors = 1700 - SCHEMAP_ATTRFORMDEFAULT_VALUE ParserErrors = 1701 - SCHEMAP_ATTRGRP_NONAME_NOREF ParserErrors = 1702 - SCHEMAP_ATTR_NONAME_NOREF ParserErrors = 1703 - SCHEMAP_COMPLEXTYPE_NONAME_NOREF ParserErrors = 1704 - SCHEMAP_ELEMFORMDEFAULT_VALUE ParserErrors = 1705 - SCHEMAP_ELEM_NONAME_NOREF ParserErrors = 1706 - SCHEMAP_EXTENSION_NO_BASE ParserErrors = 1707 - SCHEMAP_FACET_NO_VALUE ParserErrors = 1708 - SCHEMAP_FAILED_BUILD_IMPORT ParserErrors = 1709 - SCHEMAP_GROUP_NONAME_NOREF ParserErrors = 1710 - SCHEMAP_IMPORT_NAMESPACE_NOT_URI ParserErrors = 1711 - SCHEMAP_IMPORT_REDEFINE_NSNAME ParserErrors = 1712 - SCHEMAP_IMPORT_SCHEMA_NOT_URI ParserErrors = 1713 - SCHEMAP_INVALID_BOOLEAN ParserErrors = 1714 - SCHEMAP_INVALID_ENUM ParserErrors = 1715 - SCHEMAP_INVALID_FACET ParserErrors = 1716 - SCHEMAP_INVALID_FACET_VALUE ParserErrors = 1717 - SCHEMAP_INVALID_MAXOCCURS ParserErrors = 1718 - SCHEMAP_INVALID_MINOCCURS ParserErrors = 1719 - SCHEMAP_INVALID_REF_AND_SUBTYPE ParserErrors = 1720 - 
SCHEMAP_INVALID_WHITE_SPACE ParserErrors = 1721 - SCHEMAP_NOATTR_NOREF ParserErrors = 1722 - SCHEMAP_NOTATION_NO_NAME ParserErrors = 1723 - SCHEMAP_NOTYPE_NOREF ParserErrors = 1724 - SCHEMAP_REF_AND_SUBTYPE ParserErrors = 1725 - SCHEMAP_RESTRICTION_NONAME_NOREF ParserErrors = 1726 - SCHEMAP_SIMPLETYPE_NONAME ParserErrors = 1727 - SCHEMAP_TYPE_AND_SUBTYPE ParserErrors = 1728 - SCHEMAP_UNKNOWN_ALL_CHILD ParserErrors = 1729 - SCHEMAP_UNKNOWN_ANYATTRIBUTE_CHILD ParserErrors = 1730 - SCHEMAP_UNKNOWN_ATTR_CHILD ParserErrors = 1731 - SCHEMAP_UNKNOWN_ATTRGRP_CHILD ParserErrors = 1732 - SCHEMAP_UNKNOWN_ATTRIBUTE_GROUP ParserErrors = 1733 - SCHEMAP_UNKNOWN_BASE_TYPE ParserErrors = 1734 - SCHEMAP_UNKNOWN_CHOICE_CHILD ParserErrors = 1735 - SCHEMAP_UNKNOWN_COMPLEXCONTENT_CHILD ParserErrors = 1736 - SCHEMAP_UNKNOWN_COMPLEXTYPE_CHILD ParserErrors = 1737 - SCHEMAP_UNKNOWN_ELEM_CHILD ParserErrors = 1738 - SCHEMAP_UNKNOWN_EXTENSION_CHILD ParserErrors = 1739 - SCHEMAP_UNKNOWN_FACET_CHILD ParserErrors = 1740 - SCHEMAP_UNKNOWN_FACET_TYPE ParserErrors = 1741 - SCHEMAP_UNKNOWN_GROUP_CHILD ParserErrors = 1742 - SCHEMAP_UNKNOWN_IMPORT_CHILD ParserErrors = 1743 - SCHEMAP_UNKNOWN_LIST_CHILD ParserErrors = 1744 - SCHEMAP_UNKNOWN_NOTATION_CHILD ParserErrors = 1745 - SCHEMAP_UNKNOWN_PROCESSCONTENT_CHILD ParserErrors = 1746 - SCHEMAP_UNKNOWN_REF ParserErrors = 1747 - SCHEMAP_UNKNOWN_RESTRICTION_CHILD ParserErrors = 1748 - SCHEMAP_UNKNOWN_SCHEMAS_CHILD ParserErrors = 1749 - SCHEMAP_UNKNOWN_SEQUENCE_CHILD ParserErrors = 1750 - SCHEMAP_UNKNOWN_SIMPLECONTENT_CHILD ParserErrors = 1751 - SCHEMAP_UNKNOWN_SIMPLETYPE_CHILD ParserErrors = 1752 - SCHEMAP_UNKNOWN_TYPE ParserErrors = 1753 - SCHEMAP_UNKNOWN_UNION_CHILD ParserErrors = 1754 - SCHEMAP_ELEM_DEFAULT_FIXED ParserErrors = 1755 - SCHEMAP_REGEXP_INVALID ParserErrors = 1756 - SCHEMAP_FAILED_LOAD ParserErrors = 1757 - SCHEMAP_NOTHING_TO_PARSE ParserErrors = 1758 - SCHEMAP_NOROOT ParserErrors = 1759 - SCHEMAP_REDEFINED_GROUP ParserErrors = 1760 - 
SCHEMAP_REDEFINED_TYPE ParserErrors = 1761 - SCHEMAP_REDEFINED_ELEMENT ParserErrors = 1762 - SCHEMAP_REDEFINED_ATTRGROUP ParserErrors = 1763 - SCHEMAP_REDEFINED_ATTR ParserErrors = 1764 - SCHEMAP_REDEFINED_NOTATION ParserErrors = 1765 - SCHEMAP_FAILED_PARSE ParserErrors = 1766 - SCHEMAP_UNKNOWN_PREFIX ParserErrors = 1767 - SCHEMAP_DEF_AND_PREFIX ParserErrors = 1768 - SCHEMAP_UNKNOWN_INCLUDE_CHILD ParserErrors = 1769 - SCHEMAP_INCLUDE_SCHEMA_NOT_URI ParserErrors = 1770 - SCHEMAP_INCLUDE_SCHEMA_NO_URI ParserErrors = 1771 - SCHEMAP_NOT_SCHEMA ParserErrors = 1772 - SCHEMAP_UNKNOWN_MEMBER_TYPE ParserErrors = 1773 - SCHEMAP_INVALID_ATTR_USE ParserErrors = 1774 - SCHEMAP_RECURSIVE ParserErrors = 1775 - SCHEMAP_SUPERNUMEROUS_LIST_ITEM_TYPE ParserErrors = 1776 - SCHEMAP_INVALID_ATTR_COMBINATION ParserErrors = 1777 - SCHEMAP_INVALID_ATTR_INLINE_COMBINATION ParserErrors = 1778 - SCHEMAP_MISSING_SIMPLETYPE_CHILD ParserErrors = 1779 - SCHEMAP_INVALID_ATTR_NAME ParserErrors = 1780 - SCHEMAP_REF_AND_CONTENT ParserErrors = 1781 - SCHEMAP_CT_PROPS_CORRECT_1 ParserErrors = 1782 - SCHEMAP_CT_PROPS_CORRECT_2 ParserErrors = 1783 - SCHEMAP_CT_PROPS_CORRECT_3 ParserErrors = 1784 - SCHEMAP_CT_PROPS_CORRECT_4 ParserErrors = 1785 - SCHEMAP_CT_PROPS_CORRECT_5 ParserErrors = 1786 - SCHEMAP_DERIVATION_OK_RESTRICTION_1 ParserErrors = 1787 - SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_1 ParserErrors = 1788 - SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_2 ParserErrors = 1789 - SCHEMAP_DERIVATION_OK_RESTRICTION_2_2 ParserErrors = 1790 - SCHEMAP_DERIVATION_OK_RESTRICTION_3 ParserErrors = 1791 - SCHEMAP_WILDCARD_INVALID_NS_MEMBER ParserErrors = 1792 - SCHEMAP_INTERSECTION_NOT_EXPRESSIBLE ParserErrors = 1793 - SCHEMAP_UNION_NOT_EXPRESSIBLE ParserErrors = 1794 - SCHEMAP_SRC_IMPORT_3_1 ParserErrors = 1795 - SCHEMAP_SRC_IMPORT_3_2 ParserErrors = 1796 - SCHEMAP_DERIVATION_OK_RESTRICTION_4_1 ParserErrors = 1797 - SCHEMAP_DERIVATION_OK_RESTRICTION_4_2 ParserErrors = 1798 - SCHEMAP_DERIVATION_OK_RESTRICTION_4_3 
ParserErrors = 1799 - SCHEMAP_COS_CT_EXTENDS_1_3 ParserErrors = 1800 - SCHEMAV_NOROOT ParserErrors = 1801 - SCHEMAV_UNDECLAREDELEM ParserErrors = 1802 - SCHEMAV_NOTTOPLEVEL ParserErrors = 1803 - SCHEMAV_MISSING ParserErrors = 1804 - SCHEMAV_WRONGELEM ParserErrors = 1805 - SCHEMAV_NOTYPE ParserErrors = 1806 - SCHEMAV_NOROLLBACK ParserErrors = 1807 - SCHEMAV_ISABSTRACT ParserErrors = 1808 - SCHEMAV_NOTEMPTY ParserErrors = 1809 - SCHEMAV_ELEMCONT ParserErrors = 1810 - SCHEMAV_HAVEDEFAULT ParserErrors = 1811 - SCHEMAV_NOTNILLABLE ParserErrors = 1812 - SCHEMAV_EXTRACONTENT ParserErrors = 1813 - SCHEMAV_INVALIDATTR ParserErrors = 1814 - SCHEMAV_INVALIDELEM ParserErrors = 1815 - SCHEMAV_NOTDETERMINIST ParserErrors = 1816 - SCHEMAV_CONSTRUCT ParserErrors = 1817 - SCHEMAV_INTERNAL ParserErrors = 1818 - SCHEMAV_NOTSIMPLE ParserErrors = 1819 - SCHEMAV_ATTRUNKNOWN ParserErrors = 1820 - SCHEMAV_ATTRINVALID ParserErrors = 1821 - SCHEMAV_VALUE ParserErrors = 1822 - SCHEMAV_FACET ParserErrors = 1823 - SCHEMAV_CVC_DATATYPE_VALID_1_2_1 ParserErrors = 1824 - SCHEMAV_CVC_DATATYPE_VALID_1_2_2 ParserErrors = 1825 - SCHEMAV_CVC_DATATYPE_VALID_1_2_3 ParserErrors = 1826 - SCHEMAV_CVC_TYPE_3_1_1 ParserErrors = 1827 - SCHEMAV_CVC_TYPE_3_1_2 ParserErrors = 1828 - SCHEMAV_CVC_FACET_VALID ParserErrors = 1829 - SCHEMAV_CVC_LENGTH_VALID ParserErrors = 1830 - SCHEMAV_CVC_MINLENGTH_VALID ParserErrors = 1831 - SCHEMAV_CVC_MAXLENGTH_VALID ParserErrors = 1832 - SCHEMAV_CVC_MININCLUSIVE_VALID ParserErrors = 1833 - SCHEMAV_CVC_MAXINCLUSIVE_VALID ParserErrors = 1834 - SCHEMAV_CVC_MINEXCLUSIVE_VALID ParserErrors = 1835 - SCHEMAV_CVC_MAXEXCLUSIVE_VALID ParserErrors = 1836 - SCHEMAV_CVC_TOTALDIGITS_VALID ParserErrors = 1837 - SCHEMAV_CVC_FRACTIONDIGITS_VALID ParserErrors = 1838 - SCHEMAV_CVC_PATTERN_VALID ParserErrors = 1839 - SCHEMAV_CVC_ENUMERATION_VALID ParserErrors = 1840 - SCHEMAV_CVC_COMPLEX_TYPE_2_1 ParserErrors = 1841 - SCHEMAV_CVC_COMPLEX_TYPE_2_2 ParserErrors = 1842 - SCHEMAV_CVC_COMPLEX_TYPE_2_3 
ParserErrors = 1843 - SCHEMAV_CVC_COMPLEX_TYPE_2_4 ParserErrors = 1844 - SCHEMAV_CVC_ELT_1 ParserErrors = 1845 - SCHEMAV_CVC_ELT_2 ParserErrors = 1846 - SCHEMAV_CVC_ELT_3_1 ParserErrors = 1847 - SCHEMAV_CVC_ELT_3_2_1 ParserErrors = 1848 - SCHEMAV_CVC_ELT_3_2_2 ParserErrors = 1849 - SCHEMAV_CVC_ELT_4_1 ParserErrors = 1850 - SCHEMAV_CVC_ELT_4_2 ParserErrors = 1851 - SCHEMAV_CVC_ELT_4_3 ParserErrors = 1852 - SCHEMAV_CVC_ELT_5_1_1 ParserErrors = 1853 - SCHEMAV_CVC_ELT_5_1_2 ParserErrors = 1854 - SCHEMAV_CVC_ELT_5_2_1 ParserErrors = 1855 - SCHEMAV_CVC_ELT_5_2_2_1 ParserErrors = 1856 - SCHEMAV_CVC_ELT_5_2_2_2_1 ParserErrors = 1857 - SCHEMAV_CVC_ELT_5_2_2_2_2 ParserErrors = 1858 - SCHEMAV_CVC_ELT_6 ParserErrors = 1859 - SCHEMAV_CVC_ELT_7 ParserErrors = 1860 - SCHEMAV_CVC_ATTRIBUTE_1 ParserErrors = 1861 - SCHEMAV_CVC_ATTRIBUTE_2 ParserErrors = 1862 - SCHEMAV_CVC_ATTRIBUTE_3 ParserErrors = 1863 - SCHEMAV_CVC_ATTRIBUTE_4 ParserErrors = 1864 - SCHEMAV_CVC_COMPLEX_TYPE_3_1 ParserErrors = 1865 - SCHEMAV_CVC_COMPLEX_TYPE_3_2_1 ParserErrors = 1866 - SCHEMAV_CVC_COMPLEX_TYPE_3_2_2 ParserErrors = 1867 - SCHEMAV_CVC_COMPLEX_TYPE_4 ParserErrors = 1868 - SCHEMAV_CVC_COMPLEX_TYPE_5_1 ParserErrors = 1869 - SCHEMAV_CVC_COMPLEX_TYPE_5_2 ParserErrors = 1870 - SCHEMAV_ELEMENT_CONTENT ParserErrors = 1871 - SCHEMAV_DOCUMENT_ELEMENT_MISSING ParserErrors = 1872 - SCHEMAV_CVC_COMPLEX_TYPE_1 ParserErrors = 1873 - SCHEMAV_CVC_AU ParserErrors = 1874 - SCHEMAV_CVC_TYPE_1 ParserErrors = 1875 - SCHEMAV_CVC_TYPE_2 ParserErrors = 1876 - SCHEMAV_CVC_IDC ParserErrors = 1877 - SCHEMAV_CVC_WILDCARD ParserErrors = 1878 - SCHEMAV_MISC ParserErrors = 1879 - XPTR_UNKNOWN_SCHEME ParserErrors = 1900 - XPTR_CHILDSEQ_START ParserErrors = 1901 - XPTR_EVAL_FAILED ParserErrors = 1902 - XPTR_EXTRA_OBJECTS ParserErrors = 1903 - C14N_CREATE_CTXT ParserErrors = 1950 - C14N_REQUIRES_UTF8 ParserErrors = 1951 - C14N_CREATE_STACK ParserErrors = 1952 - C14N_INVALID_NODE ParserErrors = 1953 - C14N_UNKNOW_NODE ParserErrors = 
1954 - C14N_RELATIVE_NAMESPACE ParserErrors = 1955 - FTP_PASV_ANSWER ParserErrors = 2000 - FTP_EPSV_ANSWER ParserErrors = 2001 - FTP_ACCNT ParserErrors = 2002 - FTP_URL_SYNTAX ParserErrors = 2003 - HTTP_URL_SYNTAX ParserErrors = 2020 - HTTP_USE_IP ParserErrors = 2021 - HTTP_UNKNOWN_HOST ParserErrors = 2022 - SCHEMAP_SRC_SIMPLE_TYPE_1 ParserErrors = 3000 - SCHEMAP_SRC_SIMPLE_TYPE_2 ParserErrors = 3001 - SCHEMAP_SRC_SIMPLE_TYPE_3 ParserErrors = 3002 - SCHEMAP_SRC_SIMPLE_TYPE_4 ParserErrors = 3003 - SCHEMAP_SRC_RESOLVE ParserErrors = 3004 - SCHEMAP_SRC_RESTRICTION_BASE_OR_SIMPLETYPE ParserErrors = 3005 - SCHEMAP_SRC_LIST_ITEMTYPE_OR_SIMPLETYPE ParserErrors = 3006 - SCHEMAP_SRC_UNION_MEMBERTYPES_OR_SIMPLETYPES ParserErrors = 3007 - SCHEMAP_ST_PROPS_CORRECT_1 ParserErrors = 3008 - SCHEMAP_ST_PROPS_CORRECT_2 ParserErrors = 3009 - SCHEMAP_ST_PROPS_CORRECT_3 ParserErrors = 3010 - SCHEMAP_COS_ST_RESTRICTS_1_1 ParserErrors = 3011 - SCHEMAP_COS_ST_RESTRICTS_1_2 ParserErrors = 3012 - SCHEMAP_COS_ST_RESTRICTS_1_3_1 ParserErrors = 3013 - SCHEMAP_COS_ST_RESTRICTS_1_3_2 ParserErrors = 3014 - SCHEMAP_COS_ST_RESTRICTS_2_1 ParserErrors = 3015 - SCHEMAP_COS_ST_RESTRICTS_2_3_1_1 ParserErrors = 3016 - SCHEMAP_COS_ST_RESTRICTS_2_3_1_2 ParserErrors = 3017 - SCHEMAP_COS_ST_RESTRICTS_2_3_2_1 ParserErrors = 3018 - SCHEMAP_COS_ST_RESTRICTS_2_3_2_2 ParserErrors = 3019 - SCHEMAP_COS_ST_RESTRICTS_2_3_2_3 ParserErrors = 3020 - SCHEMAP_COS_ST_RESTRICTS_2_3_2_4 ParserErrors = 3021 - SCHEMAP_COS_ST_RESTRICTS_2_3_2_5 ParserErrors = 3022 - SCHEMAP_COS_ST_RESTRICTS_3_1 ParserErrors = 3023 - SCHEMAP_COS_ST_RESTRICTS_3_3_1 ParserErrors = 3024 - SCHEMAP_COS_ST_RESTRICTS_3_3_1_2 ParserErrors = 3025 - SCHEMAP_COS_ST_RESTRICTS_3_3_2_2 ParserErrors = 3026 - SCHEMAP_COS_ST_RESTRICTS_3_3_2_1 ParserErrors = 3027 - SCHEMAP_COS_ST_RESTRICTS_3_3_2_3 ParserErrors = 3028 - SCHEMAP_COS_ST_RESTRICTS_3_3_2_4 ParserErrors = 3029 - SCHEMAP_COS_ST_RESTRICTS_3_3_2_5 ParserErrors = 3030 - SCHEMAP_COS_ST_DERIVED_OK_2_1 
ParserErrors = 3031 - SCHEMAP_COS_ST_DERIVED_OK_2_2 ParserErrors = 3032 - SCHEMAP_S4S_ELEM_NOT_ALLOWED ParserErrors = 3033 - SCHEMAP_S4S_ELEM_MISSING ParserErrors = 3034 - SCHEMAP_S4S_ATTR_NOT_ALLOWED ParserErrors = 3035 - SCHEMAP_S4S_ATTR_MISSING ParserErrors = 3036 - SCHEMAP_S4S_ATTR_INVALID_VALUE ParserErrors = 3037 - SCHEMAP_SRC_ELEMENT_1 ParserErrors = 3038 - SCHEMAP_SRC_ELEMENT_2_1 ParserErrors = 3039 - SCHEMAP_SRC_ELEMENT_2_2 ParserErrors = 3040 - SCHEMAP_SRC_ELEMENT_3 ParserErrors = 3041 - SCHEMAP_P_PROPS_CORRECT_1 ParserErrors = 3042 - SCHEMAP_P_PROPS_CORRECT_2_1 ParserErrors = 3043 - SCHEMAP_P_PROPS_CORRECT_2_2 ParserErrors = 3044 - SCHEMAP_E_PROPS_CORRECT_2 ParserErrors = 3045 - SCHEMAP_E_PROPS_CORRECT_3 ParserErrors = 3046 - SCHEMAP_E_PROPS_CORRECT_4 ParserErrors = 3047 - SCHEMAP_E_PROPS_CORRECT_5 ParserErrors = 3048 - SCHEMAP_E_PROPS_CORRECT_6 ParserErrors = 3049 - SCHEMAP_SRC_INCLUDE ParserErrors = 3050 - SCHEMAP_SRC_ATTRIBUTE_1 ParserErrors = 3051 - SCHEMAP_SRC_ATTRIBUTE_2 ParserErrors = 3052 - SCHEMAP_SRC_ATTRIBUTE_3_1 ParserErrors = 3053 - SCHEMAP_SRC_ATTRIBUTE_3_2 ParserErrors = 3054 - SCHEMAP_SRC_ATTRIBUTE_4 ParserErrors = 3055 - SCHEMAP_NO_XMLNS ParserErrors = 3056 - SCHEMAP_NO_XSI ParserErrors = 3057 - SCHEMAP_COS_VALID_DEFAULT_1 ParserErrors = 3058 - SCHEMAP_COS_VALID_DEFAULT_2_1 ParserErrors = 3059 - SCHEMAP_COS_VALID_DEFAULT_2_2_1 ParserErrors = 3060 - SCHEMAP_COS_VALID_DEFAULT_2_2_2 ParserErrors = 3061 - SCHEMAP_CVC_SIMPLE_TYPE ParserErrors = 3062 - SCHEMAP_COS_CT_EXTENDS_1_1 ParserErrors = 3063 - SCHEMAP_SRC_IMPORT_1_1 ParserErrors = 3064 - SCHEMAP_SRC_IMPORT_1_2 ParserErrors = 3065 - SCHEMAP_SRC_IMPORT_2 ParserErrors = 3066 - SCHEMAP_SRC_IMPORT_2_1 ParserErrors = 3067 - SCHEMAP_SRC_IMPORT_2_2 ParserErrors = 3068 - SCHEMAP_INTERNAL ParserErrors = 3069 - SCHEMAP_NOT_DETERMINISTIC ParserErrors = 3070 - SCHEMAP_SRC_ATTRIBUTE_GROUP_1 ParserErrors = 3071 - SCHEMAP_SRC_ATTRIBUTE_GROUP_2 ParserErrors = 3072 - SCHEMAP_SRC_ATTRIBUTE_GROUP_3 
ParserErrors = 3073 - SCHEMAP_MG_PROPS_CORRECT_1 ParserErrors = 3074 - SCHEMAP_MG_PROPS_CORRECT_2 ParserErrors = 3075 - SCHEMAP_SRC_CT_1 ParserErrors = 3076 - SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_3 ParserErrors = 3077 - SCHEMAP_AU_PROPS_CORRECT_2 ParserErrors = 3078 - SCHEMAP_A_PROPS_CORRECT_2 ParserErrors = 3079 - SCHEMAP_C_PROPS_CORRECT ParserErrors = 3080 - SCHEMAP_SRC_REDEFINE ParserErrors = 3081 - SCHEMAP_SRC_IMPORT ParserErrors = 3082 - SCHEMAP_WARN_SKIP_SCHEMA ParserErrors = 3083 - SCHEMAP_WARN_UNLOCATED_SCHEMA ParserErrors = 3084 - SCHEMAP_WARN_ATTR_REDECL_PROH ParserErrors = 3085 - SCHEMAP_WARN_ATTR_POINTLESS_PROH ParserErrors = 3086 - SCHEMAP_AG_PROPS_CORRECT ParserErrors = 3087 - SCHEMAP_COS_CT_EXTENDS_1_2 ParserErrors = 3088 - SCHEMAP_AU_PROPS_CORRECT ParserErrors = 3089 - SCHEMAP_A_PROPS_CORRECT_3 ParserErrors = 3090 - SCHEMAP_COS_ALL_LIMITED ParserErrors = 3091 - SCHEMATRONV_ASSERT ParserErrors = 4000 - SCHEMATRONV_REPORT ParserErrors = 4001 - MODULE_OPEN ParserErrors = 4900 - MODULE_CLOSE ParserErrors = 4901 - CHECK_FOUND_ELEMENT ParserErrors = 5000 - CHECK_FOUND_ATTRIBUTE ParserErrors = 5001 - CHECK_FOUND_TEXT ParserErrors = 5002 - CHECK_FOUND_CDATA ParserErrors = 5003 - CHECK_FOUND_ENTITYREF ParserErrors = 5004 - CHECK_FOUND_ENTITY ParserErrors = 5005 - CHECK_FOUND_PI ParserErrors = 5006 - CHECK_FOUND_COMMENT ParserErrors = 5007 - CHECK_FOUND_DOCTYPE ParserErrors = 5008 - CHECK_FOUND_FRAGMENT ParserErrors = 5009 - CHECK_FOUND_NOTATION ParserErrors = 5010 - CHECK_UNKNOWN_NODE ParserErrors = 5011 - CHECK_ENTITY_TYPE ParserErrors = 5012 - CHECK_NO_PARENT ParserErrors = 5013 - CHECK_NO_DOC ParserErrors = 5014 - CHECK_NO_NAME ParserErrors = 5015 - CHECK_NO_ELEM ParserErrors = 5016 - CHECK_WRONG_DOC ParserErrors = 5017 - CHECK_NO_PREV ParserErrors = 5018 - CHECK_WRONG_PREV ParserErrors = 5019 - CHECK_NO_NEXT ParserErrors = 5020 - CHECK_WRONG_NEXT ParserErrors = 5021 - CHECK_NOT_DTD ParserErrors = 5022 - CHECK_NOT_ATTR ParserErrors = 5023 - 
CHECK_NOT_ATTR_DECL ParserErrors = 5024 - CHECK_NOT_ELEM_DECL ParserErrors = 5025 - CHECK_NOT_ENTITY_DECL ParserErrors = 5026 - CHECK_NOT_NS_DECL ParserErrors = 5027 - CHECK_NO_HREF ParserErrors = 5028 - CHECK_WRONG_PARENT ParserErrors = 5029 - CHECK_NS_SCOPE ParserErrors = 5030 - CHECK_NS_ANCESTOR ParserErrors = 5031 - CHECK_NOT_UTF8 ParserErrors = 5032 - CHECK_NO_DICT ParserErrors = 5033 - CHECK_NOT_NCNAME ParserErrors = 5034 - CHECK_OUTSIDE_DICT ParserErrors = 5035 - CHECK_WRONG_NAME ParserErrors = 5036 - CHECK_NAME_NOT_NULL ParserErrors = 5037 - I18N_NO_NAME ParserErrors = 6000 - I18N_NO_HANDLER ParserErrors = 6001 - I18N_EXCESS_HANDLER ParserErrors = 6002 - I18N_CONV_FAILED ParserErrors = 6003 - I18N_NO_OUTPUT ParserErrors = 6004 - BUF_OVERFLOW ParserErrors = 7000 -) - -// llgo:type C -type GenericErrorFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type StructuredErrorFunc func(c.Pointer, *Error) - -//go:linkname X__xmlLastError C.__xmlLastError -func X__xmlLastError() *Error - -//go:linkname X__xmlGenericError C.__xmlGenericError -func X__xmlGenericError() GenericErrorFunc - -//go:linkname X__xmlGenericErrorContext C.__xmlGenericErrorContext -func X__xmlGenericErrorContext() *c.Pointer - -//go:linkname X__xmlStructuredError C.__xmlStructuredError -func X__xmlStructuredError() StructuredErrorFunc - -//go:linkname X__xmlStructuredErrorContext C.__xmlStructuredErrorContext -func X__xmlStructuredErrorContext() *c.Pointer - -/* - * Use the following function to reset the two global variables - * xmlGenericError and xmlGenericErrorContext. 
- */ -//go:linkname SetGenericErrorFunc C.xmlSetGenericErrorFunc -func SetGenericErrorFunc(ctx c.Pointer, handler GenericErrorFunc) - -//go:linkname ThrDefSetGenericErrorFunc C.xmlThrDefSetGenericErrorFunc -func ThrDefSetGenericErrorFunc(ctx c.Pointer, handler GenericErrorFunc) - -//go:linkname InitGenericErrorDefaultFunc C.initGenericErrorDefaultFunc -func InitGenericErrorDefaultFunc(handler GenericErrorFunc) - -//go:linkname SetStructuredErrorFunc C.xmlSetStructuredErrorFunc -func SetStructuredErrorFunc(ctx c.Pointer, handler StructuredErrorFunc) - -//go:linkname ThrDefSetStructuredErrorFunc C.xmlThrDefSetStructuredErrorFunc -func ThrDefSetStructuredErrorFunc(ctx c.Pointer, handler StructuredErrorFunc) - -/* - * Default message routines used by SAX and Valid context for error - * and warning reporting. - */ -//go:linkname ParserError C.xmlParserError -func ParserError(ctx c.Pointer, msg *c.Char, __llgo_va_list ...interface{}) - -//go:linkname ParserWarning C.xmlParserWarning -func ParserWarning(ctx c.Pointer, msg *c.Char, __llgo_va_list ...interface{}) - -//go:linkname ParserValidityError C.xmlParserValidityError -func ParserValidityError(ctx c.Pointer, msg *c.Char, __llgo_va_list ...interface{}) - -//go:linkname ParserValidityWarning C.xmlParserValidityWarning -func ParserValidityWarning(ctx c.Pointer, msg *c.Char, __llgo_va_list ...interface{}) - -/** DOC_ENABLE */ -// llgo:link (*X_xmlParserInput).ParserPrintFileInfo C.xmlParserPrintFileInfo -func (recv_ *X_xmlParserInput) ParserPrintFileInfo() { -} - -// llgo:link (*X_xmlParserInput).ParserPrintFileContext C.xmlParserPrintFileContext -func (recv_ *X_xmlParserInput) ParserPrintFileContext() { -} - -// llgo:link (*Error).FormatError C.xmlFormatError -func (recv_ *Error) FormatError(channel GenericErrorFunc, data c.Pointer) { -} - -/* - * Extended error information routines - */ -//go:linkname GetLastError C.xmlGetLastError -func GetLastError() *Error - -//go:linkname ResetLastError C.xmlResetLastError -func 
ResetLastError() - -//go:linkname CtxtGetLastError C.xmlCtxtGetLastError -func CtxtGetLastError(ctx c.Pointer) *Error - -//go:linkname CtxtResetLastError C.xmlCtxtResetLastError -func CtxtResetLastError(ctx c.Pointer) - -//go:linkname ResetError C.xmlResetError -func ResetError(err ErrorPtr) - -// llgo:link (*Error).CopyError C.xmlCopyError -func (recv_ *Error) CopyError(to ErrorPtr) c.Int { - return 0 -} diff --git a/libxml2/xmlexports.go b/libxml2/xmlexports.go deleted file mode 100644 index 31e3a101..00000000 --- a/libxml2/xmlexports.go +++ /dev/null @@ -1,12 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -/* - * Originally declared in xmlversion.h which is generated - */ -//go:linkname CheckVersion C.xmlCheckVersion -func CheckVersion(version c.Int) diff --git a/libxml2/xmlmemory.go b/libxml2/xmlmemory.go deleted file mode 100644 index 9debab38..00000000 --- a/libxml2/xmlmemory.go +++ /dev/null @@ -1,95 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -// llgo:type C -type FreeFunc func(c.Pointer) - -// llgo:type C -type MallocFunc func(c.SizeT) c.Pointer - -// llgo:type C -type ReallocFunc func(c.Pointer, c.SizeT) c.Pointer - -// llgo:type C -type StrdupFunc func(*c.Char) *c.Char - -/* - * The way to overload the existing functions. 
- * The xmlGc function have an extra entry for atomic block - * allocations useful for garbage collected memory allocators - */ -//go:linkname MemSetup C.xmlMemSetup -func MemSetup(freeFunc FreeFunc, mallocFunc MallocFunc, reallocFunc ReallocFunc, strdupFunc StrdupFunc) c.Int - -//go:linkname MemGet C.xmlMemGet -func MemGet(freeFunc FreeFunc, mallocFunc MallocFunc, reallocFunc ReallocFunc, strdupFunc StrdupFunc) c.Int - -//go:linkname GcMemSetup C.xmlGcMemSetup -func GcMemSetup(freeFunc FreeFunc, mallocFunc MallocFunc, mallocAtomicFunc MallocFunc, reallocFunc ReallocFunc, strdupFunc StrdupFunc) c.Int - -//go:linkname GcMemGet C.xmlGcMemGet -func GcMemGet(freeFunc FreeFunc, mallocFunc MallocFunc, mallocAtomicFunc MallocFunc, reallocFunc ReallocFunc, strdupFunc StrdupFunc) c.Int - -/* - * Initialization of the memory layer. - */ -//go:linkname InitMemory C.xmlInitMemory -func InitMemory() c.Int - -/* - * Cleanup of the memory layer. - */ -//go:linkname CleanupMemory C.xmlCleanupMemory -func CleanupMemory() - -/* - * These are specific to the XML debug memory wrapper. 
- */ -//go:linkname MemSize C.xmlMemSize -func MemSize(ptr c.Pointer) c.SizeT - -//go:linkname MemUsed C.xmlMemUsed -func MemUsed() c.Int - -//go:linkname MemBlocks C.xmlMemBlocks -func MemBlocks() c.Int - -//go:linkname MemDisplay C.xmlMemDisplay -func MemDisplay(fp *c.FILE) - -//go:linkname MemDisplayLast C.xmlMemDisplayLast -func MemDisplayLast(fp *c.FILE, nbBytes c.Long) - -//go:linkname MemShow C.xmlMemShow -func MemShow(fp *c.FILE, nr c.Int) - -//go:linkname MemoryDump C.xmlMemoryDump -func MemoryDump() - -//go:linkname MemMalloc C.xmlMemMalloc -func MemMalloc(size c.SizeT) c.Pointer - -//go:linkname MemRealloc C.xmlMemRealloc -func MemRealloc(ptr c.Pointer, size c.SizeT) c.Pointer - -//go:linkname MemFree C.xmlMemFree -func MemFree(ptr c.Pointer) - -//go:linkname MemoryStrdup C.xmlMemoryStrdup -func MemoryStrdup(str *c.Char) *c.Char - -//go:linkname MallocLoc C.xmlMallocLoc -func MallocLoc(size c.SizeT, file *c.Char, line c.Int) c.Pointer - -//go:linkname ReallocLoc C.xmlReallocLoc -func ReallocLoc(ptr c.Pointer, size c.SizeT, file *c.Char, line c.Int) c.Pointer - -//go:linkname MallocAtomicLoc C.xmlMallocAtomicLoc -func MallocAtomicLoc(size c.SizeT, file *c.Char, line c.Int) c.Pointer - -//go:linkname MemStrdupLoc C.xmlMemStrdupLoc -func MemStrdupLoc(str *c.Char, file *c.Char, line c.Int) *c.Char diff --git a/libxml2/xmlmodule.go b/libxml2/xmlmodule.go deleted file mode 100644 index 8d66f5e3..00000000 --- a/libxml2/xmlmodule.go +++ /dev/null @@ -1,30 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlModule struct { - Unused [8]uint8 -} -type Module X_xmlModule -type ModulePtr *Module -type ModuleOption c.Int - -const ( - MODULE_LAZY ModuleOption = 1 - MODULE_LOCAL ModuleOption = 2 -) - -//go:linkname ModuleOpen C.xmlModuleOpen -func ModuleOpen(filename *c.Char, options c.Int) ModulePtr - -//go:linkname ModuleSymbol C.xmlModuleSymbol -func ModuleSymbol(module ModulePtr, name *c.Char, result *c.Pointer) c.Int - 
-//go:linkname ModuleClose C.xmlModuleClose -func ModuleClose(module ModulePtr) c.Int - -//go:linkname ModuleFree C.xmlModuleFree -func ModuleFree(module ModulePtr) c.Int diff --git a/libxml2/xmlreader.go b/libxml2/xmlreader.go deleted file mode 100644 index 0cf89be6..00000000 --- a/libxml2/xmlreader.go +++ /dev/null @@ -1,360 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type ParserSeverities c.Int - -const ( - PARSER_SEVERITY_VALIDITY_WARNING ParserSeverities = 1 - PARSER_SEVERITY_VALIDITY_ERROR ParserSeverities = 2 - PARSER_SEVERITY_WARNING ParserSeverities = 3 - PARSER_SEVERITY_ERROR ParserSeverities = 4 -) - -type TextReaderMode c.Int - -const ( - TEXTREADER_MODE_INITIAL TextReaderMode = 0 - TEXTREADER_MODE_INTERACTIVE TextReaderMode = 1 - TEXTREADER_MODE_ERROR TextReaderMode = 2 - TEXTREADER_MODE_EOF TextReaderMode = 3 - TEXTREADER_MODE_CLOSED TextReaderMode = 4 - TEXTREADER_MODE_READING TextReaderMode = 5 -) - -type ParserProperties c.Int - -const ( - PARSER_LOADDTD ParserProperties = 1 - PARSER_DEFAULTATTRS ParserProperties = 2 - PARSER_VALIDATE ParserProperties = 3 - PARSER_SUBST_ENTITIES ParserProperties = 4 -) - -type ReaderTypes c.Int - -const ( - READER_TYPE_NONE ReaderTypes = 0 - READER_TYPE_ELEMENT ReaderTypes = 1 - READER_TYPE_ATTRIBUTE ReaderTypes = 2 - READER_TYPE_TEXT ReaderTypes = 3 - READER_TYPE_CDATA ReaderTypes = 4 - READER_TYPE_ENTITY_REFERENCE ReaderTypes = 5 - READER_TYPE_ENTITY ReaderTypes = 6 - READER_TYPE_PROCESSING_INSTRUCTION ReaderTypes = 7 - READER_TYPE_COMMENT ReaderTypes = 8 - READER_TYPE_DOCUMENT ReaderTypes = 9 - READER_TYPE_DOCUMENT_TYPE ReaderTypes = 10 - READER_TYPE_DOCUMENT_FRAGMENT ReaderTypes = 11 - READER_TYPE_NOTATION ReaderTypes = 12 - READER_TYPE_WHITESPACE ReaderTypes = 13 - READER_TYPE_SIGNIFICANT_WHITESPACE ReaderTypes = 14 - READER_TYPE_END_ELEMENT ReaderTypes = 15 - READER_TYPE_END_ENTITY ReaderTypes = 16 - READER_TYPE_XML_DECLARATION ReaderTypes = 17 -) - -type 
X_xmlTextReader struct { - Unused [8]uint8 -} -type TextReader X_xmlTextReader -type TextReaderPtr *TextReader - -/* - * Constructors & Destructor - */ -//go:linkname NewTextReader C.xmlNewTextReader -func NewTextReader(input ParserInputBufferPtr, URI *c.Char) TextReaderPtr - -//go:linkname NewTextReaderFilename C.xmlNewTextReaderFilename -func NewTextReaderFilename(URI *c.Char) TextReaderPtr - -//go:linkname FreeTextReader C.xmlFreeTextReader -func FreeTextReader(reader TextReaderPtr) - -//go:linkname TextReaderSetup C.xmlTextReaderSetup -func TextReaderSetup(reader TextReaderPtr, input ParserInputBufferPtr, URL *c.Char, encoding *c.Char, options c.Int) c.Int - -//go:linkname TextReaderSetMaxAmplification C.xmlTextReaderSetMaxAmplification -func TextReaderSetMaxAmplification(reader TextReaderPtr, maxAmpl c.Uint) - -//go:linkname TextReaderGetLastError C.xmlTextReaderGetLastError -func TextReaderGetLastError(reader TextReaderPtr) *Error - -/* - * Iterators - */ -//go:linkname TextReaderRead C.xmlTextReaderRead -func TextReaderRead(reader TextReaderPtr) c.Int - -//go:linkname TextReaderReadInnerXml C.xmlTextReaderReadInnerXml -func TextReaderReadInnerXml(reader TextReaderPtr) *Char - -//go:linkname TextReaderReadOuterXml C.xmlTextReaderReadOuterXml -func TextReaderReadOuterXml(reader TextReaderPtr) *Char - -//go:linkname TextReaderReadString C.xmlTextReaderReadString -func TextReaderReadString(reader TextReaderPtr) *Char - -//go:linkname TextReaderReadAttributeValue C.xmlTextReaderReadAttributeValue -func TextReaderReadAttributeValue(reader TextReaderPtr) c.Int - -/* - * Attributes of the node - */ -//go:linkname TextReaderAttributeCount C.xmlTextReaderAttributeCount -func TextReaderAttributeCount(reader TextReaderPtr) c.Int - -//go:linkname TextReaderDepth C.xmlTextReaderDepth -func TextReaderDepth(reader TextReaderPtr) c.Int - -//go:linkname TextReaderHasAttributes C.xmlTextReaderHasAttributes -func TextReaderHasAttributes(reader TextReaderPtr) c.Int - 
-//go:linkname TextReaderHasValue C.xmlTextReaderHasValue -func TextReaderHasValue(reader TextReaderPtr) c.Int - -//go:linkname TextReaderIsDefault C.xmlTextReaderIsDefault -func TextReaderIsDefault(reader TextReaderPtr) c.Int - -//go:linkname TextReaderIsEmptyElement C.xmlTextReaderIsEmptyElement -func TextReaderIsEmptyElement(reader TextReaderPtr) c.Int - -//go:linkname TextReaderNodeType C.xmlTextReaderNodeType -func TextReaderNodeType(reader TextReaderPtr) c.Int - -//go:linkname TextReaderQuoteChar C.xmlTextReaderQuoteChar -func TextReaderQuoteChar(reader TextReaderPtr) c.Int - -//go:linkname TextReaderReadState C.xmlTextReaderReadState -func TextReaderReadState(reader TextReaderPtr) c.Int - -//go:linkname TextReaderIsNamespaceDecl C.xmlTextReaderIsNamespaceDecl -func TextReaderIsNamespaceDecl(reader TextReaderPtr) c.Int - -//go:linkname TextReaderConstBaseUri C.xmlTextReaderConstBaseUri -func TextReaderConstBaseUri(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstLocalName C.xmlTextReaderConstLocalName -func TextReaderConstLocalName(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstName C.xmlTextReaderConstName -func TextReaderConstName(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstNamespaceUri C.xmlTextReaderConstNamespaceUri -func TextReaderConstNamespaceUri(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstPrefix C.xmlTextReaderConstPrefix -func TextReaderConstPrefix(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstXmlLang C.xmlTextReaderConstXmlLang -func TextReaderConstXmlLang(reader TextReaderPtr) *Char - -//go:linkname TextReaderConstString C.xmlTextReaderConstString -func TextReaderConstString(reader TextReaderPtr, str *Char) *Char - -//go:linkname TextReaderConstValue C.xmlTextReaderConstValue -func TextReaderConstValue(reader TextReaderPtr) *Char - -/* - * use the Const version of the routine for - * better performance and simpler code - */ -//go:linkname TextReaderBaseUri C.xmlTextReaderBaseUri 
-func TextReaderBaseUri(reader TextReaderPtr) *Char - -//go:linkname TextReaderLocalName C.xmlTextReaderLocalName -func TextReaderLocalName(reader TextReaderPtr) *Char - -//go:linkname TextReaderName C.xmlTextReaderName -func TextReaderName(reader TextReaderPtr) *Char - -//go:linkname TextReaderNamespaceUri C.xmlTextReaderNamespaceUri -func TextReaderNamespaceUri(reader TextReaderPtr) *Char - -//go:linkname TextReaderPrefix C.xmlTextReaderPrefix -func TextReaderPrefix(reader TextReaderPtr) *Char - -//go:linkname TextReaderXmlLang C.xmlTextReaderXmlLang -func TextReaderXmlLang(reader TextReaderPtr) *Char - -//go:linkname TextReaderValue C.xmlTextReaderValue -func TextReaderValue(reader TextReaderPtr) *Char - -/* - * Methods of the XmlTextReader - */ -//go:linkname TextReaderClose C.xmlTextReaderClose -func TextReaderClose(reader TextReaderPtr) c.Int - -//go:linkname TextReaderGetAttributeNo C.xmlTextReaderGetAttributeNo -func TextReaderGetAttributeNo(reader TextReaderPtr, no c.Int) *Char - -//go:linkname TextReaderGetAttribute C.xmlTextReaderGetAttribute -func TextReaderGetAttribute(reader TextReaderPtr, name *Char) *Char - -//go:linkname TextReaderGetAttributeNs C.xmlTextReaderGetAttributeNs -func TextReaderGetAttributeNs(reader TextReaderPtr, localName *Char, namespaceURI *Char) *Char - -//go:linkname TextReaderGetRemainder C.xmlTextReaderGetRemainder -func TextReaderGetRemainder(reader TextReaderPtr) ParserInputBufferPtr - -//go:linkname TextReaderLookupNamespace C.xmlTextReaderLookupNamespace -func TextReaderLookupNamespace(reader TextReaderPtr, prefix *Char) *Char - -//go:linkname TextReaderMoveToAttributeNo C.xmlTextReaderMoveToAttributeNo -func TextReaderMoveToAttributeNo(reader TextReaderPtr, no c.Int) c.Int - -//go:linkname TextReaderMoveToAttribute C.xmlTextReaderMoveToAttribute -func TextReaderMoveToAttribute(reader TextReaderPtr, name *Char) c.Int - -//go:linkname TextReaderMoveToAttributeNs C.xmlTextReaderMoveToAttributeNs -func 
TextReaderMoveToAttributeNs(reader TextReaderPtr, localName *Char, namespaceURI *Char) c.Int - -//go:linkname TextReaderMoveToFirstAttribute C.xmlTextReaderMoveToFirstAttribute -func TextReaderMoveToFirstAttribute(reader TextReaderPtr) c.Int - -//go:linkname TextReaderMoveToNextAttribute C.xmlTextReaderMoveToNextAttribute -func TextReaderMoveToNextAttribute(reader TextReaderPtr) c.Int - -//go:linkname TextReaderMoveToElement C.xmlTextReaderMoveToElement -func TextReaderMoveToElement(reader TextReaderPtr) c.Int - -//go:linkname TextReaderNormalization C.xmlTextReaderNormalization -func TextReaderNormalization(reader TextReaderPtr) c.Int - -//go:linkname TextReaderConstEncoding C.xmlTextReaderConstEncoding -func TextReaderConstEncoding(reader TextReaderPtr) *Char - -/* - * Extensions - */ -//go:linkname TextReaderSetParserProp C.xmlTextReaderSetParserProp -func TextReaderSetParserProp(reader TextReaderPtr, prop c.Int, value c.Int) c.Int - -//go:linkname TextReaderGetParserProp C.xmlTextReaderGetParserProp -func TextReaderGetParserProp(reader TextReaderPtr, prop c.Int) c.Int - -//go:linkname TextReaderCurrentNode C.xmlTextReaderCurrentNode -func TextReaderCurrentNode(reader TextReaderPtr) NodePtr - -//go:linkname TextReaderGetParserLineNumber C.xmlTextReaderGetParserLineNumber -func TextReaderGetParserLineNumber(reader TextReaderPtr) c.Int - -//go:linkname TextReaderGetParserColumnNumber C.xmlTextReaderGetParserColumnNumber -func TextReaderGetParserColumnNumber(reader TextReaderPtr) c.Int - -//go:linkname TextReaderPreserve C.xmlTextReaderPreserve -func TextReaderPreserve(reader TextReaderPtr) NodePtr - -//go:linkname TextReaderPreservePattern C.xmlTextReaderPreservePattern -func TextReaderPreservePattern(reader TextReaderPtr, pattern *Char, namespaces **Char) c.Int - -//go:linkname TextReaderCurrentDoc C.xmlTextReaderCurrentDoc -func TextReaderCurrentDoc(reader TextReaderPtr) DocPtr - -//go:linkname TextReaderExpand C.xmlTextReaderExpand -func TextReaderExpand(reader 
TextReaderPtr) NodePtr - -//go:linkname TextReaderNext C.xmlTextReaderNext -func TextReaderNext(reader TextReaderPtr) c.Int - -//go:linkname TextReaderNextSibling C.xmlTextReaderNextSibling -func TextReaderNextSibling(reader TextReaderPtr) c.Int - -//go:linkname TextReaderIsValid C.xmlTextReaderIsValid -func TextReaderIsValid(reader TextReaderPtr) c.Int - -//go:linkname TextReaderRelaxNGValidate C.xmlTextReaderRelaxNGValidate -func TextReaderRelaxNGValidate(reader TextReaderPtr, rng *c.Char) c.Int - -//go:linkname TextReaderRelaxNGValidateCtxt C.xmlTextReaderRelaxNGValidateCtxt -func TextReaderRelaxNGValidateCtxt(reader TextReaderPtr, ctxt RelaxNGValidCtxtPtr, options c.Int) c.Int - -//go:linkname TextReaderRelaxNGSetSchema C.xmlTextReaderRelaxNGSetSchema -func TextReaderRelaxNGSetSchema(reader TextReaderPtr, schema RelaxNGPtr) c.Int - -//go:linkname TextReaderSchemaValidate C.xmlTextReaderSchemaValidate -func TextReaderSchemaValidate(reader TextReaderPtr, xsd *c.Char) c.Int - -//go:linkname TextReaderSchemaValidateCtxt C.xmlTextReaderSchemaValidateCtxt -func TextReaderSchemaValidateCtxt(reader TextReaderPtr, ctxt SchemaValidCtxtPtr, options c.Int) c.Int - -//go:linkname TextReaderSetSchema C.xmlTextReaderSetSchema -func TextReaderSetSchema(reader TextReaderPtr, schema SchemaPtr) c.Int - -//go:linkname TextReaderConstXmlVersion C.xmlTextReaderConstXmlVersion -func TextReaderConstXmlVersion(reader TextReaderPtr) *Char - -//go:linkname TextReaderStandalone C.xmlTextReaderStandalone -func TextReaderStandalone(reader TextReaderPtr) c.Int - -/* - * Index lookup - */ -//go:linkname TextReaderByteConsumed C.xmlTextReaderByteConsumed -func TextReaderByteConsumed(reader TextReaderPtr) c.Long - -/* - * New more complete APIs for simpler creation and reuse of readers - */ -//go:linkname ReaderWalker C.xmlReaderWalker -func ReaderWalker(doc DocPtr) TextReaderPtr - -// llgo:link (*Char).ReaderForDoc C.xmlReaderForDoc -func (recv_ *Char) ReaderForDoc(URL *c.Char, encoding 
*c.Char, options c.Int) TextReaderPtr { - return nil -} - -//go:linkname ReaderForFile C.xmlReaderForFile -func ReaderForFile(filename *c.Char, encoding *c.Char, options c.Int) TextReaderPtr - -//go:linkname ReaderForMemory C.xmlReaderForMemory -func ReaderForMemory(buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) TextReaderPtr - -//go:linkname ReaderForFd C.xmlReaderForFd -func ReaderForFd(fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) TextReaderPtr - -//go:linkname ReaderForIO C.xmlReaderForIO -func ReaderForIO(ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) TextReaderPtr - -//go:linkname ReaderNewWalker C.xmlReaderNewWalker -func ReaderNewWalker(reader TextReaderPtr, doc DocPtr) c.Int - -//go:linkname ReaderNewDoc C.xmlReaderNewDoc -func ReaderNewDoc(reader TextReaderPtr, cur *Char, URL *c.Char, encoding *c.Char, options c.Int) c.Int - -//go:linkname ReaderNewFile C.xmlReaderNewFile -func ReaderNewFile(reader TextReaderPtr, filename *c.Char, encoding *c.Char, options c.Int) c.Int - -//go:linkname ReaderNewMemory C.xmlReaderNewMemory -func ReaderNewMemory(reader TextReaderPtr, buffer *c.Char, size c.Int, URL *c.Char, encoding *c.Char, options c.Int) c.Int - -//go:linkname ReaderNewFd C.xmlReaderNewFd -func ReaderNewFd(reader TextReaderPtr, fd c.Int, URL *c.Char, encoding *c.Char, options c.Int) c.Int - -//go:linkname ReaderNewIO C.xmlReaderNewIO -func ReaderNewIO(reader TextReaderPtr, ioread InputReadCallback, ioclose InputCloseCallback, ioctx c.Pointer, URL *c.Char, encoding *c.Char, options c.Int) c.Int - -type TextReaderLocatorPtr c.Pointer - -// llgo:type C -type TextReaderErrorFunc func(c.Pointer, *c.Char, ParserSeverities, TextReaderLocatorPtr) - -//go:linkname TextReaderLocatorLineNumber C.xmlTextReaderLocatorLineNumber -func TextReaderLocatorLineNumber(locator TextReaderLocatorPtr) c.Int - -//go:linkname TextReaderLocatorBaseURI 
C.xmlTextReaderLocatorBaseURI -func TextReaderLocatorBaseURI(locator TextReaderLocatorPtr) *Char - -//go:linkname TextReaderSetErrorHandler C.xmlTextReaderSetErrorHandler -func TextReaderSetErrorHandler(reader TextReaderPtr, f TextReaderErrorFunc, arg c.Pointer) - -//go:linkname TextReaderSetStructuredErrorHandler C.xmlTextReaderSetStructuredErrorHandler -func TextReaderSetStructuredErrorHandler(reader TextReaderPtr, f StructuredErrorFunc, arg c.Pointer) - -//go:linkname TextReaderGetErrorHandler C.xmlTextReaderGetErrorHandler -func TextReaderGetErrorHandler(reader TextReaderPtr, f TextReaderErrorFunc, arg *c.Pointer) diff --git a/libxml2/xmlregexp.go b/libxml2/xmlregexp.go deleted file mode 100644 index 9f18e62a..00000000 --- a/libxml2/xmlregexp.go +++ /dev/null @@ -1,62 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlRegexp struct { - Unused [8]uint8 -} -type Regexp X_xmlRegexp -type RegexpPtr *Regexp - -type X_xmlRegExecCtxt struct { - Unused [8]uint8 -} -type RegExecCtxt X_xmlRegExecCtxt -type RegExecCtxtPtr *RegExecCtxt - -/* - * The POSIX like API - */ -// llgo:link (*Char).RegexpCompile C.xmlRegexpCompile -func (recv_ *Char) RegexpCompile() RegexpPtr { - return nil -} - -//go:linkname RegFreeRegexp C.xmlRegFreeRegexp -func RegFreeRegexp(regexp RegexpPtr) - -//go:linkname RegexpExec C.xmlRegexpExec -func RegexpExec(comp RegexpPtr, value *Char) c.Int - -//go:linkname RegexpPrint C.xmlRegexpPrint -func RegexpPrint(output *c.FILE, regexp RegexpPtr) - -//go:linkname RegexpIsDeterminist C.xmlRegexpIsDeterminist -func RegexpIsDeterminist(comp RegexpPtr) c.Int - -// llgo:type C -type RegExecCallbacks func(RegExecCtxtPtr, *Char, c.Pointer, c.Pointer) - -/* - * The progressive API - */ -//go:linkname RegNewExecCtxt C.xmlRegNewExecCtxt -func RegNewExecCtxt(comp RegexpPtr, callback RegExecCallbacks, data c.Pointer) RegExecCtxtPtr - -//go:linkname RegFreeExecCtxt C.xmlRegFreeExecCtxt -func RegFreeExecCtxt(exec RegExecCtxtPtr) 
- -//go:linkname RegExecPushString C.xmlRegExecPushString -func RegExecPushString(exec RegExecCtxtPtr, value *Char, data c.Pointer) c.Int - -//go:linkname RegExecPushString2 C.xmlRegExecPushString2 -func RegExecPushString2(exec RegExecCtxtPtr, value *Char, value2 *Char, data c.Pointer) c.Int - -//go:linkname RegExecNextValues C.xmlRegExecNextValues -func RegExecNextValues(exec RegExecCtxtPtr, nbval *c.Int, nbneg *c.Int, values **Char, terminal *c.Int) c.Int - -//go:linkname RegExecErrInfo C.xmlRegExecErrInfo -func RegExecErrInfo(exec RegExecCtxtPtr, string **Char, nbval *c.Int, nbneg *c.Int, values **Char, terminal *c.Int) c.Int diff --git a/libxml2/xmlsave.go b/libxml2/xmlsave.go deleted file mode 100644 index 0248cbc3..00000000 --- a/libxml2/xmlsave.go +++ /dev/null @@ -1,67 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type SaveOption c.Int - -const ( - SAVE_FORMAT SaveOption = 1 - SAVE_NO_DECL SaveOption = 2 - SAVE_NO_EMPTY SaveOption = 4 - SAVE_NO_XHTML SaveOption = 8 - SAVE_XHTML SaveOption = 16 - SAVE_AS_XML SaveOption = 32 - SAVE_AS_HTML SaveOption = 64 - SAVE_WSNONSIG SaveOption = 128 -) - -type X_xmlSaveCtxt struct { - Unused [8]uint8 -} -type SaveCtxt X_xmlSaveCtxt -type SaveCtxtPtr *SaveCtxt - -//go:linkname SaveToFd C.xmlSaveToFd -func SaveToFd(fd c.Int, encoding *c.Char, options c.Int) SaveCtxtPtr - -//go:linkname SaveToFilename C.xmlSaveToFilename -func SaveToFilename(filename *c.Char, encoding *c.Char, options c.Int) SaveCtxtPtr - -//go:linkname SaveToBuffer C.xmlSaveToBuffer -func SaveToBuffer(buffer BufferPtr, encoding *c.Char, options c.Int) SaveCtxtPtr - -//go:linkname SaveToIO C.xmlSaveToIO -func SaveToIO(iowrite OutputWriteCallback, ioclose OutputCloseCallback, ioctx c.Pointer, encoding *c.Char, options c.Int) SaveCtxtPtr - -//go:linkname SaveDoc C.xmlSaveDoc -func SaveDoc(ctxt SaveCtxtPtr, doc DocPtr) c.Long - -//go:linkname SaveTree C.xmlSaveTree -func SaveTree(ctxt SaveCtxtPtr, node NodePtr) c.Long - 
-//go:linkname SaveFlush C.xmlSaveFlush -func SaveFlush(ctxt SaveCtxtPtr) c.Int - -//go:linkname SaveClose C.xmlSaveClose -func SaveClose(ctxt SaveCtxtPtr) c.Int - -//go:linkname SaveFinish C.xmlSaveFinish -func SaveFinish(ctxt SaveCtxtPtr) c.Int - -//go:linkname SaveSetEscape C.xmlSaveSetEscape -func SaveSetEscape(ctxt SaveCtxtPtr, escape CharEncodingOutputFunc) c.Int - -//go:linkname SaveSetAttrEscape C.xmlSaveSetAttrEscape -func SaveSetAttrEscape(ctxt SaveCtxtPtr, escape CharEncodingOutputFunc) c.Int - -//go:linkname ThrDefIndentTreeOutput C.xmlThrDefIndentTreeOutput -func ThrDefIndentTreeOutput(v c.Int) c.Int - -//go:linkname ThrDefTreeIndentString C.xmlThrDefTreeIndentString -func ThrDefTreeIndentString(v *c.Char) *c.Char - -//go:linkname ThrDefSaveNoEmptyTags C.xmlThrDefSaveNoEmptyTags -func ThrDefSaveNoEmptyTags(v c.Int) c.Int diff --git a/libxml2/xmlschemas.go b/libxml2/xmlschemas.go deleted file mode 100644 index 792a0fd1..00000000 --- a/libxml2/xmlschemas.go +++ /dev/null @@ -1,158 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type SchemaValidError c.Int - -const ( - SCHEMAS_ERR_OK SchemaValidError = 0 - SCHEMAS_ERR_NOROOT SchemaValidError = 1 - SCHEMAS_ERR_UNDECLAREDELEM SchemaValidError = 2 - SCHEMAS_ERR_NOTTOPLEVEL SchemaValidError = 3 - SCHEMAS_ERR_MISSING SchemaValidError = 4 - SCHEMAS_ERR_WRONGELEM SchemaValidError = 5 - SCHEMAS_ERR_NOTYPE SchemaValidError = 6 - SCHEMAS_ERR_NOROLLBACK SchemaValidError = 7 - SCHEMAS_ERR_ISABSTRACT SchemaValidError = 8 - SCHEMAS_ERR_NOTEMPTY SchemaValidError = 9 - SCHEMAS_ERR_ELEMCONT SchemaValidError = 10 - SCHEMAS_ERR_HAVEDEFAULT SchemaValidError = 11 - SCHEMAS_ERR_NOTNILLABLE SchemaValidError = 12 - SCHEMAS_ERR_EXTRACONTENT SchemaValidError = 13 - SCHEMAS_ERR_INVALIDATTR SchemaValidError = 14 - SCHEMAS_ERR_INVALIDELEM SchemaValidError = 15 - SCHEMAS_ERR_NOTDETERMINIST SchemaValidError = 16 - SCHEMAS_ERR_CONSTRUCT SchemaValidError = 17 - SCHEMAS_ERR_INTERNAL SchemaValidError = 
18 - SCHEMAS_ERR_NOTSIMPLE SchemaValidError = 19 - SCHEMAS_ERR_ATTRUNKNOWN SchemaValidError = 20 - SCHEMAS_ERR_ATTRINVALID SchemaValidError = 21 - SCHEMAS_ERR_VALUE SchemaValidError = 22 - SCHEMAS_ERR_FACET SchemaValidError = 23 - SCHEMAS_ERR_ SchemaValidError = 24 - SCHEMAS_ERR_XXX SchemaValidError = 25 -) - -type SchemaValidOption c.Int - -const SCHEMA_VAL_VC_I_CREATE SchemaValidOption = 1 - -type Schema X_xmlSchema -type SchemaPtr *Schema - -// llgo:type C -type SchemaValidityErrorFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -// llgo:type C -type SchemaValidityWarningFunc func(__llgo_arg_0 c.Pointer, __llgo_arg_1 *c.Char, __llgo_va_list ...interface{}) - -type X_xmlSchemaParserCtxt struct { - Unused [8]uint8 -} -type SchemaParserCtxt X_xmlSchemaParserCtxt -type SchemaParserCtxtPtr *SchemaParserCtxt - -type X_xmlSchemaValidCtxt struct { - Unused [8]uint8 -} -type SchemaValidCtxt X_xmlSchemaValidCtxt -type SchemaValidCtxtPtr *SchemaValidCtxt - -// llgo:type C -type SchemaValidityLocatorFunc func(c.Pointer, **c.Char, *c.Ulong) c.Int - -/* - * Interfaces for parsing. 
- */ -//go:linkname SchemaNewParserCtxt C.xmlSchemaNewParserCtxt -func SchemaNewParserCtxt(URL *c.Char) SchemaParserCtxtPtr - -//go:linkname SchemaNewMemParserCtxt C.xmlSchemaNewMemParserCtxt -func SchemaNewMemParserCtxt(buffer *c.Char, size c.Int) SchemaParserCtxtPtr - -//go:linkname SchemaNewDocParserCtxt C.xmlSchemaNewDocParserCtxt -func SchemaNewDocParserCtxt(doc DocPtr) SchemaParserCtxtPtr - -//go:linkname SchemaFreeParserCtxt C.xmlSchemaFreeParserCtxt -func SchemaFreeParserCtxt(ctxt SchemaParserCtxtPtr) - -//go:linkname SchemaSetParserErrors C.xmlSchemaSetParserErrors -func SchemaSetParserErrors(ctxt SchemaParserCtxtPtr, err SchemaValidityErrorFunc, warn SchemaValidityWarningFunc, ctx c.Pointer) - -//go:linkname SchemaSetParserStructuredErrors C.xmlSchemaSetParserStructuredErrors -func SchemaSetParserStructuredErrors(ctxt SchemaParserCtxtPtr, serror StructuredErrorFunc, ctx c.Pointer) - -//go:linkname SchemaGetParserErrors C.xmlSchemaGetParserErrors -func SchemaGetParserErrors(ctxt SchemaParserCtxtPtr, err SchemaValidityErrorFunc, warn SchemaValidityWarningFunc, ctx *c.Pointer) c.Int - -//go:linkname SchemaIsValid C.xmlSchemaIsValid -func SchemaIsValid(ctxt SchemaValidCtxtPtr) c.Int - -//go:linkname SchemaParse C.xmlSchemaParse -func SchemaParse(ctxt SchemaParserCtxtPtr) SchemaPtr - -//go:linkname SchemaFree C.xmlSchemaFree -func SchemaFree(schema SchemaPtr) - -//go:linkname SchemaDump C.xmlSchemaDump -func SchemaDump(output *c.FILE, schema SchemaPtr) - -/* - * Interfaces for validating - */ -//go:linkname SchemaSetValidErrors C.xmlSchemaSetValidErrors -func SchemaSetValidErrors(ctxt SchemaValidCtxtPtr, err SchemaValidityErrorFunc, warn SchemaValidityWarningFunc, ctx c.Pointer) - -//go:linkname SchemaSetValidStructuredErrors C.xmlSchemaSetValidStructuredErrors -func SchemaSetValidStructuredErrors(ctxt SchemaValidCtxtPtr, serror StructuredErrorFunc, ctx c.Pointer) - -//go:linkname SchemaGetValidErrors C.xmlSchemaGetValidErrors -func SchemaGetValidErrors(ctxt 
SchemaValidCtxtPtr, err SchemaValidityErrorFunc, warn SchemaValidityWarningFunc, ctx *c.Pointer) c.Int - -//go:linkname SchemaSetValidOptions C.xmlSchemaSetValidOptions -func SchemaSetValidOptions(ctxt SchemaValidCtxtPtr, options c.Int) c.Int - -//go:linkname SchemaValidateSetFilename C.xmlSchemaValidateSetFilename -func SchemaValidateSetFilename(vctxt SchemaValidCtxtPtr, filename *c.Char) - -//go:linkname SchemaValidCtxtGetOptions C.xmlSchemaValidCtxtGetOptions -func SchemaValidCtxtGetOptions(ctxt SchemaValidCtxtPtr) c.Int - -//go:linkname SchemaNewValidCtxt C.xmlSchemaNewValidCtxt -func SchemaNewValidCtxt(schema SchemaPtr) SchemaValidCtxtPtr - -//go:linkname SchemaFreeValidCtxt C.xmlSchemaFreeValidCtxt -func SchemaFreeValidCtxt(ctxt SchemaValidCtxtPtr) - -//go:linkname SchemaValidateDoc C.xmlSchemaValidateDoc -func SchemaValidateDoc(ctxt SchemaValidCtxtPtr, instance DocPtr) c.Int - -//go:linkname SchemaValidateOneElement C.xmlSchemaValidateOneElement -func SchemaValidateOneElement(ctxt SchemaValidCtxtPtr, elem NodePtr) c.Int - -//go:linkname SchemaValidateStream C.xmlSchemaValidateStream -func SchemaValidateStream(ctxt SchemaValidCtxtPtr, input ParserInputBufferPtr, enc CharEncoding, sax SAXHandlerPtr, user_data c.Pointer) c.Int - -//go:linkname SchemaValidateFile C.xmlSchemaValidateFile -func SchemaValidateFile(ctxt SchemaValidCtxtPtr, filename *c.Char, options c.Int) c.Int - -//go:linkname SchemaValidCtxtGetParserCtxt C.xmlSchemaValidCtxtGetParserCtxt -func SchemaValidCtxtGetParserCtxt(ctxt SchemaValidCtxtPtr) ParserCtxtPtr - -type X_xmlSchemaSAXPlug struct { - Unused [8]uint8 -} -type SchemaSAXPlugStruct X_xmlSchemaSAXPlug -type SchemaSAXPlugPtr *SchemaSAXPlugStruct - -//go:linkname SchemaSAXPlug C.xmlSchemaSAXPlug -func SchemaSAXPlug(ctxt SchemaValidCtxtPtr, sax *SAXHandlerPtr, user_data *c.Pointer) SchemaSAXPlugPtr - -//go:linkname SchemaSAXUnplug C.xmlSchemaSAXUnplug -func SchemaSAXUnplug(plug SchemaSAXPlugPtr) c.Int - -//go:linkname 
SchemaValidateSetLocator C.xmlSchemaValidateSetLocator -func SchemaValidateSetLocator(vctxt SchemaValidCtxtPtr, f SchemaValidityLocatorFunc, ctxt c.Pointer) diff --git a/libxml2/xmlschemastypes.go b/libxml2/xmlschemastypes.go deleted file mode 100644 index 41a261a9..00000000 --- a/libxml2/xmlschemastypes.go +++ /dev/null @@ -1,131 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type SchemaWhitespaceValueType c.Int - -const ( - SCHEMA_WHITESPACE_UNKNOWN SchemaWhitespaceValueType = 0 - SCHEMA_WHITESPACE_PRESERVE SchemaWhitespaceValueType = 1 - SCHEMA_WHITESPACE_REPLACE SchemaWhitespaceValueType = 2 - SCHEMA_WHITESPACE_COLLAPSE SchemaWhitespaceValueType = 3 -) - -//go:linkname SchemaInitTypes C.xmlSchemaInitTypes -func SchemaInitTypes() c.Int - -//go:linkname SchemaCleanupTypes C.xmlSchemaCleanupTypes -func SchemaCleanupTypes() - -// llgo:link (*Char).SchemaGetPredefinedType C.xmlSchemaGetPredefinedType -func (recv_ *Char) SchemaGetPredefinedType(ns *Char) SchemaTypePtr { - return nil -} - -//go:linkname SchemaValidatePredefinedType C.xmlSchemaValidatePredefinedType -func SchemaValidatePredefinedType(type_ SchemaTypePtr, value *Char, val *SchemaValPtr) c.Int - -//go:linkname SchemaValPredefTypeNode C.xmlSchemaValPredefTypeNode -func SchemaValPredefTypeNode(type_ SchemaTypePtr, value *Char, val *SchemaValPtr, node NodePtr) c.Int - -//go:linkname SchemaValidateFacet C.xmlSchemaValidateFacet -func SchemaValidateFacet(base SchemaTypePtr, facet SchemaFacetPtr, value *Char, val SchemaValPtr) c.Int - -//go:linkname SchemaValidateFacetWhtsp C.xmlSchemaValidateFacetWhtsp -func SchemaValidateFacetWhtsp(facet SchemaFacetPtr, fws SchemaWhitespaceValueType, valType SchemaValType, value *Char, val SchemaValPtr, ws SchemaWhitespaceValueType) c.Int - -//go:linkname SchemaFreeValue C.xmlSchemaFreeValue -func SchemaFreeValue(val SchemaValPtr) - -//go:linkname SchemaNewFacet C.xmlSchemaNewFacet -func SchemaNewFacet() SchemaFacetPtr - -//go:linkname 
SchemaCheckFacet C.xmlSchemaCheckFacet -func SchemaCheckFacet(facet SchemaFacetPtr, typeDecl SchemaTypePtr, ctxt SchemaParserCtxtPtr, name *Char) c.Int - -//go:linkname SchemaFreeFacet C.xmlSchemaFreeFacet -func SchemaFreeFacet(facet SchemaFacetPtr) - -//go:linkname SchemaCompareValues C.xmlSchemaCompareValues -func SchemaCompareValues(x SchemaValPtr, y SchemaValPtr) c.Int - -//go:linkname SchemaGetBuiltInListSimpleTypeItemType C.xmlSchemaGetBuiltInListSimpleTypeItemType -func SchemaGetBuiltInListSimpleTypeItemType(type_ SchemaTypePtr) SchemaTypePtr - -//go:linkname SchemaValidateListSimpleTypeFacet C.xmlSchemaValidateListSimpleTypeFacet -func SchemaValidateListSimpleTypeFacet(facet SchemaFacetPtr, value *Char, actualLen c.Ulong, expectedLen *c.Ulong) c.Int - -// llgo:link SchemaValType.SchemaGetBuiltInType C.xmlSchemaGetBuiltInType -func (recv_ SchemaValType) SchemaGetBuiltInType() SchemaTypePtr { - return nil -} - -//go:linkname SchemaIsBuiltInTypeFacet C.xmlSchemaIsBuiltInTypeFacet -func SchemaIsBuiltInTypeFacet(type_ SchemaTypePtr, facetType c.Int) c.Int - -// llgo:link (*Char).SchemaCollapseString C.xmlSchemaCollapseString -func (recv_ *Char) SchemaCollapseString() *Char { - return nil -} - -// llgo:link (*Char).SchemaWhiteSpaceReplace C.xmlSchemaWhiteSpaceReplace -func (recv_ *Char) SchemaWhiteSpaceReplace() *Char { - return nil -} - -//go:linkname SchemaGetFacetValueAsULong C.xmlSchemaGetFacetValueAsULong -func SchemaGetFacetValueAsULong(facet SchemaFacetPtr) c.Ulong - -//go:linkname SchemaValidateLengthFacet C.xmlSchemaValidateLengthFacet -func SchemaValidateLengthFacet(type_ SchemaTypePtr, facet SchemaFacetPtr, value *Char, val SchemaValPtr, length *c.Ulong) c.Int - -//go:linkname SchemaValidateLengthFacetWhtsp C.xmlSchemaValidateLengthFacetWhtsp -func SchemaValidateLengthFacetWhtsp(facet SchemaFacetPtr, valType SchemaValType, value *Char, val SchemaValPtr, length *c.Ulong, ws SchemaWhitespaceValueType) c.Int - -//go:linkname SchemaValPredefTypeNodeNoNorm 
C.xmlSchemaValPredefTypeNodeNoNorm -func SchemaValPredefTypeNodeNoNorm(type_ SchemaTypePtr, value *Char, val *SchemaValPtr, node NodePtr) c.Int - -//go:linkname SchemaGetCanonValue C.xmlSchemaGetCanonValue -func SchemaGetCanonValue(val SchemaValPtr, retValue **Char) c.Int - -//go:linkname SchemaGetCanonValueWhtsp C.xmlSchemaGetCanonValueWhtsp -func SchemaGetCanonValueWhtsp(val SchemaValPtr, retValue **Char, ws SchemaWhitespaceValueType) c.Int - -//go:linkname SchemaValueAppend C.xmlSchemaValueAppend -func SchemaValueAppend(prev SchemaValPtr, cur SchemaValPtr) c.Int - -//go:linkname SchemaValueGetNext C.xmlSchemaValueGetNext -func SchemaValueGetNext(cur SchemaValPtr) SchemaValPtr - -//go:linkname SchemaValueGetAsString C.xmlSchemaValueGetAsString -func SchemaValueGetAsString(val SchemaValPtr) *Char - -//go:linkname SchemaValueGetAsBoolean C.xmlSchemaValueGetAsBoolean -func SchemaValueGetAsBoolean(val SchemaValPtr) c.Int - -// llgo:link SchemaValType.SchemaNewStringValue C.xmlSchemaNewStringValue -func (recv_ SchemaValType) SchemaNewStringValue(value *Char) SchemaValPtr { - return nil -} - -// llgo:link (*Char).SchemaNewNOTATIONValue C.xmlSchemaNewNOTATIONValue -func (recv_ *Char) SchemaNewNOTATIONValue(ns *Char) SchemaValPtr { - return nil -} - -// llgo:link (*Char).SchemaNewQNameValue C.xmlSchemaNewQNameValue -func (recv_ *Char) SchemaNewQNameValue(localName *Char) SchemaValPtr { - return nil -} - -//go:linkname SchemaCompareValuesWhtsp C.xmlSchemaCompareValuesWhtsp -func SchemaCompareValuesWhtsp(x SchemaValPtr, xws SchemaWhitespaceValueType, y SchemaValPtr, yws SchemaWhitespaceValueType) c.Int - -//go:linkname SchemaCopyValue C.xmlSchemaCopyValue -func SchemaCopyValue(val SchemaValPtr) SchemaValPtr - -//go:linkname SchemaGetValType C.xmlSchemaGetValType -func SchemaGetValType(val SchemaValPtr) SchemaValType diff --git a/libxml2/xmlstring.go b/libxml2/xmlstring.go deleted file mode 100644 index c24fea98..00000000 --- a/libxml2/xmlstring.go +++ /dev/null @@ -1,151 
+0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type Char c.Char - -/* - * xmlChar handling - */ -// llgo:link (*Char).Strdup C.xmlStrdup -func (recv_ *Char) Strdup() *Char { - return nil -} - -// llgo:link (*Char).Strndup C.xmlStrndup -func (recv_ *Char) Strndup(len c.Int) *Char { - return nil -} - -//go:linkname CharStrndup C.xmlCharStrndup -func CharStrndup(cur *c.Char, len c.Int) *Char - -//go:linkname CharStrdup C.xmlCharStrdup -func CharStrdup(cur *c.Char) *Char - -// llgo:link (*Char).Strsub C.xmlStrsub -func (recv_ *Char) Strsub(start c.Int, len c.Int) *Char { - return nil -} - -// llgo:link (*Char).Strchr C.xmlStrchr -func (recv_ *Char) Strchr(val Char) *Char { - return nil -} - -// llgo:link (*Char).Strstr C.xmlStrstr -func (recv_ *Char) Strstr(val *Char) *Char { - return nil -} - -// llgo:link (*Char).Strcasestr C.xmlStrcasestr -func (recv_ *Char) Strcasestr(val *Char) *Char { - return nil -} - -// llgo:link (*Char).Strcmp C.xmlStrcmp -func (recv_ *Char) Strcmp(str2 *Char) c.Int { - return 0 -} - -// llgo:link (*Char).Strncmp C.xmlStrncmp -func (recv_ *Char) Strncmp(str2 *Char, len c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).Strcasecmp C.xmlStrcasecmp -func (recv_ *Char) Strcasecmp(str2 *Char) c.Int { - return 0 -} - -// llgo:link (*Char).Strncasecmp C.xmlStrncasecmp -func (recv_ *Char) Strncasecmp(str2 *Char, len c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).StrEqual C.xmlStrEqual -func (recv_ *Char) StrEqual(str2 *Char) c.Int { - return 0 -} - -// llgo:link (*Char).StrQEqual C.xmlStrQEqual -func (recv_ *Char) StrQEqual(name *Char, str *Char) c.Int { - return 0 -} - -// llgo:link (*Char).Strlen C.xmlStrlen -func (recv_ *Char) Strlen() c.Int { - return 0 -} - -// llgo:link (*Char).Strcat C.xmlStrcat -func (recv_ *Char) Strcat(add *Char) *Char { - return nil -} - -// llgo:link (*Char).Strncat C.xmlStrncat -func (recv_ *Char) Strncat(add *Char, len c.Int) *Char { - return nil -} - -// llgo:link 
(*Char).StrncatNew C.xmlStrncatNew -func (recv_ *Char) StrncatNew(str2 *Char, len c.Int) *Char { - return nil -} - -//go:linkname StrPrintf C.xmlStrPrintf -func StrPrintf(buf *Char, len c.Int, msg *c.Char, __llgo_va_list ...interface{}) c.Int - -// llgo:link (*Char).StrVPrintf C.xmlStrVPrintf -func (recv_ *Char) StrVPrintf(len c.Int, msg *c.Char, ap c.VaList) c.Int { - return 0 -} - -//go:linkname GetUTF8Char C.xmlGetUTF8Char -func GetUTF8Char(utf *c.Char, len *c.Int) c.Int - -//go:linkname CheckUTF8 C.xmlCheckUTF8 -func CheckUTF8(utf *c.Char) c.Int - -// llgo:link (*Char).UTF8Strsize C.xmlUTF8Strsize -func (recv_ *Char) UTF8Strsize(len c.Int) c.Int { - return 0 -} - -// llgo:link (*Char).UTF8Strndup C.xmlUTF8Strndup -func (recv_ *Char) UTF8Strndup(len c.Int) *Char { - return nil -} - -// llgo:link (*Char).UTF8Strpos C.xmlUTF8Strpos -func (recv_ *Char) UTF8Strpos(pos c.Int) *Char { - return nil -} - -// llgo:link (*Char).UTF8Strloc C.xmlUTF8Strloc -func (recv_ *Char) UTF8Strloc(utfchar *Char) c.Int { - return 0 -} - -// llgo:link (*Char).UTF8Strsub C.xmlUTF8Strsub -func (recv_ *Char) UTF8Strsub(start c.Int, len c.Int) *Char { - return nil -} - -// llgo:link (*Char).UTF8Strlen C.xmlUTF8Strlen -func (recv_ *Char) UTF8Strlen() c.Int { - return 0 -} - -// llgo:link (*Char).UTF8Size C.xmlUTF8Size -func (recv_ *Char) UTF8Size() c.Int { - return 0 -} - -// llgo:link (*Char).UTF8Charcmp C.xmlUTF8Charcmp -func (recv_ *Char) UTF8Charcmp(utf2 *Char) c.Int { - return 0 -} diff --git a/libxml2/xmlunicode.go b/libxml2/xmlunicode.go deleted file mode 100644 index 1545f174..00000000 --- a/libxml2/xmlunicode.go +++ /dev/null @@ -1,504 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -//go:linkname UCSIsAegeanNumbers C.xmlUCSIsAegeanNumbers -func UCSIsAegeanNumbers(code c.Int) c.Int - -//go:linkname UCSIsAlphabeticPresentationForms C.xmlUCSIsAlphabeticPresentationForms -func UCSIsAlphabeticPresentationForms(code c.Int) c.Int - -//go:linkname 
UCSIsArabic C.xmlUCSIsArabic -func UCSIsArabic(code c.Int) c.Int - -//go:linkname UCSIsArabicPresentationFormsA C.xmlUCSIsArabicPresentationFormsA -func UCSIsArabicPresentationFormsA(code c.Int) c.Int - -//go:linkname UCSIsArabicPresentationFormsB C.xmlUCSIsArabicPresentationFormsB -func UCSIsArabicPresentationFormsB(code c.Int) c.Int - -//go:linkname UCSIsArmenian C.xmlUCSIsArmenian -func UCSIsArmenian(code c.Int) c.Int - -//go:linkname UCSIsArrows C.xmlUCSIsArrows -func UCSIsArrows(code c.Int) c.Int - -//go:linkname UCSIsBasicLatin C.xmlUCSIsBasicLatin -func UCSIsBasicLatin(code c.Int) c.Int - -//go:linkname UCSIsBengali C.xmlUCSIsBengali -func UCSIsBengali(code c.Int) c.Int - -//go:linkname UCSIsBlockElements C.xmlUCSIsBlockElements -func UCSIsBlockElements(code c.Int) c.Int - -//go:linkname UCSIsBopomofo C.xmlUCSIsBopomofo -func UCSIsBopomofo(code c.Int) c.Int - -//go:linkname UCSIsBopomofoExtended C.xmlUCSIsBopomofoExtended -func UCSIsBopomofoExtended(code c.Int) c.Int - -//go:linkname UCSIsBoxDrawing C.xmlUCSIsBoxDrawing -func UCSIsBoxDrawing(code c.Int) c.Int - -//go:linkname UCSIsBraillePatterns C.xmlUCSIsBraillePatterns -func UCSIsBraillePatterns(code c.Int) c.Int - -//go:linkname UCSIsBuhid C.xmlUCSIsBuhid -func UCSIsBuhid(code c.Int) c.Int - -//go:linkname UCSIsByzantineMusicalSymbols C.xmlUCSIsByzantineMusicalSymbols -func UCSIsByzantineMusicalSymbols(code c.Int) c.Int - -//go:linkname UCSIsCJKCompatibility C.xmlUCSIsCJKCompatibility -func UCSIsCJKCompatibility(code c.Int) c.Int - -//go:linkname UCSIsCJKCompatibilityForms C.xmlUCSIsCJKCompatibilityForms -func UCSIsCJKCompatibilityForms(code c.Int) c.Int - -//go:linkname UCSIsCJKCompatibilityIdeographs C.xmlUCSIsCJKCompatibilityIdeographs -func UCSIsCJKCompatibilityIdeographs(code c.Int) c.Int - -//go:linkname UCSIsCJKCompatibilityIdeographsSupplement C.xmlUCSIsCJKCompatibilityIdeographsSupplement -func UCSIsCJKCompatibilityIdeographsSupplement(code c.Int) c.Int - -//go:linkname 
UCSIsCJKRadicalsSupplement C.xmlUCSIsCJKRadicalsSupplement -func UCSIsCJKRadicalsSupplement(code c.Int) c.Int - -//go:linkname UCSIsCJKSymbolsandPunctuation C.xmlUCSIsCJKSymbolsandPunctuation -func UCSIsCJKSymbolsandPunctuation(code c.Int) c.Int - -//go:linkname UCSIsCJKUnifiedIdeographs C.xmlUCSIsCJKUnifiedIdeographs -func UCSIsCJKUnifiedIdeographs(code c.Int) c.Int - -//go:linkname UCSIsCJKUnifiedIdeographsExtensionA C.xmlUCSIsCJKUnifiedIdeographsExtensionA -func UCSIsCJKUnifiedIdeographsExtensionA(code c.Int) c.Int - -//go:linkname UCSIsCJKUnifiedIdeographsExtensionB C.xmlUCSIsCJKUnifiedIdeographsExtensionB -func UCSIsCJKUnifiedIdeographsExtensionB(code c.Int) c.Int - -//go:linkname UCSIsCherokee C.xmlUCSIsCherokee -func UCSIsCherokee(code c.Int) c.Int - -//go:linkname UCSIsCombiningDiacriticalMarks C.xmlUCSIsCombiningDiacriticalMarks -func UCSIsCombiningDiacriticalMarks(code c.Int) c.Int - -//go:linkname UCSIsCombiningDiacriticalMarksforSymbols C.xmlUCSIsCombiningDiacriticalMarksforSymbols -func UCSIsCombiningDiacriticalMarksforSymbols(code c.Int) c.Int - -//go:linkname UCSIsCombiningHalfMarks C.xmlUCSIsCombiningHalfMarks -func UCSIsCombiningHalfMarks(code c.Int) c.Int - -//go:linkname UCSIsCombiningMarksforSymbols C.xmlUCSIsCombiningMarksforSymbols -func UCSIsCombiningMarksforSymbols(code c.Int) c.Int - -//go:linkname UCSIsControlPictures C.xmlUCSIsControlPictures -func UCSIsControlPictures(code c.Int) c.Int - -//go:linkname UCSIsCurrencySymbols C.xmlUCSIsCurrencySymbols -func UCSIsCurrencySymbols(code c.Int) c.Int - -//go:linkname UCSIsCypriotSyllabary C.xmlUCSIsCypriotSyllabary -func UCSIsCypriotSyllabary(code c.Int) c.Int - -//go:linkname UCSIsCyrillic C.xmlUCSIsCyrillic -func UCSIsCyrillic(code c.Int) c.Int - -//go:linkname UCSIsCyrillicSupplement C.xmlUCSIsCyrillicSupplement -func UCSIsCyrillicSupplement(code c.Int) c.Int - -//go:linkname UCSIsDeseret C.xmlUCSIsDeseret -func UCSIsDeseret(code c.Int) c.Int - -//go:linkname UCSIsDevanagari 
C.xmlUCSIsDevanagari -func UCSIsDevanagari(code c.Int) c.Int - -//go:linkname UCSIsDingbats C.xmlUCSIsDingbats -func UCSIsDingbats(code c.Int) c.Int - -//go:linkname UCSIsEnclosedAlphanumerics C.xmlUCSIsEnclosedAlphanumerics -func UCSIsEnclosedAlphanumerics(code c.Int) c.Int - -//go:linkname UCSIsEnclosedCJKLettersandMonths C.xmlUCSIsEnclosedCJKLettersandMonths -func UCSIsEnclosedCJKLettersandMonths(code c.Int) c.Int - -//go:linkname UCSIsEthiopic C.xmlUCSIsEthiopic -func UCSIsEthiopic(code c.Int) c.Int - -//go:linkname UCSIsGeneralPunctuation C.xmlUCSIsGeneralPunctuation -func UCSIsGeneralPunctuation(code c.Int) c.Int - -//go:linkname UCSIsGeometricShapes C.xmlUCSIsGeometricShapes -func UCSIsGeometricShapes(code c.Int) c.Int - -//go:linkname UCSIsGeorgian C.xmlUCSIsGeorgian -func UCSIsGeorgian(code c.Int) c.Int - -//go:linkname UCSIsGothic C.xmlUCSIsGothic -func UCSIsGothic(code c.Int) c.Int - -//go:linkname UCSIsGreek C.xmlUCSIsGreek -func UCSIsGreek(code c.Int) c.Int - -//go:linkname UCSIsGreekExtended C.xmlUCSIsGreekExtended -func UCSIsGreekExtended(code c.Int) c.Int - -//go:linkname UCSIsGreekandCoptic C.xmlUCSIsGreekandCoptic -func UCSIsGreekandCoptic(code c.Int) c.Int - -//go:linkname UCSIsGujarati C.xmlUCSIsGujarati -func UCSIsGujarati(code c.Int) c.Int - -//go:linkname UCSIsGurmukhi C.xmlUCSIsGurmukhi -func UCSIsGurmukhi(code c.Int) c.Int - -//go:linkname UCSIsHalfwidthandFullwidthForms C.xmlUCSIsHalfwidthandFullwidthForms -func UCSIsHalfwidthandFullwidthForms(code c.Int) c.Int - -//go:linkname UCSIsHangulCompatibilityJamo C.xmlUCSIsHangulCompatibilityJamo -func UCSIsHangulCompatibilityJamo(code c.Int) c.Int - -//go:linkname UCSIsHangulJamo C.xmlUCSIsHangulJamo -func UCSIsHangulJamo(code c.Int) c.Int - -//go:linkname UCSIsHangulSyllables C.xmlUCSIsHangulSyllables -func UCSIsHangulSyllables(code c.Int) c.Int - -//go:linkname UCSIsHanunoo C.xmlUCSIsHanunoo -func UCSIsHanunoo(code c.Int) c.Int - -//go:linkname UCSIsHebrew C.xmlUCSIsHebrew -func 
UCSIsHebrew(code c.Int) c.Int - -//go:linkname UCSIsHighPrivateUseSurrogates C.xmlUCSIsHighPrivateUseSurrogates -func UCSIsHighPrivateUseSurrogates(code c.Int) c.Int - -//go:linkname UCSIsHighSurrogates C.xmlUCSIsHighSurrogates -func UCSIsHighSurrogates(code c.Int) c.Int - -//go:linkname UCSIsHiragana C.xmlUCSIsHiragana -func UCSIsHiragana(code c.Int) c.Int - -//go:linkname UCSIsIPAExtensions C.xmlUCSIsIPAExtensions -func UCSIsIPAExtensions(code c.Int) c.Int - -//go:linkname UCSIsIdeographicDescriptionCharacters C.xmlUCSIsIdeographicDescriptionCharacters -func UCSIsIdeographicDescriptionCharacters(code c.Int) c.Int - -//go:linkname UCSIsKanbun C.xmlUCSIsKanbun -func UCSIsKanbun(code c.Int) c.Int - -//go:linkname UCSIsKangxiRadicals C.xmlUCSIsKangxiRadicals -func UCSIsKangxiRadicals(code c.Int) c.Int - -//go:linkname UCSIsKannada C.xmlUCSIsKannada -func UCSIsKannada(code c.Int) c.Int - -//go:linkname UCSIsKatakana C.xmlUCSIsKatakana -func UCSIsKatakana(code c.Int) c.Int - -//go:linkname UCSIsKatakanaPhoneticExtensions C.xmlUCSIsKatakanaPhoneticExtensions -func UCSIsKatakanaPhoneticExtensions(code c.Int) c.Int - -//go:linkname UCSIsKhmer C.xmlUCSIsKhmer -func UCSIsKhmer(code c.Int) c.Int - -//go:linkname UCSIsKhmerSymbols C.xmlUCSIsKhmerSymbols -func UCSIsKhmerSymbols(code c.Int) c.Int - -//go:linkname UCSIsLao C.xmlUCSIsLao -func UCSIsLao(code c.Int) c.Int - -//go:linkname UCSIsLatin1Supplement C.xmlUCSIsLatin1Supplement -func UCSIsLatin1Supplement(code c.Int) c.Int - -//go:linkname UCSIsLatinExtendedA C.xmlUCSIsLatinExtendedA -func UCSIsLatinExtendedA(code c.Int) c.Int - -//go:linkname UCSIsLatinExtendedB C.xmlUCSIsLatinExtendedB -func UCSIsLatinExtendedB(code c.Int) c.Int - -//go:linkname UCSIsLatinExtendedAdditional C.xmlUCSIsLatinExtendedAdditional -func UCSIsLatinExtendedAdditional(code c.Int) c.Int - -//go:linkname UCSIsLetterlikeSymbols C.xmlUCSIsLetterlikeSymbols -func UCSIsLetterlikeSymbols(code c.Int) c.Int - -//go:linkname UCSIsLimbu C.xmlUCSIsLimbu -func 
UCSIsLimbu(code c.Int) c.Int - -//go:linkname UCSIsLinearBIdeograms C.xmlUCSIsLinearBIdeograms -func UCSIsLinearBIdeograms(code c.Int) c.Int - -//go:linkname UCSIsLinearBSyllabary C.xmlUCSIsLinearBSyllabary -func UCSIsLinearBSyllabary(code c.Int) c.Int - -//go:linkname UCSIsLowSurrogates C.xmlUCSIsLowSurrogates -func UCSIsLowSurrogates(code c.Int) c.Int - -//go:linkname UCSIsMalayalam C.xmlUCSIsMalayalam -func UCSIsMalayalam(code c.Int) c.Int - -//go:linkname UCSIsMathematicalAlphanumericSymbols C.xmlUCSIsMathematicalAlphanumericSymbols -func UCSIsMathematicalAlphanumericSymbols(code c.Int) c.Int - -//go:linkname UCSIsMathematicalOperators C.xmlUCSIsMathematicalOperators -func UCSIsMathematicalOperators(code c.Int) c.Int - -//go:linkname UCSIsMiscellaneousMathematicalSymbolsA C.xmlUCSIsMiscellaneousMathematicalSymbolsA -func UCSIsMiscellaneousMathematicalSymbolsA(code c.Int) c.Int - -//go:linkname UCSIsMiscellaneousMathematicalSymbolsB C.xmlUCSIsMiscellaneousMathematicalSymbolsB -func UCSIsMiscellaneousMathematicalSymbolsB(code c.Int) c.Int - -//go:linkname UCSIsMiscellaneousSymbols C.xmlUCSIsMiscellaneousSymbols -func UCSIsMiscellaneousSymbols(code c.Int) c.Int - -//go:linkname UCSIsMiscellaneousSymbolsandArrows C.xmlUCSIsMiscellaneousSymbolsandArrows -func UCSIsMiscellaneousSymbolsandArrows(code c.Int) c.Int - -//go:linkname UCSIsMiscellaneousTechnical C.xmlUCSIsMiscellaneousTechnical -func UCSIsMiscellaneousTechnical(code c.Int) c.Int - -//go:linkname UCSIsMongolian C.xmlUCSIsMongolian -func UCSIsMongolian(code c.Int) c.Int - -//go:linkname UCSIsMusicalSymbols C.xmlUCSIsMusicalSymbols -func UCSIsMusicalSymbols(code c.Int) c.Int - -//go:linkname UCSIsMyanmar C.xmlUCSIsMyanmar -func UCSIsMyanmar(code c.Int) c.Int - -//go:linkname UCSIsNumberForms C.xmlUCSIsNumberForms -func UCSIsNumberForms(code c.Int) c.Int - -//go:linkname UCSIsOgham C.xmlUCSIsOgham -func UCSIsOgham(code c.Int) c.Int - -//go:linkname UCSIsOldItalic C.xmlUCSIsOldItalic -func UCSIsOldItalic(code 
c.Int) c.Int - -//go:linkname UCSIsOpticalCharacterRecognition C.xmlUCSIsOpticalCharacterRecognition -func UCSIsOpticalCharacterRecognition(code c.Int) c.Int - -//go:linkname UCSIsOriya C.xmlUCSIsOriya -func UCSIsOriya(code c.Int) c.Int - -//go:linkname UCSIsOsmanya C.xmlUCSIsOsmanya -func UCSIsOsmanya(code c.Int) c.Int - -//go:linkname UCSIsPhoneticExtensions C.xmlUCSIsPhoneticExtensions -func UCSIsPhoneticExtensions(code c.Int) c.Int - -//go:linkname UCSIsPrivateUse C.xmlUCSIsPrivateUse -func UCSIsPrivateUse(code c.Int) c.Int - -//go:linkname UCSIsPrivateUseArea C.xmlUCSIsPrivateUseArea -func UCSIsPrivateUseArea(code c.Int) c.Int - -//go:linkname UCSIsRunic C.xmlUCSIsRunic -func UCSIsRunic(code c.Int) c.Int - -//go:linkname UCSIsShavian C.xmlUCSIsShavian -func UCSIsShavian(code c.Int) c.Int - -//go:linkname UCSIsSinhala C.xmlUCSIsSinhala -func UCSIsSinhala(code c.Int) c.Int - -//go:linkname UCSIsSmallFormVariants C.xmlUCSIsSmallFormVariants -func UCSIsSmallFormVariants(code c.Int) c.Int - -//go:linkname UCSIsSpacingModifierLetters C.xmlUCSIsSpacingModifierLetters -func UCSIsSpacingModifierLetters(code c.Int) c.Int - -//go:linkname UCSIsSpecials C.xmlUCSIsSpecials -func UCSIsSpecials(code c.Int) c.Int - -//go:linkname UCSIsSuperscriptsandSubscripts C.xmlUCSIsSuperscriptsandSubscripts -func UCSIsSuperscriptsandSubscripts(code c.Int) c.Int - -//go:linkname UCSIsSupplementalArrowsA C.xmlUCSIsSupplementalArrowsA -func UCSIsSupplementalArrowsA(code c.Int) c.Int - -//go:linkname UCSIsSupplementalArrowsB C.xmlUCSIsSupplementalArrowsB -func UCSIsSupplementalArrowsB(code c.Int) c.Int - -//go:linkname UCSIsSupplementalMathematicalOperators C.xmlUCSIsSupplementalMathematicalOperators -func UCSIsSupplementalMathematicalOperators(code c.Int) c.Int - -//go:linkname UCSIsSupplementaryPrivateUseAreaA C.xmlUCSIsSupplementaryPrivateUseAreaA -func UCSIsSupplementaryPrivateUseAreaA(code c.Int) c.Int - -//go:linkname UCSIsSupplementaryPrivateUseAreaB 
C.xmlUCSIsSupplementaryPrivateUseAreaB -func UCSIsSupplementaryPrivateUseAreaB(code c.Int) c.Int - -//go:linkname UCSIsSyriac C.xmlUCSIsSyriac -func UCSIsSyriac(code c.Int) c.Int - -//go:linkname UCSIsTagalog C.xmlUCSIsTagalog -func UCSIsTagalog(code c.Int) c.Int - -//go:linkname UCSIsTagbanwa C.xmlUCSIsTagbanwa -func UCSIsTagbanwa(code c.Int) c.Int - -//go:linkname UCSIsTags C.xmlUCSIsTags -func UCSIsTags(code c.Int) c.Int - -//go:linkname UCSIsTaiLe C.xmlUCSIsTaiLe -func UCSIsTaiLe(code c.Int) c.Int - -//go:linkname UCSIsTaiXuanJingSymbols C.xmlUCSIsTaiXuanJingSymbols -func UCSIsTaiXuanJingSymbols(code c.Int) c.Int - -//go:linkname UCSIsTamil C.xmlUCSIsTamil -func UCSIsTamil(code c.Int) c.Int - -//go:linkname UCSIsTelugu C.xmlUCSIsTelugu -func UCSIsTelugu(code c.Int) c.Int - -//go:linkname UCSIsThaana C.xmlUCSIsThaana -func UCSIsThaana(code c.Int) c.Int - -//go:linkname UCSIsThai C.xmlUCSIsThai -func UCSIsThai(code c.Int) c.Int - -//go:linkname UCSIsTibetan C.xmlUCSIsTibetan -func UCSIsTibetan(code c.Int) c.Int - -//go:linkname UCSIsUgaritic C.xmlUCSIsUgaritic -func UCSIsUgaritic(code c.Int) c.Int - -//go:linkname UCSIsUnifiedCanadianAboriginalSyllabics C.xmlUCSIsUnifiedCanadianAboriginalSyllabics -func UCSIsUnifiedCanadianAboriginalSyllabics(code c.Int) c.Int - -//go:linkname UCSIsVariationSelectors C.xmlUCSIsVariationSelectors -func UCSIsVariationSelectors(code c.Int) c.Int - -//go:linkname UCSIsVariationSelectorsSupplement C.xmlUCSIsVariationSelectorsSupplement -func UCSIsVariationSelectorsSupplement(code c.Int) c.Int - -//go:linkname UCSIsYiRadicals C.xmlUCSIsYiRadicals -func UCSIsYiRadicals(code c.Int) c.Int - -//go:linkname UCSIsYiSyllables C.xmlUCSIsYiSyllables -func UCSIsYiSyllables(code c.Int) c.Int - -//go:linkname UCSIsYijingHexagramSymbols C.xmlUCSIsYijingHexagramSymbols -func UCSIsYijingHexagramSymbols(code c.Int) c.Int - -//go:linkname UCSIsBlock C.xmlUCSIsBlock -func UCSIsBlock(code c.Int, block *c.Char) c.Int - -//go:linkname UCSIsCatC 
C.xmlUCSIsCatC -func UCSIsCatC(code c.Int) c.Int - -//go:linkname UCSIsCatCc C.xmlUCSIsCatCc -func UCSIsCatCc(code c.Int) c.Int - -//go:linkname UCSIsCatCf C.xmlUCSIsCatCf -func UCSIsCatCf(code c.Int) c.Int - -//go:linkname UCSIsCatCo C.xmlUCSIsCatCo -func UCSIsCatCo(code c.Int) c.Int - -//go:linkname UCSIsCatCs C.xmlUCSIsCatCs -func UCSIsCatCs(code c.Int) c.Int - -//go:linkname UCSIsCatL C.xmlUCSIsCatL -func UCSIsCatL(code c.Int) c.Int - -//go:linkname UCSIsCatLl C.xmlUCSIsCatLl -func UCSIsCatLl(code c.Int) c.Int - -//go:linkname UCSIsCatLm C.xmlUCSIsCatLm -func UCSIsCatLm(code c.Int) c.Int - -//go:linkname UCSIsCatLo C.xmlUCSIsCatLo -func UCSIsCatLo(code c.Int) c.Int - -//go:linkname UCSIsCatLt C.xmlUCSIsCatLt -func UCSIsCatLt(code c.Int) c.Int - -//go:linkname UCSIsCatLu C.xmlUCSIsCatLu -func UCSIsCatLu(code c.Int) c.Int - -//go:linkname UCSIsCatM C.xmlUCSIsCatM -func UCSIsCatM(code c.Int) c.Int - -//go:linkname UCSIsCatMc C.xmlUCSIsCatMc -func UCSIsCatMc(code c.Int) c.Int - -//go:linkname UCSIsCatMe C.xmlUCSIsCatMe -func UCSIsCatMe(code c.Int) c.Int - -//go:linkname UCSIsCatMn C.xmlUCSIsCatMn -func UCSIsCatMn(code c.Int) c.Int - -//go:linkname UCSIsCatN C.xmlUCSIsCatN -func UCSIsCatN(code c.Int) c.Int - -//go:linkname UCSIsCatNd C.xmlUCSIsCatNd -func UCSIsCatNd(code c.Int) c.Int - -//go:linkname UCSIsCatNl C.xmlUCSIsCatNl -func UCSIsCatNl(code c.Int) c.Int - -//go:linkname UCSIsCatNo C.xmlUCSIsCatNo -func UCSIsCatNo(code c.Int) c.Int - -//go:linkname UCSIsCatP C.xmlUCSIsCatP -func UCSIsCatP(code c.Int) c.Int - -//go:linkname UCSIsCatPc C.xmlUCSIsCatPc -func UCSIsCatPc(code c.Int) c.Int - -//go:linkname UCSIsCatPd C.xmlUCSIsCatPd -func UCSIsCatPd(code c.Int) c.Int - -//go:linkname UCSIsCatPe C.xmlUCSIsCatPe -func UCSIsCatPe(code c.Int) c.Int - -//go:linkname UCSIsCatPf C.xmlUCSIsCatPf -func UCSIsCatPf(code c.Int) c.Int - -//go:linkname UCSIsCatPi C.xmlUCSIsCatPi -func UCSIsCatPi(code c.Int) c.Int - -//go:linkname UCSIsCatPo C.xmlUCSIsCatPo -func UCSIsCatPo(code 
c.Int) c.Int - -//go:linkname UCSIsCatPs C.xmlUCSIsCatPs -func UCSIsCatPs(code c.Int) c.Int - -//go:linkname UCSIsCatS C.xmlUCSIsCatS -func UCSIsCatS(code c.Int) c.Int - -//go:linkname UCSIsCatSc C.xmlUCSIsCatSc -func UCSIsCatSc(code c.Int) c.Int - -//go:linkname UCSIsCatSk C.xmlUCSIsCatSk -func UCSIsCatSk(code c.Int) c.Int - -//go:linkname UCSIsCatSm C.xmlUCSIsCatSm -func UCSIsCatSm(code c.Int) c.Int - -//go:linkname UCSIsCatSo C.xmlUCSIsCatSo -func UCSIsCatSo(code c.Int) c.Int - -//go:linkname UCSIsCatZ C.xmlUCSIsCatZ -func UCSIsCatZ(code c.Int) c.Int - -//go:linkname UCSIsCatZl C.xmlUCSIsCatZl -func UCSIsCatZl(code c.Int) c.Int - -//go:linkname UCSIsCatZp C.xmlUCSIsCatZp -func UCSIsCatZp(code c.Int) c.Int - -//go:linkname UCSIsCatZs C.xmlUCSIsCatZs -func UCSIsCatZs(code c.Int) c.Int - -//go:linkname UCSIsCat C.xmlUCSIsCat -func UCSIsCat(code c.Int, cat *c.Char) c.Int diff --git a/libxml2/xmlversion.go b/libxml2/xmlversion.go deleted file mode 100644 index d2b4704a..00000000 --- a/libxml2/xmlversion.go +++ /dev/null @@ -1,9 +0,0 @@ -package libxml2 - -import _ "unsafe" - -const DOTTED_VERSION = "2.13.6" -const VERSION = 21306 -const VERSION_STRING = "21306" -const VERSION_EXTRA = "" -const MODULE_EXTENSION = ".so" diff --git a/libxml2/xmlwriter.go b/libxml2/xmlwriter.go deleted file mode 100644 index b9142c92..00000000 --- a/libxml2/xmlwriter.go +++ /dev/null @@ -1,324 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -type X_xmlTextWriter struct { - Unused [8]uint8 -} -type TextWriter X_xmlTextWriter -type TextWriterPtr *TextWriter - -/* - * Constructors & Destructor - */ -//go:linkname NewTextWriter C.xmlNewTextWriter -func NewTextWriter(out OutputBufferPtr) TextWriterPtr - -//go:linkname NewTextWriterFilename C.xmlNewTextWriterFilename -func NewTextWriterFilename(uri *c.Char, compression c.Int) TextWriterPtr - -//go:linkname NewTextWriterMemory C.xmlNewTextWriterMemory -func NewTextWriterMemory(buf BufferPtr, compression c.Int) 
TextWriterPtr - -//go:linkname NewTextWriterPushParser C.xmlNewTextWriterPushParser -func NewTextWriterPushParser(ctxt ParserCtxtPtr, compression c.Int) TextWriterPtr - -//go:linkname NewTextWriterDoc C.xmlNewTextWriterDoc -func NewTextWriterDoc(doc *DocPtr, compression c.Int) TextWriterPtr - -//go:linkname NewTextWriterTree C.xmlNewTextWriterTree -func NewTextWriterTree(doc DocPtr, node NodePtr, compression c.Int) TextWriterPtr - -//go:linkname FreeTextWriter C.xmlFreeTextWriter -func FreeTextWriter(writer TextWriterPtr) - -/* - * Document - */ -//go:linkname TextWriterStartDocument C.xmlTextWriterStartDocument -func TextWriterStartDocument(writer TextWriterPtr, version *c.Char, encoding *c.Char, standalone *c.Char) c.Int - -//go:linkname TextWriterEndDocument C.xmlTextWriterEndDocument -func TextWriterEndDocument(writer TextWriterPtr) c.Int - -/* - * Comments - */ -//go:linkname TextWriterStartComment C.xmlTextWriterStartComment -func TextWriterStartComment(writer TextWriterPtr) c.Int - -//go:linkname TextWriterEndComment C.xmlTextWriterEndComment -func TextWriterEndComment(writer TextWriterPtr) c.Int - -//go:linkname TextWriterWriteFormatComment C.xmlTextWriterWriteFormatComment -func TextWriterWriteFormatComment(writer TextWriterPtr, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatComment C.xmlTextWriterWriteVFormatComment -func TextWriterWriteVFormatComment(writer TextWriterPtr, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteComment C.xmlTextWriterWriteComment -func TextWriterWriteComment(writer TextWriterPtr, content *Char) c.Int - -/* - * Elements - */ -//go:linkname TextWriterStartElement C.xmlTextWriterStartElement -func TextWriterStartElement(writer TextWriterPtr, name *Char) c.Int - -//go:linkname TextWriterStartElementNS C.xmlTextWriterStartElementNS -func TextWriterStartElementNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char) c.Int - -//go:linkname 
TextWriterEndElement C.xmlTextWriterEndElement -func TextWriterEndElement(writer TextWriterPtr) c.Int - -//go:linkname TextWriterFullEndElement C.xmlTextWriterFullEndElement -func TextWriterFullEndElement(writer TextWriterPtr) c.Int - -/* - * Elements conveniency functions - */ -//go:linkname TextWriterWriteFormatElement C.xmlTextWriterWriteFormatElement -func TextWriterWriteFormatElement(writer TextWriterPtr, name *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatElement C.xmlTextWriterWriteVFormatElement -func TextWriterWriteVFormatElement(writer TextWriterPtr, name *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteElement C.xmlTextWriterWriteElement -func TextWriterWriteElement(writer TextWriterPtr, name *Char, content *Char) c.Int - -//go:linkname TextWriterWriteFormatElementNS C.xmlTextWriterWriteFormatElementNS -func TextWriterWriteFormatElementNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatElementNS C.xmlTextWriterWriteVFormatElementNS -func TextWriterWriteVFormatElementNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteElementNS C.xmlTextWriterWriteElementNS -func TextWriterWriteElementNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, content *Char) c.Int - -/* - * Text - */ -//go:linkname TextWriterWriteFormatRaw C.xmlTextWriterWriteFormatRaw -func TextWriterWriteFormatRaw(writer TextWriterPtr, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatRaw C.xmlTextWriterWriteVFormatRaw -func TextWriterWriteVFormatRaw(writer TextWriterPtr, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteRawLen C.xmlTextWriterWriteRawLen -func TextWriterWriteRawLen(writer TextWriterPtr, content *Char, len c.Int) c.Int - 
-//go:linkname TextWriterWriteRaw C.xmlTextWriterWriteRaw -func TextWriterWriteRaw(writer TextWriterPtr, content *Char) c.Int - -//go:linkname TextWriterWriteFormatString C.xmlTextWriterWriteFormatString -func TextWriterWriteFormatString(writer TextWriterPtr, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatString C.xmlTextWriterWriteVFormatString -func TextWriterWriteVFormatString(writer TextWriterPtr, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteString C.xmlTextWriterWriteString -func TextWriterWriteString(writer TextWriterPtr, content *Char) c.Int - -//go:linkname TextWriterWriteBase64 C.xmlTextWriterWriteBase64 -func TextWriterWriteBase64(writer TextWriterPtr, data *c.Char, start c.Int, len c.Int) c.Int - -//go:linkname TextWriterWriteBinHex C.xmlTextWriterWriteBinHex -func TextWriterWriteBinHex(writer TextWriterPtr, data *c.Char, start c.Int, len c.Int) c.Int - -/* - * Attributes - */ -//go:linkname TextWriterStartAttribute C.xmlTextWriterStartAttribute -func TextWriterStartAttribute(writer TextWriterPtr, name *Char) c.Int - -//go:linkname TextWriterStartAttributeNS C.xmlTextWriterStartAttributeNS -func TextWriterStartAttributeNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char) c.Int - -//go:linkname TextWriterEndAttribute C.xmlTextWriterEndAttribute -func TextWriterEndAttribute(writer TextWriterPtr) c.Int - -/* - * Attributes conveniency functions - */ -//go:linkname TextWriterWriteFormatAttribute C.xmlTextWriterWriteFormatAttribute -func TextWriterWriteFormatAttribute(writer TextWriterPtr, name *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatAttribute C.xmlTextWriterWriteVFormatAttribute -func TextWriterWriteVFormatAttribute(writer TextWriterPtr, name *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteAttribute C.xmlTextWriterWriteAttribute -func TextWriterWriteAttribute(writer TextWriterPtr, 
name *Char, content *Char) c.Int - -//go:linkname TextWriterWriteFormatAttributeNS C.xmlTextWriterWriteFormatAttributeNS -func TextWriterWriteFormatAttributeNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatAttributeNS C.xmlTextWriterWriteVFormatAttributeNS -func TextWriterWriteVFormatAttributeNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteAttributeNS C.xmlTextWriterWriteAttributeNS -func TextWriterWriteAttributeNS(writer TextWriterPtr, prefix *Char, name *Char, namespaceURI *Char, content *Char) c.Int - -/* - * PI's - */ -//go:linkname TextWriterStartPI C.xmlTextWriterStartPI -func TextWriterStartPI(writer TextWriterPtr, target *Char) c.Int - -//go:linkname TextWriterEndPI C.xmlTextWriterEndPI -func TextWriterEndPI(writer TextWriterPtr) c.Int - -/* - * PI conveniency functions - */ -//go:linkname TextWriterWriteFormatPI C.xmlTextWriterWriteFormatPI -func TextWriterWriteFormatPI(writer TextWriterPtr, target *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatPI C.xmlTextWriterWriteVFormatPI -func TextWriterWriteVFormatPI(writer TextWriterPtr, target *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWritePI C.xmlTextWriterWritePI -func TextWriterWritePI(writer TextWriterPtr, target *Char, content *Char) c.Int - -/* - * CDATA - */ -//go:linkname TextWriterStartCDATA C.xmlTextWriterStartCDATA -func TextWriterStartCDATA(writer TextWriterPtr) c.Int - -//go:linkname TextWriterEndCDATA C.xmlTextWriterEndCDATA -func TextWriterEndCDATA(writer TextWriterPtr) c.Int - -/* - * CDATA conveniency functions - */ -//go:linkname TextWriterWriteFormatCDATA C.xmlTextWriterWriteFormatCDATA -func TextWriterWriteFormatCDATA(writer TextWriterPtr, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname 
TextWriterWriteVFormatCDATA C.xmlTextWriterWriteVFormatCDATA -func TextWriterWriteVFormatCDATA(writer TextWriterPtr, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteCDATA C.xmlTextWriterWriteCDATA -func TextWriterWriteCDATA(writer TextWriterPtr, content *Char) c.Int - -/* - * DTD - */ -//go:linkname TextWriterStartDTD C.xmlTextWriterStartDTD -func TextWriterStartDTD(writer TextWriterPtr, name *Char, pubid *Char, sysid *Char) c.Int - -//go:linkname TextWriterEndDTD C.xmlTextWriterEndDTD -func TextWriterEndDTD(writer TextWriterPtr) c.Int - -/* - * DTD conveniency functions - */ -//go:linkname TextWriterWriteFormatDTD C.xmlTextWriterWriteFormatDTD -func TextWriterWriteFormatDTD(writer TextWriterPtr, name *Char, pubid *Char, sysid *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatDTD C.xmlTextWriterWriteVFormatDTD -func TextWriterWriteVFormatDTD(writer TextWriterPtr, name *Char, pubid *Char, sysid *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteDTD C.xmlTextWriterWriteDTD -func TextWriterWriteDTD(writer TextWriterPtr, name *Char, pubid *Char, sysid *Char, subset *Char) c.Int - -/* - * DTD element definition - */ -//go:linkname TextWriterStartDTDElement C.xmlTextWriterStartDTDElement -func TextWriterStartDTDElement(writer TextWriterPtr, name *Char) c.Int - -//go:linkname TextWriterEndDTDElement C.xmlTextWriterEndDTDElement -func TextWriterEndDTDElement(writer TextWriterPtr) c.Int - -/* - * DTD element definition conveniency functions - */ -//go:linkname TextWriterWriteFormatDTDElement C.xmlTextWriterWriteFormatDTDElement -func TextWriterWriteFormatDTDElement(writer TextWriterPtr, name *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatDTDElement C.xmlTextWriterWriteVFormatDTDElement -func TextWriterWriteVFormatDTDElement(writer TextWriterPtr, name *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname 
TextWriterWriteDTDElement C.xmlTextWriterWriteDTDElement -func TextWriterWriteDTDElement(writer TextWriterPtr, name *Char, content *Char) c.Int - -/* - * DTD attribute list definition - */ -//go:linkname TextWriterStartDTDAttlist C.xmlTextWriterStartDTDAttlist -func TextWriterStartDTDAttlist(writer TextWriterPtr, name *Char) c.Int - -//go:linkname TextWriterEndDTDAttlist C.xmlTextWriterEndDTDAttlist -func TextWriterEndDTDAttlist(writer TextWriterPtr) c.Int - -/* - * DTD attribute list definition conveniency functions - */ -//go:linkname TextWriterWriteFormatDTDAttlist C.xmlTextWriterWriteFormatDTDAttlist -func TextWriterWriteFormatDTDAttlist(writer TextWriterPtr, name *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatDTDAttlist C.xmlTextWriterWriteVFormatDTDAttlist -func TextWriterWriteVFormatDTDAttlist(writer TextWriterPtr, name *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteDTDAttlist C.xmlTextWriterWriteDTDAttlist -func TextWriterWriteDTDAttlist(writer TextWriterPtr, name *Char, content *Char) c.Int - -/* - * DTD entity definition - */ -//go:linkname TextWriterStartDTDEntity C.xmlTextWriterStartDTDEntity -func TextWriterStartDTDEntity(writer TextWriterPtr, pe c.Int, name *Char) c.Int - -//go:linkname TextWriterEndDTDEntity C.xmlTextWriterEndDTDEntity -func TextWriterEndDTDEntity(writer TextWriterPtr) c.Int - -/* - * DTD entity definition conveniency functions - */ -//go:linkname TextWriterWriteFormatDTDInternalEntity C.xmlTextWriterWriteFormatDTDInternalEntity -func TextWriterWriteFormatDTDInternalEntity(writer TextWriterPtr, pe c.Int, name *Char, format *c.Char, __llgo_va_list ...interface{}) c.Int - -//go:linkname TextWriterWriteVFormatDTDInternalEntity C.xmlTextWriterWriteVFormatDTDInternalEntity -func TextWriterWriteVFormatDTDInternalEntity(writer TextWriterPtr, pe c.Int, name *Char, format *c.Char, argptr c.VaList) c.Int - -//go:linkname TextWriterWriteDTDInternalEntity 
C.xmlTextWriterWriteDTDInternalEntity -func TextWriterWriteDTDInternalEntity(writer TextWriterPtr, pe c.Int, name *Char, content *Char) c.Int - -//go:linkname TextWriterWriteDTDExternalEntity C.xmlTextWriterWriteDTDExternalEntity -func TextWriterWriteDTDExternalEntity(writer TextWriterPtr, pe c.Int, name *Char, pubid *Char, sysid *Char, ndataid *Char) c.Int - -//go:linkname TextWriterWriteDTDExternalEntityContents C.xmlTextWriterWriteDTDExternalEntityContents -func TextWriterWriteDTDExternalEntityContents(writer TextWriterPtr, pubid *Char, sysid *Char, ndataid *Char) c.Int - -//go:linkname TextWriterWriteDTDEntity C.xmlTextWriterWriteDTDEntity -func TextWriterWriteDTDEntity(writer TextWriterPtr, pe c.Int, name *Char, pubid *Char, sysid *Char, ndataid *Char, content *Char) c.Int - -/* - * DTD notation definition - */ -//go:linkname TextWriterWriteDTDNotation C.xmlTextWriterWriteDTDNotation -func TextWriterWriteDTDNotation(writer TextWriterPtr, name *Char, pubid *Char, sysid *Char) c.Int - -/* - * Indentation - */ -//go:linkname TextWriterSetIndent C.xmlTextWriterSetIndent -func TextWriterSetIndent(writer TextWriterPtr, indent c.Int) c.Int - -//go:linkname TextWriterSetIndentString C.xmlTextWriterSetIndentString -func TextWriterSetIndentString(writer TextWriterPtr, str *Char) c.Int - -//go:linkname TextWriterSetQuoteChar C.xmlTextWriterSetQuoteChar -func TextWriterSetQuoteChar(writer TextWriterPtr, quotechar Char) c.Int - -/* - * misc - */ -//go:linkname TextWriterFlush C.xmlTextWriterFlush -func TextWriterFlush(writer TextWriterPtr) c.Int - -//go:linkname TextWriterClose C.xmlTextWriterClose -func TextWriterClose(writer TextWriterPtr) c.Int diff --git a/libxml2/xpath.go b/libxml2/xpath.go deleted file mode 100644 index 37430ff2..00000000 --- a/libxml2/xpath.go +++ /dev/null @@ -1,335 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const XPATH_POINT = 5 -const XPATH_RANGE = 6 -const XPATH_LOCATIONSET = 7 - -type X_xmlXPathContext 
struct { - Doc DocPtr - Node NodePtr - NbVariablesUnused c.Int - MaxVariablesUnused c.Int - VarHash HashTablePtr - NbTypes c.Int - MaxTypes c.Int - Types XPathTypePtr - NbFuncsUnused c.Int - MaxFuncsUnused c.Int - FuncHash HashTablePtr - NbAxis c.Int - MaxAxis c.Int - Axis XPathAxisPtr - Namespaces *NsPtr - NsNr c.Int - User c.Pointer - ContextSize c.Int - ProximityPosition c.Int - Xptr c.Int - Here NodePtr - Origin NodePtr - NsHash HashTablePtr - VarLookupFunc XPathVariableLookupFunc - VarLookupData c.Pointer - Extra c.Pointer - Function *Char - FunctionURI *Char - FuncLookupFunc XPathFuncLookupFunc - FuncLookupData c.Pointer - TmpNsList *NsPtr - TmpNsNr c.Int - UserData c.Pointer - Error StructuredErrorFunc - LastError Error - DebugNode NodePtr - Dict DictPtr - Flags c.Int - Cache c.Pointer - OpLimit c.Ulong - OpCount c.Ulong - Depth c.Int -} -type XPathContext X_xmlXPathContext -type XPathContextPtr *XPathContext - -type X_xmlXPathParserContext struct { - Cur *Char - Base *Char - Error c.Int - Context XPathContextPtr - Value XPathObjectPtr - ValueNr c.Int - ValueMax c.Int - ValueTab *XPathObjectPtr - Comp XPathCompExprPtr - Xptr c.Int - Ancestor NodePtr - ValueFrame c.Int -} -type XPathParserContext X_xmlXPathParserContext -type XPathParserContextPtr *XPathParserContext -type XPathError c.Int - -const ( - XPATH_EXPRESSION_OK__1 XPathError = 0 - XPATH_NUMBER_ERROR__1 XPathError = 1 - XPATH_UNFINISHED_LITERAL_ERROR__1 XPathError = 2 - XPATH_START_LITERAL_ERROR__1 XPathError = 3 - XPATH_VARIABLE_REF_ERROR__1 XPathError = 4 - XPATH_UNDEF_VARIABLE_ERROR__1 XPathError = 5 - XPATH_INVALID_PREDICATE_ERROR__1 XPathError = 6 - XPATH_EXPR_ERROR__1 XPathError = 7 - XPATH_UNCLOSED_ERROR__1 XPathError = 8 - XPATH_UNKNOWN_FUNC_ERROR__1 XPathError = 9 - XPATH_INVALID_OPERAND__1 XPathError = 10 - XPATH_INVALID_TYPE__1 XPathError = 11 - XPATH_INVALID_ARITY__1 XPathError = 12 - XPATH_INVALID_CTXT_SIZE__1 XPathError = 13 - XPATH_INVALID_CTXT_POSITION__1 XPathError = 14 - 
XPATH_MEMORY_ERROR__1 XPathError = 15 - XPTR_SYNTAX_ERROR__1 XPathError = 16 - XPTR_RESOURCE_ERROR__1 XPathError = 17 - XPTR_SUB_RESOURCE_ERROR__1 XPathError = 18 - XPATH_UNDEF_PREFIX_ERROR__1 XPathError = 19 - XPATH_ENCODING_ERROR__1 XPathError = 20 - XPATH_INVALID_CHAR_ERROR__1 XPathError = 21 - XPATH_INVALID_CTXT XPathError = 22 - XPATH_STACK_ERROR XPathError = 23 - XPATH_FORBID_VARIABLE_ERROR XPathError = 24 - XPATH_OP_LIMIT_EXCEEDED XPathError = 25 - XPATH_RECURSION_LIMIT_EXCEEDED XPathError = 26 -) - -type X_xmlNodeSet struct { - NodeNr c.Int - NodeMax c.Int - NodeTab *NodePtr -} -type NodeSet X_xmlNodeSet -type NodeSetPtr *NodeSet -type XPathObjectType c.Int - -const ( - XPATH_UNDEFINED XPathObjectType = 0 - XPATH_NODESET XPathObjectType = 1 - XPATH_BOOLEAN XPathObjectType = 2 - XPATH_NUMBER XPathObjectType = 3 - XPATH_STRING XPathObjectType = 4 - XPATH_USERS XPathObjectType = 8 - XPATH_XSLT_TREE XPathObjectType = 9 -) - -type X_xmlXPathObject struct { - Type XPathObjectType - Nodesetval NodeSetPtr - Boolval c.Int - Floatval c.Double - Stringval *Char - User c.Pointer - Index c.Int - User2 c.Pointer - Index2 c.Int -} -type XPathObject X_xmlXPathObject -type XPathObjectPtr *XPathObject - -// llgo:type C -type XPathConvertFunc func(XPathObjectPtr, c.Int) c.Int - -type X_xmlXPathType struct { - Name *Char - Func XPathConvertFunc -} -type XPathType X_xmlXPathType -type XPathTypePtr *XPathType - -type X_xmlXPathVariable struct { - Name *Char - Value XPathObjectPtr -} -type XPathVariable X_xmlXPathVariable -type XPathVariablePtr *XPathVariable - -// llgo:type C -type XPathEvalFunc func(XPathParserContextPtr, c.Int) - -type X_xmlXPathFunct struct { - Name *Char - Func XPathEvalFunc -} -type XPathFunct X_xmlXPathFunct -type XPathFuncPtr *XPathFunct - -// llgo:type C -type XPathAxisFunc func(XPathParserContextPtr, XPathObjectPtr) XPathObjectPtr - -type X_xmlXPathAxis struct { - Name *Char - Func XPathAxisFunc -} -type XPathAxis X_xmlXPathAxis -type XPathAxisPtr 
*XPathAxis - -// llgo:type C -type XPathFunction func(XPathParserContextPtr, c.Int) - -// llgo:type C -type XPathVariableLookupFunc func(c.Pointer, *Char, *Char) XPathObjectPtr - -// llgo:type C -type XPathFuncLookupFunc func(c.Pointer, *Char, *Char) XPathFunction - -type X_xmlXPathCompExpr struct { - Unused [8]uint8 -} -type XPathCompExpr X_xmlXPathCompExpr -type XPathCompExprPtr *XPathCompExpr - -//go:linkname XPathFreeObject C.xmlXPathFreeObject -func XPathFreeObject(obj XPathObjectPtr) - -//go:linkname XPathNodeSetCreate C.xmlXPathNodeSetCreate -func XPathNodeSetCreate(val NodePtr) NodeSetPtr - -//go:linkname XPathFreeNodeSetList C.xmlXPathFreeNodeSetList -func XPathFreeNodeSetList(obj XPathObjectPtr) - -//go:linkname XPathFreeNodeSet C.xmlXPathFreeNodeSet -func XPathFreeNodeSet(obj NodeSetPtr) - -//go:linkname XPathObjectCopy C.xmlXPathObjectCopy -func XPathObjectCopy(val XPathObjectPtr) XPathObjectPtr - -//go:linkname XPathCmpNodes C.xmlXPathCmpNodes -func XPathCmpNodes(node1 NodePtr, node2 NodePtr) c.Int - -/** - * Conversion functions to basic types. 
- */ -//go:linkname XPathCastNumberToBoolean C.xmlXPathCastNumberToBoolean -func XPathCastNumberToBoolean(val c.Double) c.Int - -// llgo:link (*Char).XPathCastStringToBoolean C.xmlXPathCastStringToBoolean -func (recv_ *Char) XPathCastStringToBoolean() c.Int { - return 0 -} - -//go:linkname XPathCastNodeSetToBoolean C.xmlXPathCastNodeSetToBoolean -func XPathCastNodeSetToBoolean(ns NodeSetPtr) c.Int - -//go:linkname XPathCastToBoolean C.xmlXPathCastToBoolean -func XPathCastToBoolean(val XPathObjectPtr) c.Int - -//go:linkname XPathCastBooleanToNumber C.xmlXPathCastBooleanToNumber -func XPathCastBooleanToNumber(val c.Int) c.Double - -// llgo:link (*Char).XPathCastStringToNumber C.xmlXPathCastStringToNumber -func (recv_ *Char) XPathCastStringToNumber() c.Double { - return 0 -} - -//go:linkname XPathCastNodeToNumber C.xmlXPathCastNodeToNumber -func XPathCastNodeToNumber(node NodePtr) c.Double - -//go:linkname XPathCastNodeSetToNumber C.xmlXPathCastNodeSetToNumber -func XPathCastNodeSetToNumber(ns NodeSetPtr) c.Double - -//go:linkname XPathCastToNumber C.xmlXPathCastToNumber -func XPathCastToNumber(val XPathObjectPtr) c.Double - -//go:linkname XPathCastBooleanToString C.xmlXPathCastBooleanToString -func XPathCastBooleanToString(val c.Int) *Char - -//go:linkname XPathCastNumberToString C.xmlXPathCastNumberToString -func XPathCastNumberToString(val c.Double) *Char - -//go:linkname XPathCastNodeToString C.xmlXPathCastNodeToString -func XPathCastNodeToString(node NodePtr) *Char - -//go:linkname XPathCastNodeSetToString C.xmlXPathCastNodeSetToString -func XPathCastNodeSetToString(ns NodeSetPtr) *Char - -//go:linkname XPathCastToString C.xmlXPathCastToString -func XPathCastToString(val XPathObjectPtr) *Char - -//go:linkname XPathConvertBoolean C.xmlXPathConvertBoolean -func XPathConvertBoolean(val XPathObjectPtr) XPathObjectPtr - -//go:linkname XPathConvertNumber C.xmlXPathConvertNumber -func XPathConvertNumber(val XPathObjectPtr) XPathObjectPtr - -//go:linkname 
XPathConvertString C.xmlXPathConvertString -func XPathConvertString(val XPathObjectPtr) XPathObjectPtr - -/** - * Context handling. - */ -//go:linkname XPathNewContext C.xmlXPathNewContext -func XPathNewContext(doc DocPtr) XPathContextPtr - -//go:linkname XPathFreeContext C.xmlXPathFreeContext -func XPathFreeContext(ctxt XPathContextPtr) - -//go:linkname XPathSetErrorHandler C.xmlXPathSetErrorHandler -func XPathSetErrorHandler(ctxt XPathContextPtr, handler StructuredErrorFunc, context c.Pointer) - -//go:linkname XPathContextSetCache C.xmlXPathContextSetCache -func XPathContextSetCache(ctxt XPathContextPtr, active c.Int, value c.Int, options c.Int) c.Int - -/** - * Evaluation functions. - */ -//go:linkname XPathOrderDocElems C.xmlXPathOrderDocElems -func XPathOrderDocElems(doc DocPtr) c.Long - -//go:linkname XPathSetContextNode C.xmlXPathSetContextNode -func XPathSetContextNode(node NodePtr, ctx XPathContextPtr) c.Int - -//go:linkname XPathNodeEval C.xmlXPathNodeEval -func XPathNodeEval(node NodePtr, str *Char, ctx XPathContextPtr) XPathObjectPtr - -// llgo:link (*Char).XPathEval C.xmlXPathEval -func (recv_ *Char) XPathEval(ctx XPathContextPtr) XPathObjectPtr { - return nil -} - -// llgo:link (*Char).XPathEvalExpression C.xmlXPathEvalExpression -func (recv_ *Char) XPathEvalExpression(ctxt XPathContextPtr) XPathObjectPtr { - return nil -} - -//go:linkname XPathEvalPredicate C.xmlXPathEvalPredicate -func XPathEvalPredicate(ctxt XPathContextPtr, res XPathObjectPtr) c.Int - -/** - * Separate compilation/evaluation entry points. 
- */ -// llgo:link (*Char).XPathCompile C.xmlXPathCompile -func (recv_ *Char) XPathCompile() XPathCompExprPtr { - return nil -} - -//go:linkname XPathCtxtCompile C.xmlXPathCtxtCompile -func XPathCtxtCompile(ctxt XPathContextPtr, str *Char) XPathCompExprPtr - -//go:linkname XPathCompiledEval C.xmlXPathCompiledEval -func XPathCompiledEval(comp XPathCompExprPtr, ctx XPathContextPtr) XPathObjectPtr - -//go:linkname XPathCompiledEvalToBoolean C.xmlXPathCompiledEvalToBoolean -func XPathCompiledEvalToBoolean(comp XPathCompExprPtr, ctxt XPathContextPtr) c.Int - -//go:linkname XPathFreeCompExpr C.xmlXPathFreeCompExpr -func XPathFreeCompExpr(comp XPathCompExprPtr) - -//go:linkname XPathInit C.xmlXPathInit -func XPathInit() - -//go:linkname XPathIsNaN C.xmlXPathIsNaN -func XPathIsNaN(val c.Double) c.Int - -//go:linkname XPathIsInf C.xmlXPathIsInf -func XPathIsInf(val c.Double) c.Int diff --git a/libxml2/xpathInternals.go b/libxml2/xpathInternals.go deleted file mode 100644 index 618a9850..00000000 --- a/libxml2/xpathInternals.go +++ /dev/null @@ -1,398 +0,0 @@ -package libxml2 - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -//go:linkname XPathPopBoolean C.xmlXPathPopBoolean -func XPathPopBoolean(ctxt XPathParserContextPtr) c.Int - -//go:linkname XPathPopNumber C.xmlXPathPopNumber -func XPathPopNumber(ctxt XPathParserContextPtr) c.Double - -//go:linkname XPathPopString C.xmlXPathPopString -func XPathPopString(ctxt XPathParserContextPtr) *Char - -//go:linkname XPathPopNodeSet C.xmlXPathPopNodeSet -func XPathPopNodeSet(ctxt XPathParserContextPtr) NodeSetPtr - -//go:linkname XPathPopExternal C.xmlXPathPopExternal -func XPathPopExternal(ctxt XPathParserContextPtr) c.Pointer - -/* - * Variable Lookup forwarding. - */ -//go:linkname XPathRegisterVariableLookup C.xmlXPathRegisterVariableLookup -func XPathRegisterVariableLookup(ctxt XPathContextPtr, f XPathVariableLookupFunc, data c.Pointer) - -/* - * Function Lookup forwarding. 
- */ -//go:linkname XPathRegisterFuncLookup C.xmlXPathRegisterFuncLookup -func XPathRegisterFuncLookup(ctxt XPathContextPtr, f XPathFuncLookupFunc, funcCtxt c.Pointer) - -/* - * Error reporting. - */ -//go:linkname XPatherror C.xmlXPatherror -func XPatherror(ctxt XPathParserContextPtr, file *c.Char, line c.Int, no c.Int) - -//go:linkname XPathErr C.xmlXPathErr -func XPathErr(ctxt XPathParserContextPtr, error c.Int) - -//go:linkname XPathDebugDumpObject C.xmlXPathDebugDumpObject -func XPathDebugDumpObject(output *c.FILE, cur XPathObjectPtr, depth c.Int) - -//go:linkname XPathDebugDumpCompExpr C.xmlXPathDebugDumpCompExpr -func XPathDebugDumpCompExpr(output *c.FILE, comp XPathCompExprPtr, depth c.Int) - -/** - * NodeSet handling. - */ -//go:linkname XPathNodeSetContains C.xmlXPathNodeSetContains -func XPathNodeSetContains(cur NodeSetPtr, val NodePtr) c.Int - -//go:linkname XPathDifference C.xmlXPathDifference -func XPathDifference(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -//go:linkname XPathIntersection C.xmlXPathIntersection -func XPathIntersection(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -//go:linkname XPathDistinctSorted C.xmlXPathDistinctSorted -func XPathDistinctSorted(nodes NodeSetPtr) NodeSetPtr - -//go:linkname XPathDistinct C.xmlXPathDistinct -func XPathDistinct(nodes NodeSetPtr) NodeSetPtr - -//go:linkname XPathHasSameNodes C.xmlXPathHasSameNodes -func XPathHasSameNodes(nodes1 NodeSetPtr, nodes2 NodeSetPtr) c.Int - -//go:linkname XPathNodeLeadingSorted C.xmlXPathNodeLeadingSorted -func XPathNodeLeadingSorted(nodes NodeSetPtr, node NodePtr) NodeSetPtr - -//go:linkname XPathLeadingSorted C.xmlXPathLeadingSorted -func XPathLeadingSorted(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -//go:linkname XPathNodeLeading C.xmlXPathNodeLeading -func XPathNodeLeading(nodes NodeSetPtr, node NodePtr) NodeSetPtr - -//go:linkname XPathLeading C.xmlXPathLeading -func XPathLeading(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -//go:linkname 
XPathNodeTrailingSorted C.xmlXPathNodeTrailingSorted -func XPathNodeTrailingSorted(nodes NodeSetPtr, node NodePtr) NodeSetPtr - -//go:linkname XPathTrailingSorted C.xmlXPathTrailingSorted -func XPathTrailingSorted(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -//go:linkname XPathNodeTrailing C.xmlXPathNodeTrailing -func XPathNodeTrailing(nodes NodeSetPtr, node NodePtr) NodeSetPtr - -//go:linkname XPathTrailing C.xmlXPathTrailing -func XPathTrailing(nodes1 NodeSetPtr, nodes2 NodeSetPtr) NodeSetPtr - -/** - * Extending a context. - */ -//go:linkname XPathRegisterNs C.xmlXPathRegisterNs -func XPathRegisterNs(ctxt XPathContextPtr, prefix *Char, ns_uri *Char) c.Int - -//go:linkname XPathNsLookup C.xmlXPathNsLookup -func XPathNsLookup(ctxt XPathContextPtr, prefix *Char) *Char - -//go:linkname XPathRegisteredNsCleanup C.xmlXPathRegisteredNsCleanup -func XPathRegisteredNsCleanup(ctxt XPathContextPtr) - -//go:linkname XPathRegisterFunc C.xmlXPathRegisterFunc -func XPathRegisterFunc(ctxt XPathContextPtr, name *Char, f XPathFunction) c.Int - -//go:linkname XPathRegisterFuncNS C.xmlXPathRegisterFuncNS -func XPathRegisterFuncNS(ctxt XPathContextPtr, name *Char, ns_uri *Char, f XPathFunction) c.Int - -//go:linkname XPathRegisterVariable C.xmlXPathRegisterVariable -func XPathRegisterVariable(ctxt XPathContextPtr, name *Char, value XPathObjectPtr) c.Int - -//go:linkname XPathRegisterVariableNS C.xmlXPathRegisterVariableNS -func XPathRegisterVariableNS(ctxt XPathContextPtr, name *Char, ns_uri *Char, value XPathObjectPtr) c.Int - -//go:linkname XPathFunctionLookup C.xmlXPathFunctionLookup -func XPathFunctionLookup(ctxt XPathContextPtr, name *Char) XPathFunction - -//go:linkname XPathFunctionLookupNS C.xmlXPathFunctionLookupNS -func XPathFunctionLookupNS(ctxt XPathContextPtr, name *Char, ns_uri *Char) XPathFunction - -//go:linkname XPathRegisteredFuncsCleanup C.xmlXPathRegisteredFuncsCleanup -func XPathRegisteredFuncsCleanup(ctxt XPathContextPtr) - -//go:linkname 
XPathVariableLookup C.xmlXPathVariableLookup -func XPathVariableLookup(ctxt XPathContextPtr, name *Char) XPathObjectPtr - -//go:linkname XPathVariableLookupNS C.xmlXPathVariableLookupNS -func XPathVariableLookupNS(ctxt XPathContextPtr, name *Char, ns_uri *Char) XPathObjectPtr - -//go:linkname XPathRegisteredVariablesCleanup C.xmlXPathRegisteredVariablesCleanup -func XPathRegisteredVariablesCleanup(ctxt XPathContextPtr) - -/** - * Utilities to extend XPath. - */ -// llgo:link (*Char).XPathNewParserContext C.xmlXPathNewParserContext -func (recv_ *Char) XPathNewParserContext(ctxt XPathContextPtr) XPathParserContextPtr { - return nil -} - -//go:linkname XPathFreeParserContext C.xmlXPathFreeParserContext -func XPathFreeParserContext(ctxt XPathParserContextPtr) - -/* TODO: remap to xmlXPathValuePop and Push. */ -//go:linkname ValuePop C.valuePop -func ValuePop(ctxt XPathParserContextPtr) XPathObjectPtr - -//go:linkname ValuePush C.valuePush -func ValuePush(ctxt XPathParserContextPtr, value XPathObjectPtr) c.Int - -// llgo:link (*Char).XPathNewString C.xmlXPathNewString -func (recv_ *Char) XPathNewString() XPathObjectPtr { - return nil -} - -//go:linkname XPathNewCString C.xmlXPathNewCString -func XPathNewCString(val *c.Char) XPathObjectPtr - -// llgo:link (*Char).XPathWrapString C.xmlXPathWrapString -func (recv_ *Char) XPathWrapString() XPathObjectPtr { - return nil -} - -//go:linkname XPathWrapCString C.xmlXPathWrapCString -func XPathWrapCString(val *c.Char) XPathObjectPtr - -//go:linkname XPathNewFloat C.xmlXPathNewFloat -func XPathNewFloat(val c.Double) XPathObjectPtr - -//go:linkname XPathNewBoolean C.xmlXPathNewBoolean -func XPathNewBoolean(val c.Int) XPathObjectPtr - -//go:linkname XPathNewNodeSet C.xmlXPathNewNodeSet -func XPathNewNodeSet(val NodePtr) XPathObjectPtr - -//go:linkname XPathNewValueTree C.xmlXPathNewValueTree -func XPathNewValueTree(val NodePtr) XPathObjectPtr - -//go:linkname XPathNodeSetAdd C.xmlXPathNodeSetAdd -func XPathNodeSetAdd(cur NodeSetPtr, 
val NodePtr) c.Int - -//go:linkname XPathNodeSetAddUnique C.xmlXPathNodeSetAddUnique -func XPathNodeSetAddUnique(cur NodeSetPtr, val NodePtr) c.Int - -//go:linkname XPathNodeSetAddNs C.xmlXPathNodeSetAddNs -func XPathNodeSetAddNs(cur NodeSetPtr, node NodePtr, ns NsPtr) c.Int - -//go:linkname XPathNodeSetSort C.xmlXPathNodeSetSort -func XPathNodeSetSort(set NodeSetPtr) - -//go:linkname XPathRoot C.xmlXPathRoot -func XPathRoot(ctxt XPathParserContextPtr) - -//go:linkname XPathEvalExpr C.xmlXPathEvalExpr -func XPathEvalExpr(ctxt XPathParserContextPtr) - -//go:linkname XPathParseName C.xmlXPathParseName -func XPathParseName(ctxt XPathParserContextPtr) *Char - -//go:linkname XPathParseNCName C.xmlXPathParseNCName -func XPathParseNCName(ctxt XPathParserContextPtr) *Char - -/* - * Existing functions. - */ -// llgo:link (*Char).XPathStringEvalNumber C.xmlXPathStringEvalNumber -func (recv_ *Char) XPathStringEvalNumber() c.Double { - return 0 -} - -//go:linkname XPathEvaluatePredicateResult C.xmlXPathEvaluatePredicateResult -func XPathEvaluatePredicateResult(ctxt XPathParserContextPtr, res XPathObjectPtr) c.Int - -//go:linkname XPathRegisterAllFunctions C.xmlXPathRegisterAllFunctions -func XPathRegisterAllFunctions(ctxt XPathContextPtr) - -//go:linkname XPathNodeSetMerge C.xmlXPathNodeSetMerge -func XPathNodeSetMerge(val1 NodeSetPtr, val2 NodeSetPtr) NodeSetPtr - -//go:linkname XPathNodeSetDel C.xmlXPathNodeSetDel -func XPathNodeSetDel(cur NodeSetPtr, val NodePtr) - -//go:linkname XPathNodeSetRemove C.xmlXPathNodeSetRemove -func XPathNodeSetRemove(cur NodeSetPtr, val c.Int) - -//go:linkname XPathNewNodeSetList C.xmlXPathNewNodeSetList -func XPathNewNodeSetList(val NodeSetPtr) XPathObjectPtr - -//go:linkname XPathWrapNodeSet C.xmlXPathWrapNodeSet -func XPathWrapNodeSet(val NodeSetPtr) XPathObjectPtr - -//go:linkname XPathWrapExternal C.xmlXPathWrapExternal -func XPathWrapExternal(val c.Pointer) XPathObjectPtr - -//go:linkname XPathEqualValues C.xmlXPathEqualValues -func 
XPathEqualValues(ctxt XPathParserContextPtr) c.Int - -//go:linkname XPathNotEqualValues C.xmlXPathNotEqualValues -func XPathNotEqualValues(ctxt XPathParserContextPtr) c.Int - -//go:linkname XPathCompareValues C.xmlXPathCompareValues -func XPathCompareValues(ctxt XPathParserContextPtr, inf c.Int, strict c.Int) c.Int - -//go:linkname XPathValueFlipSign C.xmlXPathValueFlipSign -func XPathValueFlipSign(ctxt XPathParserContextPtr) - -//go:linkname XPathAddValues C.xmlXPathAddValues -func XPathAddValues(ctxt XPathParserContextPtr) - -//go:linkname XPathSubValues C.xmlXPathSubValues -func XPathSubValues(ctxt XPathParserContextPtr) - -//go:linkname XPathMultValues C.xmlXPathMultValues -func XPathMultValues(ctxt XPathParserContextPtr) - -//go:linkname XPathDivValues C.xmlXPathDivValues -func XPathDivValues(ctxt XPathParserContextPtr) - -//go:linkname XPathModValues C.xmlXPathModValues -func XPathModValues(ctxt XPathParserContextPtr) - -// llgo:link (*Char).XPathIsNodeType C.xmlXPathIsNodeType -func (recv_ *Char) XPathIsNodeType() c.Int { - return 0 -} - -/* - * Some of the axis navigation routines. 
- */ -//go:linkname XPathNextSelf C.xmlXPathNextSelf -func XPathNextSelf(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextChild C.xmlXPathNextChild -func XPathNextChild(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextDescendant C.xmlXPathNextDescendant -func XPathNextDescendant(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextDescendantOrSelf C.xmlXPathNextDescendantOrSelf -func XPathNextDescendantOrSelf(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextParent C.xmlXPathNextParent -func XPathNextParent(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextAncestorOrSelf C.xmlXPathNextAncestorOrSelf -func XPathNextAncestorOrSelf(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextFollowingSibling C.xmlXPathNextFollowingSibling -func XPathNextFollowingSibling(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextFollowing C.xmlXPathNextFollowing -func XPathNextFollowing(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextNamespace C.xmlXPathNextNamespace -func XPathNextNamespace(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextAttribute C.xmlXPathNextAttribute -func XPathNextAttribute(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextPreceding C.xmlXPathNextPreceding -func XPathNextPreceding(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextAncestor C.xmlXPathNextAncestor -func XPathNextAncestor(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -//go:linkname XPathNextPrecedingSibling C.xmlXPathNextPrecedingSibling -func XPathNextPrecedingSibling(ctxt XPathParserContextPtr, cur NodePtr) NodePtr - -/* - * The official core of XPath functions. 
- */ -//go:linkname XPathLastFunction C.xmlXPathLastFunction -func XPathLastFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathPositionFunction C.xmlXPathPositionFunction -func XPathPositionFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathCountFunction C.xmlXPathCountFunction -func XPathCountFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathIdFunction C.xmlXPathIdFunction -func XPathIdFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathLocalNameFunction C.xmlXPathLocalNameFunction -func XPathLocalNameFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathNamespaceURIFunction C.xmlXPathNamespaceURIFunction -func XPathNamespaceURIFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathStringFunction C.xmlXPathStringFunction -func XPathStringFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathStringLengthFunction C.xmlXPathStringLengthFunction -func XPathStringLengthFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathConcatFunction C.xmlXPathConcatFunction -func XPathConcatFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathContainsFunction C.xmlXPathContainsFunction -func XPathContainsFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathStartsWithFunction C.xmlXPathStartsWithFunction -func XPathStartsWithFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathSubstringFunction C.xmlXPathSubstringFunction -func XPathSubstringFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathSubstringBeforeFunction C.xmlXPathSubstringBeforeFunction -func XPathSubstringBeforeFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathSubstringAfterFunction C.xmlXPathSubstringAfterFunction -func XPathSubstringAfterFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathNormalizeFunction C.xmlXPathNormalizeFunction -func 
XPathNormalizeFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathTranslateFunction C.xmlXPathTranslateFunction -func XPathTranslateFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathNotFunction C.xmlXPathNotFunction -func XPathNotFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathTrueFunction C.xmlXPathTrueFunction -func XPathTrueFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathFalseFunction C.xmlXPathFalseFunction -func XPathFalseFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathLangFunction C.xmlXPathLangFunction -func XPathLangFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathNumberFunction C.xmlXPathNumberFunction -func XPathNumberFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathSumFunction C.xmlXPathSumFunction -func XPathSumFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathFloorFunction C.xmlXPathFloorFunction -func XPathFloorFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathCeilingFunction C.xmlXPathCeilingFunction -func XPathCeilingFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathRoundFunction C.xmlXPathRoundFunction -func XPathRoundFunction(ctxt XPathParserContextPtr, nargs c.Int) - -//go:linkname XPathBooleanFunction C.xmlXPathBooleanFunction -func XPathBooleanFunction(ctxt XPathParserContextPtr, nargs c.Int) - -/** - * Really internal functions - */ -//go:linkname XPathNodeSetFreeNs C.xmlXPathNodeSetFreeNs -func XPathNodeSetFreeNs(ns NsPtr) diff --git a/libxml2/xpointer.go b/libxml2/xpointer.go deleted file mode 100644 index d58203e6..00000000 --- a/libxml2/xpointer.go +++ /dev/null @@ -1,14 +0,0 @@ -package libxml2 - -import _ "unsafe" - -/* - * Functions. 
- */ -//go:linkname XPtrNewContext C.xmlXPtrNewContext -func XPtrNewContext(doc DocPtr, here NodePtr, origin NodePtr) XPathContextPtr - -// llgo:link (*Char).XPtrEval C.xmlXPtrEval -func (recv_ *Char) XPtrEval(ctx XPathContextPtr) XPathObjectPtr { - return nil -} diff --git a/libxslt/_demo/withdeplibxml/demo.go b/libxslt/_demo/withdeplibxml/demo.go deleted file mode 100644 index fca0b3fb..00000000 --- a/libxslt/_demo/withdeplibxml/demo.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "fmt" - "os" - "unsafe" - - "github.com/goplus/llpkg/libxslt" - - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" -) - -func main() { - libxml2.InitParser() - - xml := - ` - - - Alice - 25 - - ` - xslt := ` - - - - -

个人信息

-

姓名:

-

年龄:

- - -
-
- ` - xmlDoc := libxml2.ReadMemory((*int8)(unsafe.Pointer(unsafe.StringData(xml))), c.Int(len(xml)), nil, nil, 0) - xsltDoc := libxml2.ReadMemory((*int8)(unsafe.Pointer(unsafe.StringData(xslt))), c.Int(len(xslt)), nil, nil, 0) - - if xmlDoc == nil || xsltDoc == nil { - panic("cant read xml or xslt") - } - - stylesheet := libxslt.ParseStylesheetDoc(xsltDoc) - if stylesheet == nil { - panic("cant parse xslt") - } - result := libxslt.ApplyStylesheet(stylesheet, xmlDoc, (**int8)(unsafe.Pointer(uintptr(0)))) - if result == nil { - panic("cant apply xslt") - } - - libxslt.SaveResultToFilename(c.Str("output.html"), result, stylesheet, 0) - - libxml2.FreeDoc(xmlDoc) - libxml2.FreeDoc(result) - libxslt.FreeStylesheet(stylesheet) - - libxslt.CleanupGlobals() - libxml2.CleanupParser() - - buf, err := os.ReadFile("./output.html") - if err != nil { - panic(err) - } - fmt.Println(string(buf)) -} diff --git a/libxslt/attributes.go b/libxslt/attributes.go deleted file mode 100644 index 6e7b7441..00000000 --- a/libxslt/attributes.go +++ /dev/null @@ -1,18 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname ParseStylesheetAttributeSet C.xsltParseStylesheetAttributeSet -func ParseStylesheetAttributeSet(style StylesheetPtr, cur libxml2.NodePtr) - -//go:linkname FreeAttributeSetsHashes C.xsltFreeAttributeSetsHashes -func FreeAttributeSetsHashes(style StylesheetPtr) - -//go:linkname ApplyAttributeSet C.xsltApplyAttributeSet -func ApplyAttributeSet(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, attributes *libxml2.Char) - -//go:linkname ResolveStylesheetAttributeSet C.xsltResolveStylesheetAttributeSet -func ResolveStylesheetAttributeSet(style StylesheetPtr) diff --git a/libxslt/documents.go b/libxslt/documents.go deleted file mode 100644 index 0134212e..00000000 --- a/libxslt/documents.go +++ /dev/null @@ -1,42 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - 
_ "unsafe" -) - -//go:linkname NewDocument C.xsltNewDocument -func NewDocument(ctxt TransformContextPtr, doc libxml2.DocPtr) DocumentPtr - -//go:linkname LoadDocument C.xsltLoadDocument -func LoadDocument(ctxt TransformContextPtr, URI *libxml2.Char) DocumentPtr - -//go:linkname FindDocument C.xsltFindDocument -func FindDocument(ctxt TransformContextPtr, doc libxml2.DocPtr) DocumentPtr - -//go:linkname FreeDocuments C.xsltFreeDocuments -func FreeDocuments(ctxt TransformContextPtr) - -//go:linkname LoadStyleDocument C.xsltLoadStyleDocument -func LoadStyleDocument(style StylesheetPtr, URI *libxml2.Char) DocumentPtr - -//go:linkname NewStyleDocument C.xsltNewStyleDocument -func NewStyleDocument(style StylesheetPtr, doc libxml2.DocPtr) DocumentPtr - -//go:linkname FreeStyleDocuments C.xsltFreeStyleDocuments -func FreeStyleDocuments(style StylesheetPtr) - -type LoadType c.Int - -const ( - LOAD_START LoadType = 0 - LOAD_STYLESHEET LoadType = 1 - LOAD_DOCUMENT LoadType = 2 -) - -// llgo:type C -type DocLoaderFunc func(*libxml2.Char, libxml2.DictPtr, c.Int, c.Pointer, LoadType) libxml2.DocPtr - -//go:linkname SetLoaderFunc C.xsltSetLoaderFunc -func SetLoaderFunc(f DocLoaderFunc) diff --git a/libxslt/exslt.go b/libxslt/exslt.go deleted file mode 100644 index f830fa57..00000000 --- a/libxslt/exslt.go +++ /dev/null @@ -1,3 +0,0 @@ -package libxslt - -import _ "unsafe" diff --git a/libxslt/exsltconfig.go b/libxslt/exsltconfig.go deleted file mode 100644 index d5718dd3..00000000 --- a/libxslt/exsltconfig.go +++ /dev/null @@ -1,8 +0,0 @@ -package libxslt - -import _ "unsafe" - -const LIBEXSLT_DOTTED_VERSION = "0.8.23" -const LIBEXSLT_VERSION = 823 -const LIBEXSLT_VERSION_STRING = "823" -const LIBEXSLT_VERSION_EXTRA = "" diff --git a/libxslt/exsltexports.go b/libxslt/exsltexports.go deleted file mode 100644 index f830fa57..00000000 --- a/libxslt/exsltexports.go +++ /dev/null @@ -1,3 +0,0 @@ -package libxslt - -import _ "unsafe" diff --git a/libxslt/extensions.go 
b/libxslt/extensions.go deleted file mode 100644 index 2fc5d01e..00000000 --- a/libxslt/extensions.go +++ /dev/null @@ -1,148 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -/** - * xsltInitGlobals: - * - * Initialize the global variables for extensions - * - */ -//go:linkname InitGlobals C.xsltInitGlobals -func InitGlobals() - -// llgo:type C -type StyleExtInitFunction func(StylesheetPtr, *libxml2.Char) c.Pointer - -// llgo:type C -type StyleExtShutdownFunction func(StylesheetPtr, *libxml2.Char, c.Pointer) - -// llgo:type C -type ExtInitFunction func(TransformContextPtr, *libxml2.Char) c.Pointer - -// llgo:type C -type ExtShutdownFunction func(TransformContextPtr, *libxml2.Char, c.Pointer) - -//go:linkname RegisterExtModule C.xsltRegisterExtModule -func RegisterExtModule(URI *libxml2.Char, initFunc ExtInitFunction, shutdownFunc ExtShutdownFunction) c.Int - -//go:linkname RegisterExtModuleFull C.xsltRegisterExtModuleFull -func RegisterExtModuleFull(URI *libxml2.Char, initFunc ExtInitFunction, shutdownFunc ExtShutdownFunction, styleInitFunc StyleExtInitFunction, styleShutdownFunc StyleExtShutdownFunction) c.Int - -//go:linkname UnregisterExtModule C.xsltUnregisterExtModule -func UnregisterExtModule(URI *libxml2.Char) c.Int - -//go:linkname GetExtData C.xsltGetExtData -func GetExtData(ctxt TransformContextPtr, URI *libxml2.Char) c.Pointer - -//go:linkname StyleGetExtData C.xsltStyleGetExtData -func StyleGetExtData(style StylesheetPtr, URI *libxml2.Char) c.Pointer - -//go:linkname ShutdownCtxtExts C.xsltShutdownCtxtExts -func ShutdownCtxtExts(ctxt TransformContextPtr) - -//go:linkname ShutdownExts C.xsltShutdownExts -func ShutdownExts(style StylesheetPtr) - -//go:linkname XPathGetTransformContext C.xsltXPathGetTransformContext -func XPathGetTransformContext(ctxt libxml2.XPathParserContextPtr) TransformContextPtr - -/* - * extension functions - */ -//go:linkname RegisterExtModuleFunction 
C.xsltRegisterExtModuleFunction -func RegisterExtModuleFunction(name *libxml2.Char, URI *libxml2.Char, function libxml2.XPathFunction) c.Int - -//go:linkname ExtModuleFunctionLookup C.xsltExtModuleFunctionLookup -func ExtModuleFunctionLookup(name *libxml2.Char, URI *libxml2.Char) libxml2.XPathFunction - -//go:linkname UnregisterExtModuleFunction C.xsltUnregisterExtModuleFunction -func UnregisterExtModuleFunction(name *libxml2.Char, URI *libxml2.Char) c.Int - -// llgo:type C -type PreComputeFunction func(StylesheetPtr, libxml2.NodePtr, TransformFunction) ElemPreCompPtr - -//go:linkname NewElemPreComp C.xsltNewElemPreComp -func NewElemPreComp(style StylesheetPtr, inst libxml2.NodePtr, function TransformFunction) ElemPreCompPtr - -//go:linkname InitElemPreComp C.xsltInitElemPreComp -func InitElemPreComp(comp ElemPreCompPtr, style StylesheetPtr, inst libxml2.NodePtr, function TransformFunction, freeFunc ElemPreCompDeallocator) - -//go:linkname RegisterExtModuleElement C.xsltRegisterExtModuleElement -func RegisterExtModuleElement(name *libxml2.Char, URI *libxml2.Char, precomp PreComputeFunction, transform TransformFunction) c.Int - -//go:linkname ExtElementLookup C.xsltExtElementLookup -func ExtElementLookup(ctxt TransformContextPtr, name *libxml2.Char, URI *libxml2.Char) TransformFunction - -//go:linkname ExtModuleElementLookup C.xsltExtModuleElementLookup -func ExtModuleElementLookup(name *libxml2.Char, URI *libxml2.Char) TransformFunction - -//go:linkname ExtModuleElementPreComputeLookup C.xsltExtModuleElementPreComputeLookup -func ExtModuleElementPreComputeLookup(name *libxml2.Char, URI *libxml2.Char) PreComputeFunction - -//go:linkname UnregisterExtModuleElement C.xsltUnregisterExtModuleElement -func UnregisterExtModuleElement(name *libxml2.Char, URI *libxml2.Char) c.Int - -// llgo:type C -type TopLevelFunction func(StylesheetPtr, libxml2.NodePtr) - -//go:linkname RegisterExtModuleTopLevel C.xsltRegisterExtModuleTopLevel -func RegisterExtModuleTopLevel(name 
*libxml2.Char, URI *libxml2.Char, function TopLevelFunction) c.Int - -//go:linkname ExtModuleTopLevelLookup C.xsltExtModuleTopLevelLookup -func ExtModuleTopLevelLookup(name *libxml2.Char, URI *libxml2.Char) TopLevelFunction - -//go:linkname UnregisterExtModuleTopLevel C.xsltUnregisterExtModuleTopLevel -func UnregisterExtModuleTopLevel(name *libxml2.Char, URI *libxml2.Char) c.Int - -/* These 2 functions are deprecated for use within modules. */ -//go:linkname RegisterExtFunction C.xsltRegisterExtFunction -func RegisterExtFunction(ctxt TransformContextPtr, name *libxml2.Char, URI *libxml2.Char, function libxml2.XPathFunction) c.Int - -//go:linkname RegisterExtElement C.xsltRegisterExtElement -func RegisterExtElement(ctxt TransformContextPtr, name *libxml2.Char, URI *libxml2.Char, function TransformFunction) c.Int - -/* - * Extension Prefix handling API. - * Those are used by the XSLT (pre)processor. - */ -//go:linkname RegisterExtPrefix C.xsltRegisterExtPrefix -func RegisterExtPrefix(style StylesheetPtr, prefix *libxml2.Char, URI *libxml2.Char) c.Int - -//go:linkname CheckExtPrefix C.xsltCheckExtPrefix -func CheckExtPrefix(style StylesheetPtr, URI *libxml2.Char) c.Int - -//go:linkname CheckExtURI C.xsltCheckExtURI -func CheckExtURI(style StylesheetPtr, URI *libxml2.Char) c.Int - -//go:linkname InitCtxtExts C.xsltInitCtxtExts -func InitCtxtExts(ctxt TransformContextPtr) c.Int - -//go:linkname FreeCtxtExts C.xsltFreeCtxtExts -func FreeCtxtExts(ctxt TransformContextPtr) - -//go:linkname FreeExts C.xsltFreeExts -func FreeExts(style StylesheetPtr) - -//go:linkname PreComputeExtModuleElement C.xsltPreComputeExtModuleElement -func PreComputeExtModuleElement(style StylesheetPtr, inst libxml2.NodePtr) ElemPreCompPtr - -/* - * Extension Infos access. 
- * Used by exslt initialisation - */ -//go:linkname GetExtInfo C.xsltGetExtInfo -func GetExtInfo(style StylesheetPtr, URI *libxml2.Char) libxml2.HashTablePtr - -/** - * Test of the extension module API - */ -//go:linkname RegisterTestModule C.xsltRegisterTestModule -func RegisterTestModule() - -//go:linkname DebugDumpExtensions C.xsltDebugDumpExtensions -func DebugDumpExtensions(output *c.FILE) diff --git a/libxslt/extra.go b/libxslt/extra.go deleted file mode 100644 index 8ee85ac3..00000000 --- a/libxslt/extra.go +++ /dev/null @@ -1,19 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname FunctionNodeSet C.xsltFunctionNodeSet -func FunctionNodeSet(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname Debug C.xsltDebug -func Debug(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname RegisterExtras C.xsltRegisterExtras -func RegisterExtras(ctxt TransformContextPtr) - -//go:linkname RegisterAllExtras C.xsltRegisterAllExtras -func RegisterAllExtras() diff --git a/libxslt/functions.go b/libxslt/functions.go deleted file mode 100644 index a3addd1a..00000000 --- a/libxslt/functions.go +++ /dev/null @@ -1,43 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname XPathFunctionLookup C.xsltXPathFunctionLookup -func XPathFunctionLookup(vctxt c.Pointer, name *libxml2.Char, ns_uri *libxml2.Char) libxml2.XPathFunction - -/* - * Interfaces for the functions implementations. 
- */ -//go:linkname DocumentFunction C.xsltDocumentFunction -func DocumentFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname KeyFunction C.xsltKeyFunction -func KeyFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname UnparsedEntityURIFunction C.xsltUnparsedEntityURIFunction -func UnparsedEntityURIFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname FormatNumberFunction C.xsltFormatNumberFunction -func FormatNumberFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname GenerateIdFunction C.xsltGenerateIdFunction -func GenerateIdFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname SystemPropertyFunction C.xsltSystemPropertyFunction -func SystemPropertyFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname ElementAvailableFunction C.xsltElementAvailableFunction -func ElementAvailableFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -//go:linkname FunctionAvailableFunction C.xsltFunctionAvailableFunction -func FunctionAvailableFunction(ctxt libxml2.XPathParserContextPtr, nargs c.Int) - -/* - * And the registration - */ -//go:linkname RegisterAllFunctions C.xsltRegisterAllFunctions -func RegisterAllFunctions(ctxt libxml2.XPathContextPtr) diff --git a/libxslt/go.mod b/libxslt/go.mod deleted file mode 100644 index fc1541ce..00000000 --- a/libxslt/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/goplus/llpkg/libxslt - -go 1.20 - -require ( - github.com/goplus/lib v0.2.0 - github.com/goplus/llpkg/libxml2 v1.0.3 -) diff --git a/libxslt/go.sum b/libxslt/go.sum deleted file mode 100644 index 812b4bd4..00000000 --- a/libxslt/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/goplus/lib v0.2.0 h1:AjqkN1XK5H23wZMMlpaUYAMCDAdSBQ2NMFrLtSh7W4g= -github.com/goplus/lib v0.2.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= -github.com/goplus/llpkg/libxml2 v1.0.3 h1:yl9wAONIH9OFXaCFcr7y0AGX9OgR5R4tCglnvhcT8bw= 
-github.com/goplus/llpkg/libxml2 v1.0.3/go.mod h1:5YXQ8OhzQeH+udVb1NPEryxH7hAiJ75p6+f5QBy7BpM= diff --git a/libxslt/imports.go b/libxslt/imports.go deleted file mode 100644 index 87552c21..00000000 --- a/libxslt/imports.go +++ /dev/null @@ -1,28 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -/* - * Module interfaces - */ -//go:linkname ParseStylesheetImport C.xsltParseStylesheetImport -func ParseStylesheetImport(style StylesheetPtr, cur libxml2.NodePtr) c.Int - -//go:linkname ParseStylesheetInclude C.xsltParseStylesheetInclude -func ParseStylesheetInclude(style StylesheetPtr, cur libxml2.NodePtr) c.Int - -//go:linkname NextImport C.xsltNextImport -func NextImport(style StylesheetPtr) StylesheetPtr - -//go:linkname NeedElemSpaceHandling C.xsltNeedElemSpaceHandling -func NeedElemSpaceHandling(ctxt TransformContextPtr) c.Int - -//go:linkname FindElemSpaceHandling C.xsltFindElemSpaceHandling -func FindElemSpaceHandling(ctxt TransformContextPtr, node libxml2.NodePtr) c.Int - -//go:linkname FindTemplate C.xsltFindTemplate -func FindTemplate(ctxt TransformContextPtr, name *libxml2.Char, nameURI *libxml2.Char) TemplatePtr diff --git a/libxslt/keys.go b/libxslt/keys.go deleted file mode 100644 index 9767652f..00000000 --- a/libxslt/keys.go +++ /dev/null @@ -1,22 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname AddKey C.xsltAddKey -func AddKey(style StylesheetPtr, name *libxml2.Char, nameURI *libxml2.Char, match *libxml2.Char, use *libxml2.Char, inst libxml2.NodePtr) c.Int - -//go:linkname GetKey C.xsltGetKey -func GetKey(ctxt TransformContextPtr, name *libxml2.Char, nameURI *libxml2.Char, value *libxml2.Char) libxml2.NodeSetPtr - -//go:linkname InitCtxtKeys C.xsltInitCtxtKeys -func InitCtxtKeys(ctxt TransformContextPtr, doc DocumentPtr) - -//go:linkname FreeKeys C.xsltFreeKeys -func FreeKeys(style StylesheetPtr) - 
-//go:linkname FreeDocumentKeys C.xsltFreeDocumentKeys -func FreeDocumentKeys(doc DocumentPtr) diff --git a/libxslt/libxslt_autogen_link.go b/libxslt/libxslt_autogen_link.go deleted file mode 100644 index ebca1c92..00000000 --- a/libxslt/libxslt_autogen_link.go +++ /dev/null @@ -1,9 +0,0 @@ -package libxslt - -import ( - _ "github.com/goplus/lib/c" - _ "github.com/goplus/lib/c/os" - _ "github.com/goplus/llpkg/libxml2" -) - -const LLGoPackage string = "link: $(pkg-config --libs libxslt);" diff --git a/libxslt/llcppg.cfg b/libxslt/llcppg.cfg deleted file mode 100644 index f77bc55b..00000000 --- a/libxslt/llcppg.cfg +++ /dev/null @@ -1,37 +0,0 @@ -{ - "name": "libxslt", - "cflags": "$(pkg-config --cflags libxslt)", - "libs": "$(pkg-config --libs libxslt)", - "include": [ - "libxslt/variables.h", - "libxslt/functions.h", - "libxslt/xsltutils.h", - "libxslt/templates.h", - "libxslt/pattern.h", - "libxslt/preproc.h", - "libxslt/extra.h", - "libxslt/documents.h", - "libxslt/imports.h", - "libxslt/keys.h", - "libxslt/transform.h", - "libxslt/attributes.h", - "libxslt/security.h", - "libxslt/extensions.h", - "libxslt/xsltInternals.h", - "libexslt/exslt.h", - "libxslt/numbersInternals.h", - "libxslt/namespaces.h", - "libxslt/xslt.h", - "libxslt/xsltlocale.h", - "libexslt/exsltexports.h", - "libxslt/xsltconfig.h", - "libxslt/xsltexports.h", - "libexslt/exsltconfig.h" - ], - "trimPrefixes": ["XSLT_","xslt"], - "cplusplus": false, - "deps": ["c/os","github.com/goplus/llpkg/libxml2@v1.0.3"], - "symMap":{ - "xsltSetCtxtLocaleHandlers":"-" - } -} diff --git a/libxslt/llcppg.pub b/libxslt/llcppg.pub deleted file mode 100644 index 4dbe1807..00000000 --- a/libxslt/llcppg.pub +++ /dev/null @@ -1,58 +0,0 @@ -xsltAddCallCallback AddCallCallback -xsltCompMatch CompMatch -xsltCompMatchPtr CompMatchPtr -xsltDebugStatusCodes DebugStatusCodes -xsltDebugTraceCodes DebugTraceCodes -xsltDecimalFormat DecimalFormat -xsltDecimalFormatPtr DecimalFormatPtr -xsltDocLoaderFunc DocLoaderFunc 
-xsltDocument Document -xsltDocumentPtr DocumentPtr -xsltDropCallCallback DropCallCallback -xsltElemPreComp ElemPreComp -xsltElemPreCompDeallocator ElemPreCompDeallocator -xsltElemPreCompPtr ElemPreCompPtr -xsltExtInitFunction ExtInitFunction -xsltExtShutdownFunction ExtShutdownFunction -xsltFormatNumberInfo FormatNumberInfo -xsltFormatNumberInfoPtr FormatNumberInfoPtr -xsltFreeLocaleFunc FreeLocaleFunc -xsltGenSortKeyFunc GenSortKeyFunc -xsltHandleDebuggerCallback HandleDebuggerCallback -xsltKeyDef KeyDef -xsltKeyDefPtr KeyDefPtr -xsltKeyTable KeyTable -xsltKeyTablePtr KeyTablePtr -xsltLoadType LoadType -xsltLocale Locale -xsltLocaleChar LocaleChar -xsltNewLocaleFunc NewLocaleFunc -xsltNumberData NumberData -xsltNumberDataPtr NumberDataPtr -xsltOutputType OutputType -xsltPreComputeFunction PreComputeFunction -xsltRuntimeExtra RuntimeExtra -xsltRuntimeExtraPtr RuntimeExtraPtr -xsltSecurityCheck SecurityCheck -xsltSecurityOption SecurityOption -xsltSecurityPrefs SecurityPrefs -xsltSecurityPrefsPtr SecurityPrefsPtr -xsltSortFunc SortFunc -xsltStackElem StackElem -xsltStackElemPtr StackElemPtr -xsltStyleExtInitFunction StyleExtInitFunction -xsltStyleExtShutdownFunction StyleExtShutdownFunction -xsltStylePreComp StylePreComp -xsltStylePreCompPtr StylePreCompPtr -xsltStyleType StyleType -xsltStylesheet Stylesheet -xsltStylesheetPtr StylesheetPtr -xsltTemplate Template -xsltTemplatePtr TemplatePtr -xsltTopLevelFunction TopLevelFunction -xsltTransformCache TransformCache -xsltTransformCachePtr TransformCachePtr -xsltTransformContext TransformContext -xsltTransformContextPtr TransformContextPtr -xsltTransformFunction TransformFunction -xsltTransformState TransformState \ No newline at end of file diff --git a/libxslt/llpkg.cfg b/libxslt/llpkg.cfg deleted file mode 100644 index a47b88af..00000000 --- a/libxslt/llpkg.cfg +++ /dev/null @@ -1,15 +0,0 @@ - -{ - "upstream": { - "package": { - "name": "libxslt", - "version": "1.1.42" - }, - "installer":{ - "name": "conan", - 
"config" : { - "options": "libxml2/*:iconv=False" - } - } - } -} \ No newline at end of file diff --git a/libxslt/namespaces.go b/libxslt/namespaces.go deleted file mode 100644 index a8fc9911..00000000 --- a/libxslt/namespaces.go +++ /dev/null @@ -1,27 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname NamespaceAlias C.xsltNamespaceAlias -func NamespaceAlias(style StylesheetPtr, node libxml2.NodePtr) - -//go:linkname GetNamespace C.xsltGetNamespace -func GetNamespace(ctxt TransformContextPtr, cur libxml2.NodePtr, ns libxml2.NsPtr, out libxml2.NodePtr) libxml2.NsPtr - -//go:linkname GetPlainNamespace C.xsltGetPlainNamespace -func GetPlainNamespace(ctxt TransformContextPtr, cur libxml2.NodePtr, ns libxml2.NsPtr, out libxml2.NodePtr) libxml2.NsPtr - -//go:linkname GetSpecialNamespace C.xsltGetSpecialNamespace -func GetSpecialNamespace(ctxt TransformContextPtr, cur libxml2.NodePtr, URI *libxml2.Char, prefix *libxml2.Char, out libxml2.NodePtr) libxml2.NsPtr - -//go:linkname CopyNamespace C.xsltCopyNamespace -func CopyNamespace(ctxt TransformContextPtr, elem libxml2.NodePtr, ns libxml2.NsPtr) libxml2.NsPtr - -//go:linkname CopyNamespaceList C.xsltCopyNamespaceList -func CopyNamespaceList(ctxt TransformContextPtr, node libxml2.NodePtr, cur libxml2.NsPtr) libxml2.NsPtr - -//go:linkname FreeNamespaceAliasHashes C.xsltFreeNamespaceAliasHashes -func FreeNamespaceAliasHashes(style StylesheetPtr) diff --git a/libxslt/numbersInternals.go b/libxslt/numbersInternals.go deleted file mode 100644 index 228287c0..00000000 --- a/libxslt/numbersInternals.go +++ /dev/null @@ -1,43 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -type X_xsltCompMatch struct { - Unused [8]uint8 -} - -type X_xsltNumberData struct { - Level *libxml2.Char - Count *libxml2.Char - From *libxml2.Char - Value *libxml2.Char - Format *libxml2.Char - HasFormat c.Int - DigitsPerGroup c.Int - 
GroupingCharacter c.Int - GroupingCharacterLen c.Int - Doc libxml2.DocPtr - Node libxml2.NodePtr - CountPat *X_xsltCompMatch - FromPat *X_xsltCompMatch -} -type NumberData X_xsltNumberData -type NumberDataPtr *NumberData - -type X_xsltFormatNumberInfo struct { - IntegerHash c.Int - IntegerDigits c.Int - FracDigits c.Int - FracHash c.Int - Group c.Int - Multiplier c.Int - AddDecimal c.Char - IsMultiplierSet c.Char - IsNegativePattern c.Char -} -type FormatNumberInfo X_xsltFormatNumberInfo -type FormatNumberInfoPtr *FormatNumberInfo diff --git a/libxslt/pattern.go b/libxslt/pattern.go deleted file mode 100644 index 0c6e925d..00000000 --- a/libxslt/pattern.go +++ /dev/null @@ -1,43 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -type CompMatch X_xsltCompMatch -type CompMatchPtr *CompMatch - -/* - * Pattern related interfaces. - */ -//go:linkname CompilePattern C.xsltCompilePattern -func CompilePattern(pattern *libxml2.Char, doc libxml2.DocPtr, node libxml2.NodePtr, style StylesheetPtr, runtime TransformContextPtr) CompMatchPtr - -//go:linkname FreeCompMatchList C.xsltFreeCompMatchList -func FreeCompMatchList(comp CompMatchPtr) - -//go:linkname TestCompMatchList C.xsltTestCompMatchList -func TestCompMatchList(ctxt TransformContextPtr, node libxml2.NodePtr, comp CompMatchPtr) c.Int - -//go:linkname CompMatchClearCache C.xsltCompMatchClearCache -func CompMatchClearCache(ctxt TransformContextPtr, comp CompMatchPtr) - -//go:linkname NormalizeCompSteps C.xsltNormalizeCompSteps -func NormalizeCompSteps(payload c.Pointer, data c.Pointer, name *libxml2.Char) - -/* - * Template related interfaces. 
- */ -//go:linkname AddTemplate C.xsltAddTemplate -func AddTemplate(style StylesheetPtr, cur TemplatePtr, mode *libxml2.Char, modeURI *libxml2.Char) c.Int - -//go:linkname GetTemplate C.xsltGetTemplate -func GetTemplate(ctxt TransformContextPtr, node libxml2.NodePtr, style StylesheetPtr) TemplatePtr - -//go:linkname FreeTemplateHashes C.xsltFreeTemplateHashes -func FreeTemplateHashes(style StylesheetPtr) - -//go:linkname CleanupTemplates C.xsltCleanupTemplates -func CleanupTemplates(style StylesheetPtr) diff --git a/libxslt/preproc.go b/libxslt/preproc.go deleted file mode 100644 index 6c3ceb09..00000000 --- a/libxslt/preproc.go +++ /dev/null @@ -1,15 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname DocumentComp C.xsltDocumentComp -func DocumentComp(style StylesheetPtr, inst libxml2.NodePtr, function TransformFunction) ElemPreCompPtr - -//go:linkname StylePreCompute C.xsltStylePreCompute -func StylePreCompute(style StylesheetPtr, inst libxml2.NodePtr) - -//go:linkname FreeStylePreComps C.xsltFreeStylePreComps -func FreeStylePreComps(style StylesheetPtr) diff --git a/libxslt/security.go b/libxslt/security.go deleted file mode 100644 index bd1c15e0..00000000 --- a/libxslt/security.go +++ /dev/null @@ -1,64 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -type X_xsltSecurityPrefs struct { - Unused [8]uint8 -} -type SecurityPrefs X_xsltSecurityPrefs -type SecurityPrefsPtr *SecurityPrefs -type SecurityOption c.Int - -const ( - SECPREF_READ_FILE SecurityOption = 1 - SECPREF_WRITE_FILE SecurityOption = 2 - SECPREF_CREATE_DIRECTORY SecurityOption = 3 - SECPREF_READ_NETWORK SecurityOption = 4 - SECPREF_WRITE_NETWORK SecurityOption = 5 -) - -// llgo:type C -type SecurityCheck func(SecurityPrefsPtr, TransformContextPtr, *c.Char) c.Int - -/* - * Module interfaces - */ -//go:linkname NewSecurityPrefs C.xsltNewSecurityPrefs -func NewSecurityPrefs() 
SecurityPrefsPtr - -//go:linkname FreeSecurityPrefs C.xsltFreeSecurityPrefs -func FreeSecurityPrefs(sec SecurityPrefsPtr) - -//go:linkname SetSecurityPrefs C.xsltSetSecurityPrefs -func SetSecurityPrefs(sec SecurityPrefsPtr, option SecurityOption, func_ SecurityCheck) c.Int - -//go:linkname GetSecurityPrefs C.xsltGetSecurityPrefs -func GetSecurityPrefs(sec SecurityPrefsPtr, option SecurityOption) SecurityCheck - -//go:linkname SetDefaultSecurityPrefs C.xsltSetDefaultSecurityPrefs -func SetDefaultSecurityPrefs(sec SecurityPrefsPtr) - -//go:linkname GetDefaultSecurityPrefs C.xsltGetDefaultSecurityPrefs -func GetDefaultSecurityPrefs() SecurityPrefsPtr - -//go:linkname SetCtxtSecurityPrefs C.xsltSetCtxtSecurityPrefs -func SetCtxtSecurityPrefs(sec SecurityPrefsPtr, ctxt TransformContextPtr) c.Int - -//go:linkname SecurityAllow C.xsltSecurityAllow -func SecurityAllow(sec SecurityPrefsPtr, ctxt TransformContextPtr, value *c.Char) c.Int - -//go:linkname SecurityForbid C.xsltSecurityForbid -func SecurityForbid(sec SecurityPrefsPtr, ctxt TransformContextPtr, value *c.Char) c.Int - -/* - * internal interfaces - */ -//go:linkname CheckWrite C.xsltCheckWrite -func CheckWrite(sec SecurityPrefsPtr, ctxt TransformContextPtr, URL *libxml2.Char) c.Int - -//go:linkname CheckRead C.xsltCheckRead -func CheckRead(sec SecurityPrefsPtr, ctxt TransformContextPtr, URL *libxml2.Char) c.Int diff --git a/libxslt/templates.go b/libxslt/templates.go deleted file mode 100644 index 7f2e3d43..00000000 --- a/libxslt/templates.go +++ /dev/null @@ -1,41 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname EvalXPathPredicate C.xsltEvalXPathPredicate -func EvalXPathPredicate(ctxt TransformContextPtr, comp libxml2.XPathCompExprPtr, nsList *libxml2.NsPtr, nsNr c.Int) c.Int - -//go:linkname EvalTemplateString C.xsltEvalTemplateString -func EvalTemplateString(ctxt TransformContextPtr, contextNode libxml2.NodePtr, inst 
libxml2.NodePtr) *libxml2.Char - -//go:linkname EvalAttrValueTemplate C.xsltEvalAttrValueTemplate -func EvalAttrValueTemplate(ctxt TransformContextPtr, node libxml2.NodePtr, name *libxml2.Char, ns *libxml2.Char) *libxml2.Char - -//go:linkname EvalStaticAttrValueTemplate C.xsltEvalStaticAttrValueTemplate -func EvalStaticAttrValueTemplate(style StylesheetPtr, node libxml2.NodePtr, name *libxml2.Char, ns *libxml2.Char, found *c.Int) *libxml2.Char - -/* TODO: this is obviously broken ... the namespaces should be passed too ! */ -//go:linkname EvalXPathString C.xsltEvalXPathString -func EvalXPathString(ctxt TransformContextPtr, comp libxml2.XPathCompExprPtr) *libxml2.Char - -//go:linkname EvalXPathStringNs C.xsltEvalXPathStringNs -func EvalXPathStringNs(ctxt TransformContextPtr, comp libxml2.XPathCompExprPtr, nsNr c.Int, nsList *libxml2.NsPtr) *libxml2.Char - -//go:linkname TemplateProcess C.xsltTemplateProcess -func TemplateProcess(ctxt TransformContextPtr, node libxml2.NodePtr) *libxml2.NodePtr - -//go:linkname AttrListTemplateProcess C.xsltAttrListTemplateProcess -func AttrListTemplateProcess(ctxt TransformContextPtr, target libxml2.NodePtr, cur libxml2.AttrPtr) libxml2.AttrPtr - -//go:linkname AttrTemplateProcess C.xsltAttrTemplateProcess -func AttrTemplateProcess(ctxt TransformContextPtr, target libxml2.NodePtr, attr libxml2.AttrPtr) libxml2.AttrPtr - -//go:linkname AttrTemplateValueProcess C.xsltAttrTemplateValueProcess -func AttrTemplateValueProcess(ctxt TransformContextPtr, attr *libxml2.Char) *libxml2.Char - -//go:linkname AttrTemplateValueProcessNode C.xsltAttrTemplateValueProcessNode -func AttrTemplateValueProcessNode(ctxt TransformContextPtr, str *libxml2.Char, node libxml2.NodePtr) *libxml2.Char diff --git a/libxslt/transform.go b/libxslt/transform.go deleted file mode 100644 index 51538064..00000000 --- a/libxslt/transform.go +++ /dev/null @@ -1,116 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ 
"unsafe" -) - -/** - * XInclude default processing. - */ -//go:linkname SetXIncludeDefault C.xsltSetXIncludeDefault -func SetXIncludeDefault(xinclude c.Int) - -//go:linkname GetXIncludeDefault C.xsltGetXIncludeDefault -func GetXIncludeDefault() c.Int - -/** - * Export context to users. - */ -//go:linkname NewTransformContext C.xsltNewTransformContext -func NewTransformContext(style StylesheetPtr, doc libxml2.DocPtr) TransformContextPtr - -//go:linkname FreeTransformContext C.xsltFreeTransformContext -func FreeTransformContext(ctxt TransformContextPtr) - -//go:linkname ApplyStylesheetUser C.xsltApplyStylesheetUser -func ApplyStylesheetUser(style StylesheetPtr, doc libxml2.DocPtr, params **c.Char, output *c.Char, profile *c.FILE, userCtxt TransformContextPtr) libxml2.DocPtr - -//go:linkname ProcessOneNode C.xsltProcessOneNode -func ProcessOneNode(ctxt TransformContextPtr, node libxml2.NodePtr, params StackElemPtr) - -/** - * Private Interfaces. - */ -//go:linkname ApplyStripSpaces C.xsltApplyStripSpaces -func ApplyStripSpaces(ctxt TransformContextPtr, node libxml2.NodePtr) - -//go:linkname ApplyStylesheet C.xsltApplyStylesheet -func ApplyStylesheet(style StylesheetPtr, doc libxml2.DocPtr, params **c.Char) libxml2.DocPtr - -//go:linkname ProfileStylesheet C.xsltProfileStylesheet -func ProfileStylesheet(style StylesheetPtr, doc libxml2.DocPtr, params **c.Char, output *c.FILE) libxml2.DocPtr - -//go:linkname RunStylesheet C.xsltRunStylesheet -func RunStylesheet(style StylesheetPtr, doc libxml2.DocPtr, params **c.Char, output *c.Char, SAX libxml2.SAXHandlerPtr, IObuf libxml2.OutputBufferPtr) c.Int - -//go:linkname RunStylesheetUser C.xsltRunStylesheetUser -func RunStylesheetUser(style StylesheetPtr, doc libxml2.DocPtr, params **c.Char, output *c.Char, SAX libxml2.SAXHandlerPtr, IObuf libxml2.OutputBufferPtr, profile *c.FILE, userCtxt TransformContextPtr) c.Int - -//go:linkname ApplyOneTemplate C.xsltApplyOneTemplate -func ApplyOneTemplate(ctxt TransformContextPtr, node 
libxml2.NodePtr, list libxml2.NodePtr, templ TemplatePtr, params StackElemPtr) - -//go:linkname DocumentElem C.xsltDocumentElem -func DocumentElem(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Sort C.xsltSort -func Sort(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Copy C.xsltCopy -func Copy(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Text C.xsltText -func Text(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Element C.xsltElement -func Element(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Comment C.xsltComment -func Comment(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Attribute C.xsltAttribute -func Attribute(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname ProcessingInstruction C.xsltProcessingInstruction -func ProcessingInstruction(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname CopyOf C.xsltCopyOf -func CopyOf(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname ValueOf C.xsltValueOf -func ValueOf(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Number C.xsltNumber -func Number(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname ApplyImports C.xsltApplyImports -func ApplyImports(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname CallTemplate C.xsltCallTemplate -func CallTemplate(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp 
ElemPreCompPtr) - -//go:linkname ApplyTemplates C.xsltApplyTemplates -func ApplyTemplates(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname Choose C.xsltChoose -func Choose(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname If C.xsltIf -func If(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname ForEach C.xsltForEach -func ForEach(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr, comp ElemPreCompPtr) - -//go:linkname RegisterAllElement C.xsltRegisterAllElement -func RegisterAllElement(ctxt TransformContextPtr) - -//go:linkname CopyTextString C.xsltCopyTextString -func CopyTextString(ctxt TransformContextPtr, target libxml2.NodePtr, string *libxml2.Char, noescape c.Int) libxml2.NodePtr - -/* Following 2 functions needed for libexslt/functions.c */ -//go:linkname LocalVariablePop C.xsltLocalVariablePop -func LocalVariablePop(ctxt TransformContextPtr, limitNr c.Int, level c.Int) - -//go:linkname LocalVariablePush C.xsltLocalVariablePush -func LocalVariablePush(ctxt TransformContextPtr, variable StackElemPtr, level c.Int) c.Int diff --git a/libxslt/variables.go b/libxslt/variables.go deleted file mode 100644 index 1008de2f..00000000 --- a/libxslt/variables.go +++ /dev/null @@ -1,56 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -const RVT_LOCAL = 1 -const RVT_FUNC_RESULT = 2 -const RVT_GLOBAL = 3 - -/* - * Interfaces for the variable module. 
- */ -//go:linkname EvalGlobalVariables C.xsltEvalGlobalVariables -func EvalGlobalVariables(ctxt TransformContextPtr) c.Int - -//go:linkname EvalUserParams C.xsltEvalUserParams -func EvalUserParams(ctxt TransformContextPtr, params **c.Char) c.Int - -//go:linkname QuoteUserParams C.xsltQuoteUserParams -func QuoteUserParams(ctxt TransformContextPtr, params **c.Char) c.Int - -//go:linkname EvalOneUserParam C.xsltEvalOneUserParam -func EvalOneUserParam(ctxt TransformContextPtr, name *libxml2.Char, value *libxml2.Char) c.Int - -//go:linkname QuoteOneUserParam C.xsltQuoteOneUserParam -func QuoteOneUserParam(ctxt TransformContextPtr, name *libxml2.Char, value *libxml2.Char) c.Int - -//go:linkname ParseGlobalVariable C.xsltParseGlobalVariable -func ParseGlobalVariable(style StylesheetPtr, cur libxml2.NodePtr) - -//go:linkname ParseGlobalParam C.xsltParseGlobalParam -func ParseGlobalParam(style StylesheetPtr, cur libxml2.NodePtr) - -//go:linkname ParseStylesheetVariable C.xsltParseStylesheetVariable -func ParseStylesheetVariable(ctxt TransformContextPtr, cur libxml2.NodePtr) - -//go:linkname ParseStylesheetParam C.xsltParseStylesheetParam -func ParseStylesheetParam(ctxt TransformContextPtr, cur libxml2.NodePtr) - -//go:linkname ParseStylesheetCallerParam C.xsltParseStylesheetCallerParam -func ParseStylesheetCallerParam(ctxt TransformContextPtr, cur libxml2.NodePtr) StackElemPtr - -//go:linkname AddStackElemList C.xsltAddStackElemList -func AddStackElemList(ctxt TransformContextPtr, elems StackElemPtr) c.Int - -//go:linkname FreeGlobalVariables C.xsltFreeGlobalVariables -func FreeGlobalVariables(ctxt TransformContextPtr) - -//go:linkname VariableLookup C.xsltVariableLookup -func VariableLookup(ctxt TransformContextPtr, name *libxml2.Char, ns_uri *libxml2.Char) libxml2.XPathObjectPtr - -//go:linkname XPathVariableLookup C.xsltXPathVariableLookup -func XPathVariableLookup(ctxt c.Pointer, name *libxml2.Char, ns_uri *libxml2.Char) libxml2.XPathObjectPtr diff --git 
a/libxslt/xslt.go b/libxslt/xslt.go deleted file mode 100644 index b2ca88c4..00000000 --- a/libxslt/xslt.go +++ /dev/null @@ -1,19 +0,0 @@ -package libxslt - -import _ "unsafe" - -const DEFAULT_VERSION = "1.0" -const DEFAULT_VENDOR = "libxslt" -const DEFAULT_URL = "http://xmlsoft.org/XSLT/" - -/* - * Global initialization function. - */ -//go:linkname Init C.xsltInit -func Init() - -/* - * Global cleanup function. - */ -//go:linkname CleanupGlobals C.xsltCleanupGlobals -func CleanupGlobals() diff --git a/libxslt/xsltInternals.go b/libxslt/xsltInternals.go deleted file mode 100644 index 6c9236bd..00000000 --- a/libxslt/xsltInternals.go +++ /dev/null @@ -1,490 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -const MAX_SORT = 15 - -type X_xsltRuntimeExtra struct { - Info c.Pointer - Deallocate libxml2.FreeFunc - Val struct { - Ptr c.Pointer - } -} -type RuntimeExtra X_xsltRuntimeExtra -type RuntimeExtraPtr *RuntimeExtra - -type X_xsltTemplate struct { - Next *X_xsltTemplate - Style *X_xsltStylesheet - Match *libxml2.Char - Priority c.Float - Name *libxml2.Char - NameURI *libxml2.Char - Mode *libxml2.Char - ModeURI *libxml2.Char - Content libxml2.NodePtr - Elem libxml2.NodePtr - InheritedNsNr c.Int - InheritedNs *libxml2.NsPtr - NbCalls c.Int - Time c.Ulong - Params c.Pointer - TemplNr c.Int - TemplMax c.Int - TemplCalledTab *TemplatePtr - TemplCountTab *c.Int - Position c.Int -} -type Template X_xsltTemplate -type TemplatePtr *Template - -type X_xsltStylesheet struct { - Parent *X_xsltStylesheet - Next *X_xsltStylesheet - Imports *X_xsltStylesheet - DocList DocumentPtr - Doc libxml2.DocPtr - StripSpaces libxml2.HashTablePtr - StripAll c.Int - CdataSection libxml2.HashTablePtr - Variables StackElemPtr - Templates TemplatePtr - TemplatesHash libxml2.HashTablePtr - RootMatch *X_xsltCompMatch - KeyMatch *X_xsltCompMatch - ElemMatch *X_xsltCompMatch - AttrMatch *X_xsltCompMatch - ParentMatch 
*X_xsltCompMatch - TextMatch *X_xsltCompMatch - PiMatch *X_xsltCompMatch - CommentMatch *X_xsltCompMatch - NsAliases libxml2.HashTablePtr - AttributeSets libxml2.HashTablePtr - NsHash libxml2.HashTablePtr - NsDefs c.Pointer - Keys c.Pointer - Method *libxml2.Char - MethodURI *libxml2.Char - Version *libxml2.Char - Encoding *libxml2.Char - OmitXmlDeclaration c.Int - DecimalFormat DecimalFormatPtr - Standalone c.Int - DoctypePublic *libxml2.Char - DoctypeSystem *libxml2.Char - Indent c.Int - MediaType *libxml2.Char - PreComps ElemPreCompPtr - Warnings c.Int - Errors c.Int - ExclPrefix *libxml2.Char - ExclPrefixTab **libxml2.Char - ExclPrefixNr c.Int - ExclPrefixMax c.Int - X_private c.Pointer - ExtInfos libxml2.HashTablePtr - ExtrasNr c.Int - Includes DocumentPtr - Dict libxml2.DictPtr - AttVTs c.Pointer - DefaultAlias *libxml2.Char - Nopreproc c.Int - Internalized c.Int - LiteralResult c.Int - Principal StylesheetPtr - ForwardsCompatible c.Int - NamedTemplates libxml2.HashTablePtr - XpathCtxt libxml2.XPathContextPtr - OpLimit c.Ulong - OpCount c.Ulong -} - -type X_xsltDecimalFormat struct { - Next *X_xsltDecimalFormat - Name *libxml2.Char - Digit *libxml2.Char - PatternSeparator *libxml2.Char - MinusSign *libxml2.Char - Infinity *libxml2.Char - NoNumber *libxml2.Char - DecimalPoint *libxml2.Char - Grouping *libxml2.Char - Percent *libxml2.Char - Permille *libxml2.Char - ZeroDigit *libxml2.Char - NsUri *libxml2.Char -} -type DecimalFormat X_xsltDecimalFormat -type DecimalFormatPtr *DecimalFormat - -type X_xsltDocument struct { - Next *X_xsltDocument - Main c.Int - Doc libxml2.DocPtr - Keys c.Pointer - Includes *X_xsltDocument - Preproc c.Int - NbKeysComputed c.Int -} -type Document X_xsltDocument -type DocumentPtr *Document - -type X_xsltKeyDef struct { - Next *X_xsltKeyDef - Inst libxml2.NodePtr - Name *libxml2.Char - NameURI *libxml2.Char - Match *libxml2.Char - Use *libxml2.Char - Comp libxml2.XPathCompExprPtr - Usecomp libxml2.XPathCompExprPtr - NsList 
*libxml2.NsPtr - NsNr c.Int -} -type KeyDef X_xsltKeyDef -type KeyDefPtr *KeyDef - -type X_xsltKeyTable struct { - Next *X_xsltKeyTable - Name *libxml2.Char - NameURI *libxml2.Char - Keys libxml2.HashTablePtr -} -type KeyTable X_xsltKeyTable -type KeyTablePtr *KeyTable -type Stylesheet X_xsltStylesheet -type StylesheetPtr *Stylesheet - -type X_xsltTransformContext struct { - Style StylesheetPtr - Type OutputType - Templ TemplatePtr - TemplNr c.Int - TemplMax c.Int - TemplTab *TemplatePtr - Vars StackElemPtr - VarsNr c.Int - VarsMax c.Int - VarsTab *StackElemPtr - VarsBase c.Int - ExtFunctions libxml2.HashTablePtr - ExtElements libxml2.HashTablePtr - ExtInfos libxml2.HashTablePtr - Mode *libxml2.Char - ModeURI *libxml2.Char - DocList DocumentPtr - Document DocumentPtr - Node libxml2.NodePtr - NodeList libxml2.NodeSetPtr - Output libxml2.DocPtr - Insert libxml2.NodePtr - XpathCtxt libxml2.XPathContextPtr - State TransformState - GlobalVars libxml2.HashTablePtr - Inst libxml2.NodePtr - Xinclude c.Int - OutputFile *c.Char - Profile c.Int - Prof c.Long - ProfNr c.Int - ProfMax c.Int - ProfTab *c.Long - X_private c.Pointer - ExtrasNr c.Int - ExtrasMax c.Int - Extras RuntimeExtraPtr - StyleList DocumentPtr - Sec c.Pointer - Error libxml2.GenericErrorFunc - Errctx c.Pointer - Sortfunc SortFunc - TmpRVT libxml2.DocPtr - PersistRVT libxml2.DocPtr - Ctxtflags c.Int - Lasttext *libxml2.Char - Lasttsize c.Int - Lasttuse c.Int - DebugStatus c.Int - TraceCode *c.Ulong - ParserOptions c.Int - Dict libxml2.DictPtr - TmpDoc libxml2.DocPtr - Internalized c.Int - NbKeys c.Int - HasTemplKeyPatterns c.Int - CurrentTemplateRule TemplatePtr - InitialContextNode libxml2.NodePtr - InitialContextDoc libxml2.DocPtr - Cache TransformCachePtr - ContextVariable c.Pointer - LocalRVT libxml2.DocPtr - LocalRVTBase libxml2.DocPtr - KeyInitLevel c.Int - Depth c.Int - MaxTemplateDepth c.Int - MaxTemplateVars c.Int - OpLimit c.Ulong - OpCount c.Ulong - SourceDocDirty c.Int - CurrentId c.Ulong - 
NewLocale NewLocaleFunc - FreeLocale FreeLocaleFunc - GenSortKey GenSortKeyFunc -} -type TransformContext X_xsltTransformContext -type TransformContextPtr *TransformContext - -type X_xsltElemPreComp struct { - Next ElemPreCompPtr - Type StyleType - Func TransformFunction - Inst libxml2.NodePtr - Free ElemPreCompDeallocator -} -type ElemPreComp X_xsltElemPreComp -type ElemPreCompPtr *ElemPreComp - -// llgo:type C -type TransformFunction func(TransformContextPtr, libxml2.NodePtr, libxml2.NodePtr, ElemPreCompPtr) - -// llgo:type C -type SortFunc func(TransformContextPtr, *libxml2.NodePtr, c.Int) -type StyleType c.Int - -const ( - FUNC_COPY StyleType = 1 - FUNC_SORT StyleType = 2 - FUNC_TEXT StyleType = 3 - FUNC_ELEMENT StyleType = 4 - FUNC_ATTRIBUTE StyleType = 5 - FUNC_COMMENT StyleType = 6 - FUNC_PI StyleType = 7 - FUNC_COPYOF StyleType = 8 - FUNC_VALUEOF StyleType = 9 - FUNC_NUMBER StyleType = 10 - FUNC_APPLYIMPORTS StyleType = 11 - FUNC_CALLTEMPLATE StyleType = 12 - FUNC_APPLYTEMPLATES StyleType = 13 - FUNC_CHOOSE StyleType = 14 - FUNC_IF StyleType = 15 - FUNC_FOREACH StyleType = 16 - FUNC_DOCUMENT StyleType = 17 - FUNC_WITHPARAM StyleType = 18 - FUNC_PARAM StyleType = 19 - FUNC_VARIABLE StyleType = 20 - FUNC_WHEN StyleType = 21 - FUNC_EXTENSION StyleType = 22 -) - -// llgo:type C -type ElemPreCompDeallocator func(ElemPreCompPtr) - -type X_xsltStylePreComp struct { - Next ElemPreCompPtr - Type StyleType - Func TransformFunction - Inst libxml2.NodePtr - Stype *libxml2.Char - HasStype c.Int - Number c.Int - Order *libxml2.Char - HasOrder c.Int - Descending c.Int - Lang *libxml2.Char - HasLang c.Int - CaseOrder *libxml2.Char - LowerFirst c.Int - Use *libxml2.Char - HasUse c.Int - Noescape c.Int - Name *libxml2.Char - HasName c.Int - Ns *libxml2.Char - HasNs c.Int - Mode *libxml2.Char - ModeURI *libxml2.Char - Test *libxml2.Char - Templ TemplatePtr - Select *libxml2.Char - Ver11 c.Int - Filename *libxml2.Char - HasFilename c.Int - Numdata NumberData - Comp 
libxml2.XPathCompExprPtr - NsList *libxml2.NsPtr - NsNr c.Int -} -type StylePreComp X_xsltStylePreComp -type StylePreCompPtr *StylePreComp - -type X_xsltStackElem struct { - Next *X_xsltStackElem - Comp StylePreCompPtr - Computed c.Int - Name *libxml2.Char - NameURI *libxml2.Char - Select *libxml2.Char - Tree libxml2.NodePtr - Value libxml2.XPathObjectPtr - Fragment libxml2.DocPtr - Level c.Int - Context TransformContextPtr - Flags c.Int -} -type StackElem X_xsltStackElem -type StackElemPtr *StackElem - -type X_xsltTransformCache struct { - RVT libxml2.DocPtr - NbRVT c.Int - StackItems StackElemPtr - NbStackItems c.Int -} -type TransformCache X_xsltTransformCache -type TransformCachePtr *TransformCache -type OutputType c.Int - -const ( - OUTPUT_XML OutputType = 0 - OUTPUT_HTML OutputType = 1 - OUTPUT_TEXT OutputType = 2 -) - -// llgo:type C -type NewLocaleFunc func(*libxml2.Char, c.Int) c.Pointer - -// llgo:type C -type FreeLocaleFunc func(c.Pointer) - -// llgo:type C -type GenSortKeyFunc func(c.Pointer, *libxml2.Char) *libxml2.Char -type TransformState c.Int - -const ( - STATE_OK TransformState = 0 - STATE_ERROR TransformState = 1 - STATE_STOPPED TransformState = 2 -) - -/* - * Functions associated to the internal types -xsltDecimalFormatPtr xsltDecimalFormatGetByName(xsltStylesheetPtr sheet, - xmlChar *name); -*/ -//go:linkname NewStylesheet C.xsltNewStylesheet -func NewStylesheet() StylesheetPtr - -//go:linkname ParseStylesheetFile C.xsltParseStylesheetFile -func ParseStylesheetFile(filename *libxml2.Char) StylesheetPtr - -//go:linkname FreeStylesheet C.xsltFreeStylesheet -func FreeStylesheet(style StylesheetPtr) - -//go:linkname IsBlank C.xsltIsBlank -func IsBlank(str *libxml2.Char) c.Int - -//go:linkname FreeStackElemList C.xsltFreeStackElemList -func FreeStackElemList(elem StackElemPtr) - -//go:linkname DecimalFormatGetByName C.xsltDecimalFormatGetByName -func DecimalFormatGetByName(style StylesheetPtr, name *libxml2.Char) DecimalFormatPtr - -//go:linkname 
DecimalFormatGetByQName C.xsltDecimalFormatGetByQName -func DecimalFormatGetByQName(style StylesheetPtr, nsUri *libxml2.Char, name *libxml2.Char) DecimalFormatPtr - -//go:linkname ParseStylesheetProcess C.xsltParseStylesheetProcess -func ParseStylesheetProcess(ret StylesheetPtr, doc libxml2.DocPtr) StylesheetPtr - -//go:linkname ParseStylesheetOutput C.xsltParseStylesheetOutput -func ParseStylesheetOutput(style StylesheetPtr, cur libxml2.NodePtr) - -//go:linkname ParseStylesheetDoc C.xsltParseStylesheetDoc -func ParseStylesheetDoc(doc libxml2.DocPtr) StylesheetPtr - -//go:linkname ParseStylesheetImportedDoc C.xsltParseStylesheetImportedDoc -func ParseStylesheetImportedDoc(doc libxml2.DocPtr, style StylesheetPtr) StylesheetPtr - -//go:linkname ParseStylesheetUser C.xsltParseStylesheetUser -func ParseStylesheetUser(style StylesheetPtr, doc libxml2.DocPtr) c.Int - -//go:linkname LoadStylesheetPI C.xsltLoadStylesheetPI -func LoadStylesheetPI(doc libxml2.DocPtr) StylesheetPtr - -//go:linkname NumberFormat C.xsltNumberFormat -func NumberFormat(ctxt TransformContextPtr, data NumberDataPtr, node libxml2.NodePtr) - -//go:linkname FormatNumberConversion C.xsltFormatNumberConversion -func FormatNumberConversion(self DecimalFormatPtr, format *libxml2.Char, number c.Double, result **libxml2.Char) libxml2.XPathError - -//go:linkname ParseTemplateContent C.xsltParseTemplateContent -func ParseTemplateContent(style StylesheetPtr, templ libxml2.NodePtr) - -//go:linkname AllocateExtra C.xsltAllocateExtra -func AllocateExtra(style StylesheetPtr) c.Int - -//go:linkname AllocateExtraCtxt C.xsltAllocateExtraCtxt -func AllocateExtraCtxt(ctxt TransformContextPtr) c.Int - -/* - * Extra functions for Result Value Trees - */ -//go:linkname CreateRVT C.xsltCreateRVT -func CreateRVT(ctxt TransformContextPtr) libxml2.DocPtr - -//go:linkname RegisterTmpRVT C.xsltRegisterTmpRVT -func RegisterTmpRVT(ctxt TransformContextPtr, RVT libxml2.DocPtr) c.Int - -//go:linkname RegisterLocalRVT 
C.xsltRegisterLocalRVT -func RegisterLocalRVT(ctxt TransformContextPtr, RVT libxml2.DocPtr) c.Int - -//go:linkname RegisterPersistRVT C.xsltRegisterPersistRVT -func RegisterPersistRVT(ctxt TransformContextPtr, RVT libxml2.DocPtr) c.Int - -//go:linkname ExtensionInstructionResultRegister C.xsltExtensionInstructionResultRegister -func ExtensionInstructionResultRegister(ctxt TransformContextPtr, obj libxml2.XPathObjectPtr) c.Int - -//go:linkname ExtensionInstructionResultFinalize C.xsltExtensionInstructionResultFinalize -func ExtensionInstructionResultFinalize(ctxt TransformContextPtr) c.Int - -//go:linkname FlagRVTs C.xsltFlagRVTs -func FlagRVTs(ctxt TransformContextPtr, obj libxml2.XPathObjectPtr, val c.Int) c.Int - -//go:linkname FreeRVTs C.xsltFreeRVTs -func FreeRVTs(ctxt TransformContextPtr) - -//go:linkname ReleaseRVT C.xsltReleaseRVT -func ReleaseRVT(ctxt TransformContextPtr, RVT libxml2.DocPtr) - -/* - * Extra functions for Attribute Value Templates - */ -//go:linkname CompileAttr C.xsltCompileAttr -func CompileAttr(style StylesheetPtr, attr libxml2.AttrPtr) - -//go:linkname EvalAVT C.xsltEvalAVT -func EvalAVT(ctxt TransformContextPtr, avt c.Pointer, node libxml2.NodePtr) *libxml2.Char - -//go:linkname FreeAVTList C.xsltFreeAVTList -func FreeAVTList(avt c.Pointer) - -/* - * Extra function for successful xsltCleanupGlobals / xsltInit sequence. 
- */ -//go:linkname Uninit C.xsltUninit -func Uninit() - -/************************************************************************ - * * - * Transformation-time functions for *internal* use only * - * * - ************************************************************************/ -//go:linkname InitCtxtKey C.xsltInitCtxtKey -func InitCtxtKey(ctxt TransformContextPtr, doc DocumentPtr, keyd KeyDefPtr) c.Int - -//go:linkname InitAllDocKeys C.xsltInitAllDocKeys -func InitAllDocKeys(ctxt TransformContextPtr) c.Int diff --git a/libxslt/xsltconfig.go b/libxslt/xsltconfig.go deleted file mode 100644 index c617d020..00000000 --- a/libxslt/xsltconfig.go +++ /dev/null @@ -1,8 +0,0 @@ -package libxslt - -import _ "unsafe" - -const LIBXSLT_DOTTED_VERSION = "1.1.42" -const LIBXSLT_VERSION = 10142 -const LIBXSLT_VERSION_STRING = "10142" -const LIBXSLT_VERSION_EXTRA = "" diff --git a/libxslt/xsltexports.go b/libxslt/xsltexports.go deleted file mode 100644 index f830fa57..00000000 --- a/libxslt/xsltexports.go +++ /dev/null @@ -1,3 +0,0 @@ -package libxslt - -import _ "unsafe" diff --git a/libxslt/xsltlocale.go b/libxslt/xsltlocale.go deleted file mode 100644 index 22186200..00000000 --- a/libxslt/xsltlocale.go +++ /dev/null @@ -1,25 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -//go:linkname NewLocale C.xsltNewLocale -func NewLocale(langName *libxml2.Char, lowerFirst c.Int) c.Pointer - -//go:linkname FreeLocale C.xsltFreeLocale -func FreeLocale(locale c.Pointer) - -//go:linkname Strxfrm C.xsltStrxfrm -func Strxfrm(locale c.Pointer, string *libxml2.Char) *libxml2.Char - -//go:linkname FreeLocales C.xsltFreeLocales -func FreeLocales() - -type Locale c.Pointer -type LocaleChar libxml2.Char - -//go:linkname LocaleStrcmp C.xsltLocaleStrcmp -func LocaleStrcmp(locale c.Pointer, str1 *libxml2.Char, str2 *libxml2.Char) c.Int diff --git a/libxslt/xsltutils.go b/libxslt/xsltutils.go deleted file mode 100644 index 
b51f1528..00000000 --- a/libxslt/xsltutils.go +++ /dev/null @@ -1,158 +0,0 @@ -package libxslt - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/llpkg/libxml2" - _ "unsafe" -) - -/* - * Our own version of namespaced attributes lookup. - */ -//go:linkname GetNsProp C.xsltGetNsProp -func GetNsProp(node libxml2.NodePtr, name *libxml2.Char, nameSpace *libxml2.Char) *libxml2.Char - -//go:linkname GetCNsProp C.xsltGetCNsProp -func GetCNsProp(style StylesheetPtr, node libxml2.NodePtr, name *libxml2.Char, nameSpace *libxml2.Char) *libxml2.Char - -//go:linkname GetUTF8Char C.xsltGetUTF8Char -func GetUTF8Char(utf *c.Char, len *c.Int) c.Int - -type DebugTraceCodes c.Int - -const ( - TRACE_ALL DebugTraceCodes = -1 - TRACE_NONE DebugTraceCodes = 0 - TRACE_COPY_TEXT DebugTraceCodes = 1 - TRACE_PROCESS_NODE DebugTraceCodes = 2 - TRACE_APPLY_TEMPLATE DebugTraceCodes = 4 - TRACE_COPY DebugTraceCodes = 8 - TRACE_COMMENT DebugTraceCodes = 16 - TRACE_PI DebugTraceCodes = 32 - TRACE_COPY_OF DebugTraceCodes = 64 - TRACE_VALUE_OF DebugTraceCodes = 128 - TRACE_CALL_TEMPLATE DebugTraceCodes = 256 - TRACE_APPLY_TEMPLATES DebugTraceCodes = 512 - TRACE_CHOOSE DebugTraceCodes = 1024 - TRACE_IF DebugTraceCodes = 2048 - TRACE_FOR_EACH DebugTraceCodes = 4096 - TRACE_STRIP_SPACES DebugTraceCodes = 8192 - TRACE_TEMPLATES DebugTraceCodes = 16384 - TRACE_KEYS DebugTraceCodes = 32768 - TRACE_VARIABLES DebugTraceCodes = 65536 -) - -// llgo:link DebugTraceCodes.DebugSetDefaultTrace C.xsltDebugSetDefaultTrace -func (recv_ DebugTraceCodes) DebugSetDefaultTrace() { -} - -//go:linkname DebugGetDefaultTrace C.xsltDebugGetDefaultTrace -func DebugGetDefaultTrace() DebugTraceCodes - -//go:linkname PrintErrorContext C.xsltPrintErrorContext -func PrintErrorContext(ctxt TransformContextPtr, style StylesheetPtr, node libxml2.NodePtr) - -//go:linkname Message C.xsltMessage -func Message(ctxt TransformContextPtr, node libxml2.NodePtr, inst libxml2.NodePtr) - -//go:linkname SetGenericErrorFunc 
C.xsltSetGenericErrorFunc -func SetGenericErrorFunc(ctx c.Pointer, handler libxml2.GenericErrorFunc) - -//go:linkname SetGenericDebugFunc C.xsltSetGenericDebugFunc -func SetGenericDebugFunc(ctx c.Pointer, handler libxml2.GenericErrorFunc) - -//go:linkname SetTransformErrorFunc C.xsltSetTransformErrorFunc -func SetTransformErrorFunc(ctxt TransformContextPtr, ctx c.Pointer, handler libxml2.GenericErrorFunc) - -//go:linkname TransformError C.xsltTransformError -func TransformError(ctxt TransformContextPtr, style StylesheetPtr, node libxml2.NodePtr, msg *c.Char, __llgo_va_list ...interface{}) - -//go:linkname SetCtxtParseOptions C.xsltSetCtxtParseOptions -func SetCtxtParseOptions(ctxt TransformContextPtr, options c.Int) c.Int - -/* - * Sorting. - */ -//go:linkname DocumentSortFunction C.xsltDocumentSortFunction -func DocumentSortFunction(list libxml2.NodeSetPtr) - -//go:linkname SetSortFunc C.xsltSetSortFunc -func SetSortFunc(handler SortFunc) - -//go:linkname SetCtxtSortFunc C.xsltSetCtxtSortFunc -func SetCtxtSortFunc(ctxt TransformContextPtr, handler SortFunc) - -//go:linkname DefaultSortFunction C.xsltDefaultSortFunction -func DefaultSortFunction(ctxt TransformContextPtr, sorts *libxml2.NodePtr, nbsorts c.Int) - -//go:linkname DoSortFunction C.xsltDoSortFunction -func DoSortFunction(ctxt TransformContextPtr, sorts *libxml2.NodePtr, nbsorts c.Int) - -//go:linkname ComputeSortResult C.xsltComputeSortResult -func ComputeSortResult(ctxt TransformContextPtr, sort libxml2.NodePtr) *libxml2.XPathObjectPtr - -/* - * QNames handling. 
- */ -//go:linkname SplitQName C.xsltSplitQName -func SplitQName(dict libxml2.DictPtr, name *libxml2.Char, prefix **libxml2.Char) *libxml2.Char - -//go:linkname GetQNameURI C.xsltGetQNameURI -func GetQNameURI(node libxml2.NodePtr, name **libxml2.Char) *libxml2.Char - -//go:linkname GetQNameURI2 C.xsltGetQNameURI2 -func GetQNameURI2(style StylesheetPtr, node libxml2.NodePtr, name **libxml2.Char) *libxml2.Char - -/* - * Output, reuse libxml I/O buffers. - */ -//go:linkname SaveResultTo C.xsltSaveResultTo -func SaveResultTo(buf libxml2.OutputBufferPtr, result libxml2.DocPtr, style StylesheetPtr) c.Int - -//go:linkname SaveResultToFilename C.xsltSaveResultToFilename -func SaveResultToFilename(URI *c.Char, result libxml2.DocPtr, style StylesheetPtr, compression c.Int) c.Int - -//go:linkname SaveResultToFile C.xsltSaveResultToFile -func SaveResultToFile(file *c.FILE, result libxml2.DocPtr, style StylesheetPtr) c.Int - -//go:linkname SaveResultToFd C.xsltSaveResultToFd -func SaveResultToFd(fd c.Int, result libxml2.DocPtr, style StylesheetPtr) c.Int - -//go:linkname SaveResultToString C.xsltSaveResultToString -func SaveResultToString(doc_txt_ptr **libxml2.Char, doc_txt_len *c.Int, result libxml2.DocPtr, style StylesheetPtr) c.Int - -/* - * XPath interface - */ -//go:linkname XPathCompile C.xsltXPathCompile -func XPathCompile(style StylesheetPtr, str *libxml2.Char) libxml2.XPathCompExprPtr - -//go:linkname XPathCompileFlags C.xsltXPathCompileFlags -func XPathCompileFlags(style StylesheetPtr, str *libxml2.Char, flags c.Int) libxml2.XPathCompExprPtr - -type DebugStatusCodes c.Int - -const ( - DEBUG_NONE DebugStatusCodes = 0 - DEBUG_INIT DebugStatusCodes = 1 - DEBUG_STEP DebugStatusCodes = 2 - DEBUG_STEPOUT DebugStatusCodes = 3 - DEBUG_NEXT DebugStatusCodes = 4 - DEBUG_STOP DebugStatusCodes = 5 - DEBUG_CONT DebugStatusCodes = 6 - DEBUG_RUN DebugStatusCodes = 7 - DEBUG_RUN_RESTART DebugStatusCodes = 8 - DEBUG_QUIT DebugStatusCodes = 9 -) - -// llgo:type C -type 
HandleDebuggerCallback func(libxml2.NodePtr, libxml2.NodePtr, TemplatePtr, TransformContextPtr) - -// llgo:type C -type AddCallCallback func(TemplatePtr, libxml2.NodePtr) c.Int - -// llgo:type C -type DropCallCallback func() - -//go:linkname GetDebuggerStatus C.xsltGetDebuggerStatus -func GetDebuggerStatus() c.Int diff --git a/py/emoji/emoji.go b/py/emoji/emoji.go new file mode 100644 index 00000000..e4cc8dc5 --- /dev/null +++ b/py/emoji/emoji.go @@ -0,0 +1,178 @@ +package emoji + +import ( + "github.com/goplus/lib/py" + _ "unsafe" +) + +const LLGoPackage = "py.emoji" +// +// Replace emoji names in a string with Unicode codes. +// >>> import emoji +// >>> print(emoji.emojize("Python is fun :thumbsup:", language='alias')) +// Python is fun 👍 +// >>> print(emoji.emojize("Python is fun :thumbs_up:")) +// Python is fun 👍 +// >>> print(emoji.emojize("Python is fun {thumbs_up}", delimiters = ("{", "}"))) +// Python is fun 👍 +// >>> print(emoji.emojize("Python is fun :red_heart:", variant="text_type")) +// Python is fun ❤ +// >>> print(emoji.emojize("Python is fun :red_heart:", variant="emoji_type")) +// Python is fun ❤️ # red heart, not black heart +// +// :param string: String contains emoji names. +// :param delimiters: (optional) Use delimiters other than _DEFAULT_DELIMITER. Each delimiter +// should contain at least one character that is not part of a-zA-Z0-9 and ``_-&.()!?#*+,``. +// See ``emoji.core._EMOJI_NAME_PATTERN`` for the regular expression of unsafe characters. +// :param variant: (optional) Choose variation selector between "base"(None), VS-15 ("text_type") and VS-16 ("emoji_type") +// :param language: Choose language of emoji name: language code 'es', 'de', etc. or 'alias' +// to use English aliases +// :param version: (optional) Max version. If set to an Emoji Version, +// all emoji above this version will be ignored. +// :param handle_version: (optional) Replace the emoji above ``version`` +// instead of ignoring it. 
handle_version can be either a string or a +// callable; If it is a callable, it's passed the Unicode emoji and the +// data dict from :data:`EMOJI_DATA` and must return a replacement string +// to be used:: +// +// handle_version('\U0001F6EB', { +// 'en' : ':airplane_departure:', +// 'status' : fully_qualified, +// 'E' : 1, +// 'alias' : [':flight_departure:'], +// 'de': ':abflug:', +// 'es': ':avión_despegando:', +// ... +// }) +// +// :raises ValueError: if ``variant`` is neither None, 'text_type' or 'emoji_type' +// +// +// +//go:linkname Emojize py.emojize +func Emojize(string *py.Object, delimiters *py.Object, variant *py.Object, language *py.Object, version *py.Object, handleVersion *py.Object) *py.Object +// +// Replace Unicode emoji in a string with emoji shortcodes. Useful for storage. +// >>> import emoji +// >>> print(emoji.emojize("Python is fun :thumbs_up:")) +// Python is fun 👍 +// >>> print(emoji.demojize("Python is fun 👍")) +// Python is fun :thumbs_up: +// >>> print(emoji.demojize("icode is tricky 😯", delimiters=("__", "__"))) +// Unicode is tricky __hushed_face__ +// +// :param string: String contains Unicode characters. MUST BE UNICODE. +// :param delimiters: (optional) User delimiters other than ``_DEFAULT_DELIMITER`` +// :param language: Choose language of emoji name: language code 'es', 'de', etc. or 'alias' +// to use English aliases +// :param version: (optional) Max version. If set to an Emoji Version, +// all emoji above this version will be removed. +// :param handle_version: (optional) Replace the emoji above ``version`` +// instead of removing it. handle_version can be either a string or a +// callable ``handle_version(emj: str, data: dict) -> str``; If it is +// a callable, it's passed the Unicode emoji and the data dict from +// :data:`EMOJI_DATA` and must return a replacement string to be used. 
+// The passed data is in the form of:: +// +// handle_version('\U0001F6EB', { +// 'en' : ':airplane_departure:', +// 'status' : fully_qualified, +// 'E' : 1, +// 'alias' : [':flight_departure:'], +// 'de': ':abflug:', +// 'es': ':avión_despegando:', +// ... +// }) +// +// +// +//go:linkname Demojize py.demojize +func Demojize(string *py.Object, delimiters *py.Object, language *py.Object, version *py.Object, handleVersion *py.Object) *py.Object +// +// Find unicode emoji in a string. Yield each emoji as a named tuple +// :class:`Token` ``(chars, EmojiMatch)`` or `:class:`Token` ``(chars, EmojiMatchZWJNonRGI)``. +// If ``non_emoji`` is True, also yield all other characters as +// :class:`Token` ``(char, char)`` . +// +// :param string: String to analyze +// :param non_emoji: If True also yield all non-emoji characters as Token(char, char) +// :param join_emoji: If True, multiple EmojiMatch are merged into a single +// EmojiMatchZWJNonRGI if they are separated only by a ZWJ. +// +// +//go:linkname Analyze py.analyze +func Analyze(string *py.Object, nonEmoji *py.Object, joinEmoji *py.Object) *py.Object +// +// Returns the location and emoji in list of dict format. +// >>> emoji.emoji_list("Hi, I am fine. 😁") +// [{'match_start': 15, 'match_end': 16, 'emoji': '😁'}] +// +// +//go:linkname EmojiList py.emoji_list +func EmojiList(string *py.Object) *py.Object +// Returns distinct list of emojis from the string. +// +//go:linkname DistinctEmojiList py.distinct_emoji_list +func DistinctEmojiList(string *py.Object) *py.Object +// +// Returns the count of emojis in a string. +// +// :param unique: (optional) True if count only unique emojis +// +// +//go:linkname EmojiCount py.emoji_count +func EmojiCount(string *py.Object, unique *py.Object) *py.Object +// +// Replace Unicode emoji in a customizable string. +// +// :param string: String contains Unicode characters. MUST BE UNICODE. 
+// :param replace: (optional) replace can be either a string or a callable; +// If it is a callable, it's passed the Unicode emoji and the data dict from +// :data:`EMOJI_DATA` and must return a replacement string to be used. +// replace(str, dict) -> str +// :param version: (optional) Max version. If set to an Emoji Version, +// only emoji above this version will be replaced. +// +// +//go:linkname ReplaceEmoji py.replace_emoji +func ReplaceEmoji(string *py.Object, replace *py.Object, version *py.Object) *py.Object +// +// Returns True if the string is a single emoji, and it is "recommended for +// general interchange" by Unicode.org. +// +// +//go:linkname IsEmoji py.is_emoji +func IsEmoji(string *py.Object) *py.Object +// +// Returns True if the string contains only emojis. +// This might not imply that `is_emoji` for all the characters, for example, +// if the string contains variation selectors. +// +// +//go:linkname PurelyEmoji py.purely_emoji +func PurelyEmoji(string *py.Object) *py.Object +// +// Returns the Emoji Version of the emoji. +// +// See https://www.unicode.org/reports/tr51/#Versioning for more information. 
+// >>> emoji.version("😁") +// 0.6 +// >>> emoji.version(":butterfly:") +// 3 +// +// :param string: An emoji or a text containing an emoji +// :raises ValueError: if ``string`` does not contain an emoji +// +// +//go:linkname Version py.version +func Version(string *py.Object) *py.Object +// Generate dict containing all fully-qualified and component emoji name for a language +// The dict is only generated once per language and then cached in _EMOJI_UNICODE[lang] +// +//go:linkname GetEmojiUnicodeDict py.get_emoji_unicode_dict +func GetEmojiUnicodeDict(lang *py.Object) *py.Object +// Generate dict containing all fully-qualified and component aliases +// The dict is only generated once and then cached in _ALIASES_UNICODE +// +//go:linkname GetAliasesUnicodeDict py.get_aliases_unicode_dict +func GetAliasesUnicodeDict() *py.Object diff --git a/py/emoji/go.mod b/py/emoji/go.mod new file mode 100644 index 00000000..666e935a --- /dev/null +++ b/py/emoji/go.mod @@ -0,0 +1,5 @@ +module github.com/PengPengPeng717/llpkg/py/emoji + +go 1.24.5 + +require github.com/goplus/lib v0.3.0 diff --git a/py/emoji/go.sum b/py/emoji/go.sum new file mode 100644 index 00000000..54e0f00c --- /dev/null +++ b/py/emoji/go.sum @@ -0,0 +1,2 @@ +github.com/goplus/lib v0.3.0 h1:y0ZGb5Q/RikW1oMMB4Di7XIZIpuzh/7mlrR8HNbxXCA= +github.com/goplus/lib v0.3.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/py/emoji/llpkg.cfg b/py/emoji/llpkg.cfg new file mode 100644 index 00000000..9c762f94 --- /dev/null +++ b/py/emoji/llpkg.cfg @@ -0,0 +1,17 @@ +{ + "type": "python", + "upstream": { + "installer": { + "name": "pip" + }, + "package": { + "name": "emoji", + "version": "2.10.0" + } + }, + "llpyg": { + "output_dir": "./test", + "mod_name": "github.com/PengPengPeng717/llpkg/py/emoji", + "mod_depth": 1 + } +} diff --git a/py/emoji/llpyg.cfg b/py/emoji/llpyg.cfg new file mode 100644 index 00000000..8711c91d --- /dev/null +++ b/py/emoji/llpyg.cfg @@ -0,0 +1,7 @@ +{ + "name": "emoji", + 
"libName": "emoji", + "modules": [ + "emoji" + ] +} diff --git a/py/numpy/_demo/basic_test/main.go b/py/numpy/_demo/basic_test/main.go new file mode 100644 index 00000000..c70be070 --- /dev/null +++ b/py/numpy/_demo/basic_test/main.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + + "numpy" + + "github.com/goplus/lib/py" + "github.com/goplus/lib/py/std" +) + +func main() { + fmt.Println("=== NumPy Basic Test Demo ===") + + // 测试1: 创建数组 + fmt.Println("\n1. 创建数组测试:") + + // 创建一个简单的数组 + arr1 := py.List(1.0, 2.0, 3.0, 4.0, 5.0) + arr2 := py.List(2.0, 4.0, 6.0, 8.0, 10.0) + + // 使用numpy进行数组运算 + arr3 := numpy.Add(arr1, arr2) + std.Print(py.Str("result:"), arr3) + + fmt.Println("\n=== 测试完成 ===") +} diff --git a/py/numpy/go.mod b/py/numpy/go.mod new file mode 100644 index 00000000..a772d954 --- /dev/null +++ b/py/numpy/go.mod @@ -0,0 +1,5 @@ +module github.com/PengPengPeng717/llpkg/py/numpy + +go 1.24.5 + +require github.com/goplus/lib v0.3.0 diff --git a/py/numpy/go.sum b/py/numpy/go.sum new file mode 100644 index 00000000..54e0f00c --- /dev/null +++ b/py/numpy/go.sum @@ -0,0 +1,2 @@ +github.com/goplus/lib v0.3.0 h1:y0ZGb5Q/RikW1oMMB4Di7XIZIpuzh/7mlrR8HNbxXCA= +github.com/goplus/lib v0.3.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/py/numpy/llpkg.cfg b/py/numpy/llpkg.cfg new file mode 100644 index 00000000..4d56789c --- /dev/null +++ b/py/numpy/llpkg.cfg @@ -0,0 +1,17 @@ +{ + "type": "python", + "upstream": { + "installer": { + "name": "pip" + }, + "package": { + "name": "numpy", + "version": "1.26.4" + } + }, + "llpyg": { + "output_dir": "./test", + "mod_name": "github.com/PengPengPeng717/llpkg/py/numpy", + "mod_depth": 1 + } +} \ No newline at end of file diff --git a/py/numpy/llpyg.cfg b/py/numpy/llpyg.cfg new file mode 100644 index 00000000..698ff6b4 --- /dev/null +++ b/py/numpy/llpyg.cfg @@ -0,0 +1,7 @@ +{ + "name": "numpy", + "libName": "numpy", + "modules": [ + "numpy" + ] +} diff --git a/py/numpy/numpy.go b/py/numpy/numpy.go new file 
mode 100644 index 00000000..bf94afe5 --- /dev/null +++ b/py/numpy/numpy.go @@ -0,0 +1,27293 @@ +package numpy + +import ( + "github.com/goplus/lib/py" + _ "unsafe" +) + +const LLGoPackage = "py.numpy" +// +// Show libraries and system information on which NumPy was built +// and is being used +// +// Parameters +// ---------- +// mode : {`'stdout'`, `'dicts'`}, optional. +// Indicates how to display the config information. +// `'stdout'` prints to console, `'dicts'` returns a dictionary +// of the configuration. +// +// Returns +// ------- +// out : {`dict`, `None`} +// If mode is `'dicts'`, a dict is returned, else None +// +// See Also +// -------- +// get_include : Returns the directory containing NumPy C +// header files. +// +// Notes +// ----- +// 1. The `'stdout'` mode will give more readable +// output if ``pyyaml`` is installed +// +// +// +//go:linkname ShowConfig py.show_config +func ShowConfig(mode *py.Object) *py.Object +// nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, order="K", casting="safe", buffersize=0) +// +// Create nditers for use in nested loops +// +// Create a tuple of `nditer` objects which iterate in nested loops over +// different axes of the op argument. The first iterator is used in the +// outermost loop, the last in the innermost loop. Advancing one will change +// the subsequent iterators to point at its new element. +// +// Parameters +// ---------- +// op : ndarray or sequence of array_like +// The array(s) to iterate over. +// +// axes : list of list of int +// Each item is used as an "op_axes" argument to an nditer +// +// flags, op_flags, op_dtypes, order, casting, buffersize (optional) +// See `nditer` parameters of the same name +// +// Returns +// ------- +// iters : tuple of nditer +// An nditer for each item in `axes`, outermost first +// +// See Also +// -------- +// nditer +// +// Examples +// -------- +// +// Basic usage. 
Note how y is the "flattened" version of +// [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified +// the first iter's axes as [1] +// +// >>> a = np.arange(12).reshape(2, 3, 2) +// >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) +// >>> for x in i: +// ... print(i.multi_index) +// ... for y in j: +// ... print('', j.multi_index, y) +// (0,) +// (0, 0) 0 +// (0, 1) 1 +// (1, 0) 6 +// (1, 1) 7 +// (1,) +// (0, 0) 2 +// (0, 1) 3 +// (1, 0) 8 +// (1, 1) 9 +// (2,) +// (0, 0) 4 +// (0, 1) 5 +// (1, 0) 10 +// (1, 1) 11 +// +//go:linkname NestedIters py.nested_iters +func NestedIters(op *py.Object, axes *py.Object, flags *py.Object, opFlags *py.Object, opDtypes *py.Object, order *py.Object, casting *py.Object, buffersize *py.Object) *py.Object +// arange([start,] stop[, step,], dtype=None, *, like=None) +// +// Return evenly spaced values within a given interval. +// +// ``arange`` can be called with a varying number of positional arguments: +// +// * ``arange(stop)``: Values are generated within the half-open interval +// ``[0, stop)`` (in other words, the interval including `start` but +// excluding `stop`). +// * ``arange(start, stop)``: Values are generated within the half-open +// interval ``[start, stop)``. +// * ``arange(start, stop, step)`` Values are generated within the half-open +// interval ``[start, stop)``, with spacing between values given by +// ``step``. +// +// For integer arguments the function is roughly equivalent to the Python +// built-in :py:class:`range`, but returns an ndarray rather than a ``range`` +// instance. +// +// When using a non-integer step, such as 0.1, it is often better to use +// `numpy.linspace`. +// +// See the Warning sections below for more information. +// +// Parameters +// ---------- +// start : integer or real, optional +// Start of interval. The interval includes this value. The default +// start value is 0. +// stop : integer or real +// End of interval. 
The interval does not include this value, except +// in some cases where `step` is not an integer and floating point +// round-off affects the length of `out`. +// step : integer or real, optional +// Spacing between values. For any output `out`, this is the distance +// between two adjacent values, ``out[i+1] - out[i]``. The default +// step size is 1. If `step` is specified as a position argument, +// `start` must also be given. +// dtype : dtype, optional +// The type of the output array. If `dtype` is not given, infer the data +// type from the other input arguments. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// arange : ndarray +// Array of evenly spaced values. +// +// For floating point arguments, the length of the result is +// ``ceil((stop - start)/step)``. Because of floating point overflow, +// this rule may result in the last element of `out` being greater +// than `stop`. +// +// Warnings +// -------- +// The length of the output might not be numerically stable. +// +// Another stability issue is due to the internal implementation of +// `numpy.arange`. +// The actual step value used to populate the array is +// ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss +// can occur here, due to casting or due to using floating points when +// `start` is much larger than `step`. This can lead to unexpected +// behaviour. 
For example:: +// +// >>> np.arange(0, 5, 0.5, dtype=int) +// array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) +// >>> np.arange(-3, 3, 0.5, dtype=int) +// array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) +// +// In such cases, the use of `numpy.linspace` should be preferred. +// +// The built-in :py:class:`range` generates :std:doc:`Python built-in integers +// that have arbitrary size `, while `numpy.arange` +// produces `numpy.int32` or `numpy.int64` numbers. This may result in +// incorrect results for large integer values:: +// +// >>> power = 40 +// >>> modulo = 10000 +// >>> x1 = [(n ** power) % modulo for n in range(8)] +// >>> x2 = [(n ** power) % modulo for n in np.arange(8)] +// >>> print(x1) +// [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct +// >>> print(x2) +// [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect +// +// See Also +// -------- +// numpy.linspace : Evenly spaced numbers with careful handling of endpoints. +// numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. +// numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. +// :ref:`how-to-partition` +// +// Examples +// -------- +// >>> np.arange(3) +// array([0, 1, 2]) +// >>> np.arange(3.0) +// array([ 0., 1., 2.]) +// >>> np.arange(3,7) +// array([3, 4, 5, 6]) +// >>> np.arange(3,7,2) +// array([3, 5]) +// +//go:linkname Arange py.arange +func Arange(start *py.Object, stop *py.Object, step *py.Object, dtype *py.Object) *py.Object +// array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, +// like=None) +// +// Create an array. +// +// Parameters +// ---------- +// object : array_like +// An array, any object exposing the array interface, an object whose +// ``__array__`` method returns an array, or any (nested) sequence. +// If object is a scalar, a 0-dimensional array containing object is +// returned. +// dtype : data-type, optional +// The desired data-type for the array. 
If not given, NumPy will try to use +// a default ``dtype`` that can represent the values (by applying promotion +// rules when necessary.) +// copy : bool, optional +// If true (default), then the object is copied. Otherwise, a copy will +// only be made if ``__array__`` returns a copy, if obj is a nested +// sequence, or if a copy is needed to satisfy any of the other +// requirements (``dtype``, ``order``, etc.). +// order : {'K', 'A', 'C', 'F'}, optional +// Specify the memory layout of the array. If object is not an array, the +// newly created array will be in C order (row major) unless 'F' is +// specified, in which case it will be in Fortran order (column major). +// If object is an array the following holds. +// +// ===== ========= =================================================== +// order no copy copy=True +// ===== ========= =================================================== +// 'K' unchanged F & C order preserved, otherwise most similar order +// 'A' unchanged F order if input is F and not C, otherwise C order +// 'C' C order C order +// 'F' F order F order +// ===== ========= =================================================== +// +// When ``copy=False`` and a copy is made for other reasons, the result is +// the same as if ``copy=True``, with some exceptions for 'A', see the +// Notes section. The default order is 'K'. +// subok : bool, optional +// If True, then sub-classes will be passed-through, otherwise +// the returned array will be forced to be a base-class array (default). +// ndmin : int, optional +// Specifies the minimum number of dimensions that the resulting +// array should have. Ones will be prepended to the shape as +// needed to meet this requirement. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. 
In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// An array object satisfying the specified requirements. +// +// See Also +// -------- +// empty_like : Return an empty array with shape and type of input. +// ones_like : Return an array of ones with shape and type of input. +// zeros_like : Return an array of zeros with shape and type of input. +// full_like : Return a new array with shape of input filled with value. +// empty : Return a new uninitialized array. +// ones : Return a new array setting values to one. +// zeros : Return a new array setting values to zero. +// full : Return a new array of given shape filled with value. +// +// +// Notes +// ----- +// When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order, +// and a copy is forced by a change in dtype, then the order of the result is +// not necessarily 'C' as expected. This is likely a bug. 
+// +// Examples +// -------- +// >>> np.array([1, 2, 3]) +// array([1, 2, 3]) +// +// Upcasting: +// +// >>> np.array([1, 2, 3.0]) +// array([ 1., 2., 3.]) +// +// More than one dimension: +// +// >>> np.array([[1, 2], [3, 4]]) +// array([[1, 2], +// [3, 4]]) +// +// Minimum dimensions 2: +// +// >>> np.array([1, 2, 3], ndmin=2) +// array([[1, 2, 3]]) +// +// Type provided: +// +// >>> np.array([1, 2, 3], dtype=complex) +// array([ 1.+0.j, 2.+0.j, 3.+0.j]) +// +// Data-type consisting of more than one element: +// +// >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] +// array([1, 3]) +// +// Creating an array from sub-classes: +// +// >>> np.array(np.mat('1 2; 3 4')) +// array([[1, 2], +// [3, 4]]) +// +// >>> np.array(np.mat('1 2; 3 4'), subok=True) +// matrix([[1, 2], +// [3, 4]]) +// +//go:linkname Array py.array +func Array(object *py.Object, dtype *py.Object) *py.Object +// asarray(a, dtype=None, order=None, *, like=None) +// +// Convert the input to an array. +// +// Parameters +// ---------- +// a : array_like +// Input data, in any form that can be converted to an array. This +// includes lists, lists of tuples, tuples, tuples of tuples, tuples +// of lists and ndarrays. +// dtype : data-type, optional +// By default, the data-type is inferred from the input data. +// order : {'C', 'F', 'A', 'K'}, optional +// Memory layout. 'A' and 'K' depend on the order of input array a. +// 'C' row-major (C-style), +// 'F' column-major (Fortran-style) memory representation. +// 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise +// 'K' (keep) preserve input order +// Defaults to 'K'. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. 
versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array interpretation of `a`. No copy is performed if the input +// is already an ndarray with matching dtype and order. If `a` is a +// subclass of ndarray, a base class ndarray is returned. +// +// See Also +// -------- +// asanyarray : Similar function which passes through subclasses. +// ascontiguousarray : Convert input to a contiguous array. +// asfarray : Convert input to a floating point ndarray. +// asfortranarray : Convert input to an ndarray with column-major +// memory order. +// asarray_chkfinite : Similar function which checks input for NaNs and Infs. +// fromiter : Create an array from an iterator. +// fromfunction : Construct an array by executing a function on grid +// positions. +// +// Examples +// -------- +// Convert a list into an array: +// +// >>> a = [1, 2] +// >>> np.asarray(a) +// array([1, 2]) +// +// Existing arrays are not copied: +// +// >>> a = np.array([1, 2]) +// >>> np.asarray(a) is a +// True +// +// If `dtype` is set, array is copied only if dtype does not match: +// +// >>> a = np.array([1, 2], dtype=np.float32) +// >>> np.asarray(a, dtype=np.float32) is a +// True +// >>> np.asarray(a, dtype=np.float64) is a +// False +// +// Contrary to `asanyarray`, ndarray subclasses are not passed through: +// +// >>> issubclass(np.recarray, np.ndarray) +// True +// >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) +// >>> np.asarray(a) is a +// False +// >>> np.asanyarray(a) is a +// True +// +//go:linkname Asarray py.asarray +func Asarray(a *py.Object, dtype *py.Object, order *py.Object) *py.Object +// asanyarray(a, dtype=None, order=None, *, like=None) +// +// Convert the input to an ndarray, but pass ndarray subclasses through. +// +// Parameters +// ---------- +// a : array_like +// Input data, in any form that can be converted to an array. 
This +// includes scalars, lists, lists of tuples, tuples, tuples of tuples, +// tuples of lists, and ndarrays. +// dtype : data-type, optional +// By default, the data-type is inferred from the input data. +// order : {'C', 'F', 'A', 'K'}, optional +// Memory layout. 'A' and 'K' depend on the order of input array a. +// 'C' row-major (C-style), +// 'F' column-major (Fortran-style) memory representation. +// 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise +// 'K' (keep) preserve input order +// Defaults to 'C'. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray or an ndarray subclass +// Array interpretation of `a`. If `a` is an ndarray or a subclass +// of ndarray, it is returned as-is and no copy is performed. +// +// See Also +// -------- +// asarray : Similar function which always returns ndarrays. +// ascontiguousarray : Convert input to a contiguous array. +// asfarray : Convert input to a floating point ndarray. +// asfortranarray : Convert input to an ndarray with column-major +// memory order. +// asarray_chkfinite : Similar function which checks input for NaNs and +// Infs. +// fromiter : Create an array from an iterator. +// fromfunction : Construct an array by executing a function on grid +// positions. 
+// +// Examples +// -------- +// Convert a list into an array: +// +// >>> a = [1, 2] +// >>> np.asanyarray(a) +// array([1, 2]) +// +// Instances of `ndarray` subclasses are passed through as-is: +// +// >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) +// >>> np.asanyarray(a) is a +// True +// +//go:linkname Asanyarray py.asanyarray +func Asanyarray(a *py.Object, dtype *py.Object, order *py.Object) *py.Object +// ascontiguousarray(a, dtype=None, *, like=None) +// +// Return a contiguous array (ndim >= 1) in memory (C order). +// +// Parameters +// ---------- +// a : array_like +// Input array. +// dtype : str or dtype object, optional +// Data-type of returned array. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Contiguous array of same shape and content as `a`, with type `dtype` +// if specified. +// +// See Also +// -------- +// asfortranarray : Convert input to an ndarray with column-major +// memory order. +// require : Return an ndarray that satisfies requirements. +// ndarray.flags : Information about the memory layout of the array. 
+// +// Examples +// -------- +// Starting with a Fortran-contiguous array: +// +// >>> x = np.ones((2, 3), order='F') +// >>> x.flags['F_CONTIGUOUS'] +// True +// +// Calling ``ascontiguousarray`` makes a C-contiguous copy: +// +// >>> y = np.ascontiguousarray(x) +// >>> y.flags['C_CONTIGUOUS'] +// True +// >>> np.may_share_memory(x, y) +// False +// +// Now, starting with a C-contiguous array: +// +// >>> x = np.ones((2, 3), order='C') +// >>> x.flags['C_CONTIGUOUS'] +// True +// +// Then, calling ``ascontiguousarray`` returns the same object: +// +// >>> y = np.ascontiguousarray(x) +// >>> x is y +// True +// +// Note: This function returns an array with at least one-dimension (1-d) +// so it will not preserve 0-d arrays. +// +//go:linkname Ascontiguousarray py.ascontiguousarray +func Ascontiguousarray(a *py.Object, dtype *py.Object) *py.Object +// asfortranarray(a, dtype=None, *, like=None) +// +// Return an array (ndim >= 1) laid out in Fortran order in memory. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// dtype : str or dtype object, optional +// By default, the data-type is inferred from the input data. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// The input `a` in Fortran, or column-major, order. +// +// See Also +// -------- +// ascontiguousarray : Convert input to a contiguous (C order) array. +// asanyarray : Convert input to an ndarray with either row or +// column-major memory order. +// require : Return an ndarray that satisfies requirements. +// ndarray.flags : Information about the memory layout of the array. 
+// +// Examples +// -------- +// Starting with a C-contiguous array: +// +// >>> x = np.ones((2, 3), order='C') +// >>> x.flags['C_CONTIGUOUS'] +// True +// +// Calling ``asfortranarray`` makes a Fortran-contiguous copy: +// +// >>> y = np.asfortranarray(x) +// >>> y.flags['F_CONTIGUOUS'] +// True +// >>> np.may_share_memory(x, y) +// False +// +// Now, starting with a Fortran-contiguous array: +// +// >>> x = np.ones((2, 3), order='F') +// >>> x.flags['F_CONTIGUOUS'] +// True +// +// Then, calling ``asfortranarray`` returns the same object: +// +// >>> y = np.asfortranarray(x) +// >>> x is y +// True +// +// Note: This function returns an array with at least one-dimension (1-d) +// so it will not preserve 0-d arrays. +// +//go:linkname Asfortranarray py.asfortranarray +func Asfortranarray(a *py.Object, dtype *py.Object) *py.Object +// zeros(shape, dtype=float, order='C', *, like=None) +// +// Return a new array of given shape and type, filled with zeros. +// +// Parameters +// ---------- +// shape : int or tuple of ints +// Shape of the new array, e.g., ``(2, 3)`` or ``2``. +// dtype : data-type, optional +// The desired data-type for the array, e.g., `numpy.int8`. Default is +// `numpy.float64`. +// order : {'C', 'F'}, optional, default: 'C' +// Whether to store multi-dimensional data in row-major +// (C-style) or column-major (Fortran-style) order in +// memory. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array of zeros with the given shape, dtype, and order. +// +// See Also +// -------- +// zeros_like : Return an array of zeros with shape and type of input. 
+// empty : Return a new uninitialized array. +// ones : Return a new array setting values to one. +// full : Return a new array of given shape filled with value. +// +// Examples +// -------- +// >>> np.zeros(5) +// array([ 0., 0., 0., 0., 0.]) +// +// >>> np.zeros((5,), dtype=int) +// array([0, 0, 0, 0, 0]) +// +// >>> np.zeros((2, 1)) +// array([[ 0.], +// [ 0.]]) +// +// >>> s = (2,2) +// >>> np.zeros(s) +// array([[ 0., 0.], +// [ 0., 0.]]) +// +// >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype +// array([(0, 0), (0, 0)], +// dtype=[('x', '>> np.count_nonzero(np.eye(4)) +// 4 +// >>> a = np.array([[0, 1, 7, 0], +// ... [3, 0, 2, 19]]) +// >>> np.count_nonzero(a) +// 5 +// >>> np.count_nonzero(a, axis=0) +// array([1, 1, 2, 1]) +// >>> np.count_nonzero(a, axis=1) +// array([2, 3]) +// >>> np.count_nonzero(a, axis=1, keepdims=True) +// array([[2], +// [3]]) +// +// +//go:linkname CountNonzero py.count_nonzero +func CountNonzero(a *py.Object, axis *py.Object) *py.Object +// empty(shape, dtype=float, order='C', *, like=None) +// +// Return a new array of given shape and type, without initializing entries. +// +// Parameters +// ---------- +// shape : int or tuple of int +// Shape of the empty array, e.g., ``(2, 3)`` or ``2``. +// dtype : data-type, optional +// Desired output data-type for the array, e.g, `numpy.int8`. Default is +// `numpy.float64`. +// order : {'C', 'F'}, optional, default: 'C' +// Whether to store multi-dimensional data in row-major +// (C-style) or column-major (Fortran-style) order in +// memory. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. 
versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array of uninitialized (arbitrary) data of the given shape, dtype, and +// order. Object arrays will be initialized to None. +// +// See Also +// -------- +// empty_like : Return an empty array with shape and type of input. +// ones : Return a new array setting values to one. +// zeros : Return a new array setting values to zero. +// full : Return a new array of given shape filled with value. +// +// +// Notes +// ----- +// `empty`, unlike `zeros`, does not set the array values to zero, +// and may therefore be marginally faster. On the other hand, it requires +// the user to manually set all the values in the array, and should be +// used with caution. +// +// Examples +// -------- +// >>> np.empty([2, 2]) +// array([[ -9.74499359e+001, 6.69583040e-309], +// [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized +// +// >>> np.empty([2, 2], dtype=int) +// array([[-1073741821, -1067949133], +// [ 496041986, 19249760]]) #uninitialized +// +//go:linkname Empty py.empty +func Empty(shape *py.Object, dtype *py.Object, order *py.Object) *py.Object +// fromstring(string, dtype=float, count=-1, *, sep, like=None) +// +// A new 1-D array initialized from text data in a string. +// +// Parameters +// ---------- +// string : str +// A string containing the data. +// dtype : data-type, optional +// The data type of the array; default: float. For binary input data, +// the data must be in exactly this format. Most builtin numeric types are +// supported and extension types may be supported. +// +// .. versionadded:: 1.18.0 +// Complex dtypes. +// +// count : int, optional +// Read this number of `dtype` elements from the data. If this is +// negative (the default), the count will be determined from the +// length of the data. +// sep : str, optional +// The string separating numbers in the data; extra whitespace between +// elements is also ignored. +// +// .. 
deprecated:: 1.14 +// Passing ``sep=''``, the default, is deprecated since it will +// trigger the deprecated binary mode of this function. This mode +// interprets `string` as binary bytes, rather than ASCII text with +// decimal numbers, an operation which is better spelt +// ``frombuffer(string, dtype, count)``. If `string` contains unicode +// text, the binary mode of `fromstring` will first encode it into +// bytes using utf-8, which will not produce sane results. +// +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// arr : ndarray +// The constructed array. +// +// Raises +// ------ +// ValueError +// If the string is not the correct size to satisfy the requested +// `dtype` and `count`. +// +// See Also +// -------- +// frombuffer, fromfile, fromiter +// +// Examples +// -------- +// >>> np.fromstring('1 2', dtype=int, sep=' ') +// array([1, 2]) +// >>> np.fromstring('1, 2', dtype=int, sep=',') +// array([1, 2]) +// +//go:linkname Fromstring py.fromstring +func Fromstring(string *py.Object, dtype *py.Object, count *py.Object) *py.Object +// fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) +// +// Construct an array from data in a text or binary file. +// +// A highly efficient way of reading binary data with a known data-type, +// as well as parsing simply formatted text files. Data written using the +// `tofile` method can be read using this function. +// +// Parameters +// ---------- +// file : file or str or Path +// Open file object or filename. +// +// .. versionchanged:: 1.17.0 +// `pathlib.Path` objects are now accepted. 
+// +// dtype : data-type +// Data type of the returned array. +// For binary files, it is used to determine the size and byte-order +// of the items in the file. +// Most builtin numeric types are supported and extension types may be supported. +// +// .. versionadded:: 1.18.0 +// Complex dtypes. +// +// count : int +// Number of items to read. ``-1`` means all items (i.e., the complete +// file). +// sep : str +// Separator between items if file is a text file. +// Empty ("") separator means the file should be treated as binary. +// Spaces (" ") in the separator match zero or more whitespace characters. +// A separator consisting only of spaces must match at least one +// whitespace. +// offset : int +// The offset (in bytes) from the file's current position. Defaults to 0. +// Only permitted for binary files. +// +// .. versionadded:: 1.17.0 +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// See also +// -------- +// load, save +// ndarray.tofile +// loadtxt : More flexible way of loading data from a text file. +// +// Notes +// ----- +// Do not rely on the combination of `tofile` and `fromfile` for +// data storage, as the binary files generated are not platform +// independent. In particular, no byte-order or data-type information is +// saved. Data can be stored in the platform independent ``.npy`` format +// using `save` and `load` instead. +// +// Examples +// -------- +// Construct an ndarray: +// +// >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), +// ... 
('temp', float)]) +// >>> x = np.zeros((1,), dtype=dt) +// >>> x['time']['min'] = 10; x['temp'] = 98.25 +// >>> x +// array([((10, 0), 98.25)], +// dtype=[('time', [('min', '>> import tempfile +// >>> fname = tempfile.mkstemp()[1] +// >>> x.tofile(fname) +// +// Read the raw data from disk: +// +// >>> np.fromfile(fname, dtype=dt) +// array([((10, 0), 98.25)], +// dtype=[('time', [('min', '>> np.save(fname, x) +// >>> np.load(fname + '.npy') +// array([((10, 0), 98.25)], +// dtype=[('time', [('min', '>> dt = np.dtype(int) +// >>> dt = dt.newbyteorder('>') +// >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP +// +// The data of the resulting array will not be byteswapped, but will be +// interpreted correctly. +// +// This function creates a view into the original object. This should be safe +// in general, but it may make sense to copy the result when the original +// object is mutable or untrusted. +// +// Examples +// -------- +// >>> s = b'hello world' +// >>> np.frombuffer(s, dtype='S1', count=5, offset=6) +// array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') +// +// >>> np.frombuffer(b'\x01\x02', dtype=np.uint8) +// array([1, 2], dtype=uint8) +// >>> np.frombuffer(b'\x01\x02\x03\x04\x05', dtype=np.uint8, count=3) +// array([1, 2, 3], dtype=uint8) +// +//go:linkname Frombuffer py.frombuffer +func Frombuffer(buffer *py.Object, dtype *py.Object, count *py.Object, offset *py.Object) *py.Object +// from_dlpack(x, /) +// +// Create a NumPy array from an object implementing the ``__dlpack__`` +// protocol. Generally, the returned NumPy array is a read-only view +// of the input object. See [1]_ and [2]_ for more details. +// +// Parameters +// ---------- +// x : object +// A Python object that implements the ``__dlpack__`` and +// ``__dlpack_device__`` methods. +// +// Returns +// ------- +// out : ndarray +// +// References +// ---------- +// .. 
[1] Array API documentation, +// https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack +// +// .. [2] Python specification for DLPack, +// https://dmlc.github.io/dlpack/latest/python_spec.html +// +// Examples +// -------- +// >>> import torch +// >>> x = torch.arange(10) +// >>> # create a view of the torch tensor "x" in NumPy +// >>> y = np.from_dlpack(x) +// +//go:linkname FromDlpack py.from_dlpack +func FromDlpack(x *py.Object) *py.Object +// +// where(condition, [x, y], /) +// +// Return elements chosen from `x` or `y` depending on `condition`. +// +// .. note:: +// When only `condition` is provided, this function is a shorthand for +// ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be +// preferred, as it behaves correctly for subclasses. The rest of this +// documentation covers only the case where all three arguments are +// provided. +// +// Parameters +// ---------- +// condition : array_like, bool +// Where True, yield `x`, otherwise yield `y`. +// x, y : array_like +// Values from which to choose. `x`, `y` and `condition` need to be +// broadcastable to some shape. +// +// Returns +// ------- +// out : ndarray +// An array with elements from `x` where `condition` is True, and elements +// from `y` elsewhere. +// +// See Also +// -------- +// choose +// nonzero : The function that is called when x and y are omitted +// +// Notes +// ----- +// If all the arrays are 1-D, `where` is equivalent to:: +// +// [xv if c else yv +// for c, xv, yv in zip(condition, x, y)] +// +// Examples +// -------- +// >>> a = np.arange(10) +// >>> a +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// >>> np.where(a < 5, a, 10*a) +// array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) +// +// This can be used on multidimensional arrays too: +// +// >>> np.where([[True, False], [True, True]], +// ... [[1, 2], [3, 4]], +// ... 
[[9, 8], [7, 6]]) +// array([[1, 8], +// [3, 4]]) +// +// The shapes of x, y, and the condition are broadcast together: +// +// >>> x, y = np.ogrid[:3, :4] +// >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast +// array([[10, 0, 0, 0], +// [10, 11, 1, 1], +// [10, 11, 12, 2]]) +// +// >>> a = np.array([[0, 1, 2], +// ... [0, 2, 4], +// ... [0, 3, 6]]) +// >>> np.where(a < 4, a, -1) # -1 is broadcast +// array([[ 0, 1, 2], +// [ 0, 2, -1], +// [ 0, 3, -1]]) +// +// +//go:linkname Where py.where +func Where(condition *py.Object, x *py.Object, y *py.Object) *py.Object +// +// Find the indices of array elements that are non-zero, grouped by element. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// +// Returns +// ------- +// index_array : (N, a.ndim) ndarray +// Indices of elements that are non-zero. Indices are grouped by element. +// This array will have shape ``(N, a.ndim)`` where ``N`` is the number of +// non-zero items. +// +// See Also +// -------- +// where, nonzero +// +// Notes +// ----- +// ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, +// but produces a result of the correct shape for a 0D array. +// +// The output of ``argwhere`` is not suitable for indexing arrays. +// For this purpose use ``nonzero(a)`` instead. +// +// Examples +// -------- +// >>> x = np.arange(6).reshape(2,3) +// >>> x +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.argwhere(x>1) +// array([[0, 2], +// [1, 0], +// [1, 1], +// [1, 2]]) +// +// +// +//go:linkname Argwhere py.argwhere +func Argwhere(a *py.Object) *py.Object +// +// copyto(dst, src, casting='same_kind', where=True) +// +// Copies values from one array to another, broadcasting as necessary. +// +// Raises a TypeError if the `casting` rule is violated, and if +// `where` is provided, it selects which elements to copy. +// +// .. versionadded:: 1.7.0 +// +// Parameters +// ---------- +// dst : ndarray +// The array into which values are copied. 
+// src : array_like +// The array from which values are copied. +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur when copying. +// +// * 'no' means the data types should not be cast at all. +// * 'equiv' means only byte-order changes are allowed. +// * 'safe' means only casts which can preserve values are allowed. +// * 'same_kind' means only safe casts or casts within a kind, +// like float64 to float32, are allowed. +// * 'unsafe' means any data conversions may be done. +// where : array_like of bool, optional +// A boolean array which is broadcasted to match the dimensions +// of `dst`, and selects elements to copy from `src` to `dst` +// wherever it contains the value True. +// +// Examples +// -------- +// >>> A = np.array([4, 5, 6]) +// >>> B = [1, 2, 3] +// >>> np.copyto(A, B) +// >>> A +// array([1, 2, 3]) +// +// >>> A = np.array([[1, 2, 3], [4, 5, 6]]) +// >>> B = [[4, 5, 6], [7, 8, 9]] +// >>> np.copyto(A, B) +// >>> A +// array([[4, 5, 6], +// [7, 8, 9]]) +// +// +// +//go:linkname Copyto py.copyto +func Copyto(dst *py.Object, src *py.Object, casting *py.Object, where *py.Object) *py.Object +// +// concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind") +// +// Join a sequence of arrays along an existing axis. +// +// Parameters +// ---------- +// a1, a2, ... : sequence of array_like +// The arrays must have the same shape, except in the dimension +// corresponding to `axis` (the first, by default). +// axis : int, optional +// The axis along which the arrays will be joined. If axis is None, +// arrays are flattened before use. Default is 0. +// out : ndarray, optional +// If provided, the destination to place the result. The shape must be +// correct, matching that of what concatenate would have returned if no +// out argument were specified. +// dtype : str or dtype +// If provided, the destination array will have this dtype. 
Cannot be +// provided together with `out`. +// +// .. versionadded:: 1.20.0 +// +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Defaults to 'same_kind'. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// res : ndarray +// The concatenated array. +// +// See Also +// -------- +// ma.concatenate : Concatenate function that preserves input masks. +// array_split : Split an array into multiple sub-arrays of equal or +// near-equal size. +// split : Split array into a list of multiple sub-arrays of equal size. +// hsplit : Split array into multiple sub-arrays horizontally (column wise). +// vsplit : Split array into multiple sub-arrays vertically (row wise). +// dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). +// stack : Stack a sequence of arrays along a new axis. +// block : Assemble arrays from blocks. +// hstack : Stack arrays in sequence horizontally (column wise). +// vstack : Stack arrays in sequence vertically (row wise). +// dstack : Stack arrays in sequence depth wise (along third dimension). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// +// Notes +// ----- +// When one or more of the arrays to be concatenated is a MaskedArray, +// this function will return a MaskedArray object instead of an ndarray, +// but the input masks are *not* preserved. In cases where a MaskedArray +// is expected as input, use the ma.concatenate function from the masked +// array module instead. +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4]]) +// >>> b = np.array([[5, 6]]) +// >>> np.concatenate((a, b), axis=0) +// array([[1, 2], +// [3, 4], +// [5, 6]]) +// >>> np.concatenate((a, b.T), axis=1) +// array([[1, 2, 5], +// [3, 4, 6]]) +// >>> np.concatenate((a, b), axis=None) +// array([1, 2, 3, 4, 5, 6]) +// +// This function will not preserve masking of MaskedArray inputs. 
+// +// >>> a = np.ma.arange(3) +// >>> a[1] = np.ma.masked +// >>> b = np.arange(2, 5) +// >>> a +// masked_array(data=[0, --, 2], +// mask=[False, True, False], +// fill_value=999999) +// >>> b +// array([2, 3, 4]) +// >>> np.concatenate([a, b]) +// masked_array(data=[0, 1, 2, 2, 3, 4], +// mask=False, +// fill_value=999999) +// >>> np.ma.concatenate([a, b]) +// masked_array(data=[0, --, 2, 2, 3, 4], +// mask=[False, True, False, False, False, False], +// fill_value=999999) +// +// +// +//go:linkname Concatenate py.concatenate +func Concatenate(list0 *py.Object, axis *py.Object, out *py.Object, dtype *py.Object, casting *py.Object) *py.Object +// fastCopyAndTranspose(a) +// +// .. deprecated:: 1.24 +// +// fastCopyAndTranspose is deprecated and will be removed. Use the copy and +// transpose methods instead, e.g. ``arr.T.copy()`` +// +//go:linkname FastCopyAndTranspose py.fastCopyAndTranspose +func FastCopyAndTranspose(a *py.Object) *py.Object +// +// lexsort(keys, axis=-1) +// +// Perform an indirect stable sort using a sequence of keys. +// +// Given multiple sorting keys, which can be interpreted as columns in a +// spreadsheet, lexsort returns an array of integer indices that describes +// the sort order by multiple columns. The last key in the sequence is used +// for the primary sort order, the second-to-last key for the secondary sort +// order, and so on. The keys argument must be a sequence of objects that +// can be converted to arrays of the same shape. If a 2D array is provided +// for the keys argument, its rows are interpreted as the sorting keys and +// sorting is according to the last row, second last row etc. +// +// Parameters +// ---------- +// keys : (k, N) array or tuple containing k (N,)-shaped sequences +// The `k` different "columns" to be sorted. The last column (or row if +// `keys` is a 2D array) is the primary sort key. +// axis : int, optional +// Axis to be indirectly sorted. By default, sort over the last axis. 
+// +// Returns +// ------- +// indices : (N,) ndarray of ints +// Array of indices that sort the keys along the specified axis. +// +// See Also +// -------- +// argsort : Indirect sort. +// ndarray.sort : In-place sort. +// sort : Return a sorted copy of an array. +// +// Examples +// -------- +// Sort names: first by surname, then by name. +// +// >>> surnames = ('Hertz', 'Galilei', 'Hertz') +// >>> first_names = ('Heinrich', 'Galileo', 'Gustav') +// >>> ind = np.lexsort((first_names, surnames)) +// >>> ind +// array([1, 2, 0]) +// +// >>> [surnames[i] + ", " + first_names[i] for i in ind] +// ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] +// +// Sort two columns of numbers: +// +// >>> a = [1,5,1,4,3,4,4] # First column +// >>> b = [9,4,0,4,0,2,1] # Second column +// >>> ind = np.lexsort((b,a)) # Sort by a, then by b +// >>> ind +// array([2, 0, 4, 6, 5, 3, 1]) +// +// >>> [(a[i],b[i]) for i in ind] +// [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] +// +// Note that sorting is first according to the elements of ``a``. +// Secondary sorting is according to the elements of ``b``. +// +// A normal ``argsort`` would have yielded: +// +// >>> [(a[i],b[i]) for i in np.argsort(a)] +// [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] +// +// Structured arrays are sorted lexically by ``argsort``: +// +// >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], +// ... dtype=np.dtype([('x', int), ('y', int)])) +// +// >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) +// array([2, 0, 4, 6, 5, 3, 1]) +// +// +// +//go:linkname Lexsort py.lexsort +func Lexsort(keys *py.Object, axis *py.Object) *py.Object +// set_numeric_ops(op1=func1, op2=func2, ...) +// +// Set numerical operators for array objects. +// +// .. deprecated:: 1.16 +// +// For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. +// For ndarray subclasses, define the ``__array_ufunc__`` method and +// override the relevant ufunc. 
+// +// Parameters +// ---------- +// op1, op2, ... : callable +// Each ``op = func`` pair describes an operator to be replaced. +// For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace +// addition by modulus 5 addition. +// +// Returns +// ------- +// saved_ops : list of callables +// A list of all operators, stored before making replacements. +// +// Notes +// ----- +// .. warning:: +// Use with care! Incorrect usage may lead to memory errors. +// +// A function replacing an operator cannot make use of that operator. +// For example, when replacing add, you may not use ``+``. Instead, +// directly call ufuncs. +// +// Examples +// -------- +// >>> def add_mod5(x, y): +// ... return np.add(x, y) % 5 +// ... +// >>> old_funcs = np.set_numeric_ops(add=add_mod5) +// +// >>> x = np.arange(12).reshape((3, 4)) +// >>> x + x +// array([[0, 2, 4, 1], +// [3, 0, 2, 4], +// [1, 3, 0, 2]]) +// +// >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators +// +//go:linkname SetNumericOps py.set_numeric_ops +func SetNumericOps(op1 *py.Object, op2 *py.Object) *py.Object +// +// can_cast(from_, to, casting='safe') +// +// Returns True if cast between data types can occur according to the +// casting rule. If from is a scalar or array scalar, also returns +// True if the scalar value can be cast without overflow or truncation +// to an integer. +// +// Parameters +// ---------- +// from_ : dtype, dtype specifier, scalar, or array +// Data type, scalar, or array to cast from. +// to : dtype or dtype specifier +// Data type to cast to. +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. +// +// * 'no' means the data types should not be cast at all. +// * 'equiv' means only byte-order changes are allowed. +// * 'safe' means only casts which can preserve values are allowed. +// * 'same_kind' means only safe casts or casts within a kind, +// like float64 to float32, are allowed. 
+// * 'unsafe' means any data conversions may be done. +// +// Returns +// ------- +// out : bool +// True if cast can occur according to the casting rule. +// +// Notes +// ----- +// .. versionchanged:: 1.17.0 +// Casting between a simple data type and a structured one is possible only +// for "unsafe" casting. Casting to multiple fields is allowed, but +// casting from multiple fields is not. +// +// .. versionchanged:: 1.9.0 +// Casting from numeric to string types in 'safe' casting mode requires +// that the string dtype length is long enough to store the maximum +// integer/float value converted. +// +// See also +// -------- +// dtype, result_type +// +// Examples +// -------- +// Basic examples +// +// >>> np.can_cast(np.int32, np.int64) +// True +// >>> np.can_cast(np.float64, complex) +// True +// >>> np.can_cast(complex, float) +// False +// +// >>> np.can_cast('i8', 'f8') +// True +// >>> np.can_cast('i8', 'f4') +// False +// >>> np.can_cast('i4', 'S4') +// False +// +// Casting scalars +// +// >>> np.can_cast(100, 'i1') +// True +// >>> np.can_cast(150, 'i1') +// False +// >>> np.can_cast(150, 'u1') +// True +// +// >>> np.can_cast(3.5e100, np.float32) +// False +// >>> np.can_cast(1000.0, np.float32) +// True +// +// Array scalar checks the value, array does not +// +// >>> np.can_cast(np.array(1000.0), np.float32) +// True +// >>> np.can_cast(np.array([1000.0]), np.float32) +// False +// +// Using the casting rules +// +// >>> np.can_cast('i8', 'i8', 'no') +// True +// >>> np.can_cast('i8', 'no') +// False +// +// >>> np.can_cast('i8', 'equiv') +// True +// >>> np.can_cast('i8', 'equiv') +// False +// +// >>> np.can_cast('i8', 'safe') +// True +// >>> np.can_cast('i4', 'safe') +// False +// +// >>> np.can_cast('i4', 'same_kind') +// True +// >>> np.can_cast('u4', 'same_kind') +// False +// +// >>> np.can_cast('u4', 'unsafe') +// True +// +// +// +//go:linkname CanCast py.can_cast +func CanCast(from_ *py.Object, to *py.Object, casting *py.Object) 
*py.Object +// promote_types(type1, type2) +// +// Returns the data type with the smallest size and smallest scalar +// kind to which both ``type1`` and ``type2`` may be safely cast. +// The returned data type is always considered "canonical", this mainly +// means that the promoted dtype will always be in native byte order. +// +// This function is symmetric, but rarely associative. +// +// Parameters +// ---------- +// type1 : dtype or dtype specifier +// First data type. +// type2 : dtype or dtype specifier +// Second data type. +// +// Returns +// ------- +// out : dtype +// The promoted data type. +// +// Notes +// ----- +// Please see `numpy.result_type` for additional information about promotion. +// +// .. versionadded:: 1.6.0 +// +// Starting in NumPy 1.9, promote_types function now returns a valid string +// length when given an integer or float dtype as one argument and a string +// dtype as another argument. Previously it always returned the input string +// dtype, even if it wasn't long enough to store the max integer/float value +// converted to a string. +// +// .. versionchanged:: 1.23.0 +// +// NumPy now supports promotion for more structured dtypes. It will now +// remove unnecessary padding from a structure dtype and promote included +// fields individually. 
+// +// See Also +// -------- +// result_type, dtype, can_cast +// +// Examples +// -------- +// >>> np.promote_types('f4', 'f8') +// dtype('float64') +// +// >>> np.promote_types('i8', 'f4') +// dtype('float64') +// +// >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') +// dtype('S11') +// +// An example of a non-associative case: +// +// >>> p = np.promote_types +// >>> p('S', p('i1', 'u1')) +// dtype('S6') +// >>> p(p('S', 'i1'), 'u1') +// dtype('S4') +// +//go:linkname PromoteTypes py.promote_types +func PromoteTypes(type1 *py.Object, type2 *py.Object) *py.Object +// +// min_scalar_type(a, /) +// +// For scalar ``a``, returns the data type with the smallest size +// and smallest scalar kind which can hold its value. For non-scalar +// array ``a``, returns the vector's dtype unmodified. +// +// Floating point values are not demoted to integers, +// and complex values are not demoted to floats. +// +// Parameters +// ---------- +// a : scalar or array_like +// The value whose minimal data type is to be found. +// +// Returns +// ------- +// out : dtype +// The minimal data type. +// +// Notes +// ----- +// .. versionadded:: 1.6.0 +// +// See Also +// -------- +// result_type, promote_types, dtype, can_cast +// +// Examples +// -------- +// >>> np.min_scalar_type(10) +// dtype('uint8') +// +// >>> np.min_scalar_type(-260) +// dtype('int16') +// +// >>> np.min_scalar_type(3.1) +// dtype('float16') +// +// >>> np.min_scalar_type(1e50) +// dtype('float64') +// +// >>> np.min_scalar_type(np.arange(4,dtype='f8')) +// dtype('float64') +// +// +// +//go:linkname MinScalarType py.min_scalar_type +func MinScalarType(a *py.Object) *py.Object +// +// result_type(*arrays_and_dtypes) +// +// Returns the type that results from applying the NumPy +// type promotion rules to the arguments. +// +// Type promotion in NumPy works similarly to the rules in languages +// like C++, with some slight differences. 
When both scalars and +// arrays are used, the array's type takes precedence and the actual value +// of the scalar is taken into account. +// +// For example, calculating 3*a, where a is an array of 32-bit floats, +// intuitively should result in a 32-bit float output. If the 3 is a +// 32-bit integer, the NumPy rules indicate it can't convert losslessly +// into a 32-bit float, so a 64-bit float should be the result type. +// By examining the value of the constant, '3', we see that it fits in +// an 8-bit integer, which can be cast losslessly into the 32-bit float. +// +// Parameters +// ---------- +// arrays_and_dtypes : list of arrays and dtypes +// The operands of some operation whose result type is needed. +// +// Returns +// ------- +// out : dtype +// The result type. +// +// See also +// -------- +// dtype, promote_types, min_scalar_type, can_cast +// +// Notes +// ----- +// .. versionadded:: 1.6.0 +// +// The specific algorithm used is as follows. +// +// Categories are determined by first checking which of boolean, +// integer (int/uint), or floating point (float/complex) the maximum +// kind of all the arrays and the scalars are. +// +// If there are only scalars or the maximum category of the scalars +// is higher than the maximum category of the arrays, +// the data types are combined with :func:`promote_types` +// to produce the return value. +// +// Otherwise, `min_scalar_type` is called on each scalar, and +// the resulting data types are all combined with :func:`promote_types` +// to produce the return value. +// +// The set of int values is not a subset of the uint values for types +// with the same number of bits, something not reflected in +// :func:`min_scalar_type`, but handled as a special case in `result_type`. 
+// +// Examples +// -------- +// >>> np.result_type(3, np.arange(7, dtype='i1')) +// dtype('int8') +// +// >>> np.result_type('i4', 'c8') +// dtype('complex128') +// +// >>> np.result_type(3.0, -2) +// dtype('float64') +// +// +// +//go:linkname ResultType py.result_type +func ResultType(__llgo_va_list ...interface{}) *py.Object +// +// Check if the array is Fortran contiguous but *not* C contiguous. +// +// This function is obsolete and, because of changes due to relaxed stride +// checking, its return value for the same array may differ for versions +// of NumPy >= 1.10.0 and previous versions. If you only want to check if an +// array is Fortran contiguous use ``a.flags.f_contiguous`` instead. +// +// Parameters +// ---------- +// a : ndarray +// Input array. +// +// Returns +// ------- +// isfortran : bool +// Returns True if the array is Fortran contiguous but *not* C contiguous. +// +// +// Examples +// -------- +// +// np.array allows to specify whether the array is written in C-contiguous +// order (last index varies the fastest), or FORTRAN-contiguous order in +// memory (first index varies the fastest). +// +// >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') +// >>> a +// array([[1, 2, 3], +// [4, 5, 6]]) +// >>> np.isfortran(a) +// False +// +// >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') +// >>> b +// array([[1, 2, 3], +// [4, 5, 6]]) +// >>> np.isfortran(b) +// True +// +// +// The transpose of a C-ordered array is a FORTRAN-ordered array. +// +// >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') +// >>> a +// array([[1, 2, 3], +// [4, 5, 6]]) +// >>> np.isfortran(a) +// False +// >>> b = a.T +// >>> b +// array([[1, 4], +// [2, 5], +// [3, 6]]) +// >>> np.isfortran(b) +// True +// +// C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. 
+// +// >>> np.isfortran(np.array([1, 2], order='F')) +// False +// +// +// +//go:linkname Isfortran py.isfortran +func Isfortran(a *py.Object) *py.Object +// +// empty_like(prototype, dtype=None, order='K', subok=True, shape=None) +// +// Return a new array with the same shape and type as a given array. +// +// Parameters +// ---------- +// prototype : array_like +// The shape and data-type of `prototype` define these same attributes +// of the returned array. +// dtype : data-type, optional +// Overrides the data type of the result. +// +// .. versionadded:: 1.6.0 +// order : {'C', 'F', 'A', or 'K'}, optional +// Overrides the memory layout of the result. 'C' means C-order, +// 'F' means F-order, 'A' means 'F' if `prototype` is Fortran +// contiguous, 'C' otherwise. 'K' means match the layout of `prototype` +// as closely as possible. +// +// .. versionadded:: 1.6.0 +// subok : bool, optional. +// If True, then the newly created array will use the sub-class +// type of `prototype`, otherwise it will be a base-class array. Defaults +// to True. +// shape : int or sequence of ints, optional. +// Overrides the shape of the result. If order='K' and the number of +// dimensions is unchanged, will try to keep order, otherwise, +// order='C' is implied. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// out : ndarray +// Array of uninitialized (arbitrary) data with the same +// shape and type as `prototype`. +// +// See Also +// -------- +// ones_like : Return an array of ones with shape and type of input. +// zeros_like : Return an array of zeros with shape and type of input. +// full_like : Return a new array with shape of input filled with value. +// empty : Return a new uninitialized array. +// +// Notes +// ----- +// This function does *not* initialize the returned array; to do that use +// `zeros_like` or `ones_like` instead. It may be marginally faster than +// the functions that do set the array values. 
+// +// Examples +// -------- +// >>> a = ([1,2,3], [4,5,6]) # a is array-like +// >>> np.empty_like(a) +// array([[-1073741821, -1073741821, 3], # uninitialized +// [ 0, 0, -1073741821]]) +// >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) +// >>> np.empty_like(a) +// array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized +// [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) +// +// +// +//go:linkname EmptyLike py.empty_like +func EmptyLike(prototype *py.Object, dtype *py.Object, order *py.Object, subok *py.Object, shape *py.Object) *py.Object +// +// Return an array of zeros with the same shape and type as a given array. +// +// Parameters +// ---------- +// a : array_like +// The shape and data-type of `a` define these same attributes of +// the returned array. +// dtype : data-type, optional +// Overrides the data type of the result. +// +// .. versionadded:: 1.6.0 +// order : {'C', 'F', 'A', or 'K'}, optional +// Overrides the memory layout of the result. 'C' means C-order, +// 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, +// 'C' otherwise. 'K' means match the layout of `a` as closely +// as possible. +// +// .. versionadded:: 1.6.0 +// subok : bool, optional. +// If True, then the newly created array will use the sub-class +// type of `a`, otherwise it will be a base-class array. Defaults +// to True. +// shape : int or sequence of ints, optional. +// Overrides the shape of the result. If order='K' and the number of +// dimensions is unchanged, will try to keep order, otherwise, +// order='C' is implied. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// out : ndarray +// Array of zeros with the same shape and type as `a`. +// +// See Also +// -------- +// empty_like : Return an empty array with shape and type of input. +// ones_like : Return an array of ones with shape and type of input. +// full_like : Return a new array with shape of input filled with value. 
+// zeros : Return a new array setting values to zero. +// +// Examples +// -------- +// >>> x = np.arange(6) +// >>> x = x.reshape((2, 3)) +// >>> x +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.zeros_like(x) +// array([[0, 0, 0], +// [0, 0, 0]]) +// +// >>> y = np.arange(3, dtype=float) +// >>> y +// array([0., 1., 2.]) +// >>> np.zeros_like(y) +// array([0., 0., 0.]) +// +// +// +//go:linkname ZerosLike py.zeros_like +func ZerosLike(__llgo_va_list ...interface{}) *py.Object +// +// Return an array of ones with the same shape and type as a given array. +// +// Parameters +// ---------- +// a : array_like +// The shape and data-type of `a` define these same attributes of +// the returned array. +// dtype : data-type, optional +// Overrides the data type of the result. +// +// .. versionadded:: 1.6.0 +// order : {'C', 'F', 'A', or 'K'}, optional +// Overrides the memory layout of the result. 'C' means C-order, +// 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, +// 'C' otherwise. 'K' means match the layout of `a` as closely +// as possible. +// +// .. versionadded:: 1.6.0 +// subok : bool, optional. +// If True, then the newly created array will use the sub-class +// type of `a`, otherwise it will be a base-class array. Defaults +// to True. +// shape : int or sequence of ints, optional. +// Overrides the shape of the result. If order='K' and the number of +// dimensions is unchanged, will try to keep order, otherwise, +// order='C' is implied. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// out : ndarray +// Array of ones with the same shape and type as `a`. +// +// See Also +// -------- +// empty_like : Return an empty array with shape and type of input. +// zeros_like : Return an array of zeros with shape and type of input. +// full_like : Return a new array with shape of input filled with value. +// ones : Return a new array setting values to one. 
+// +// Examples +// -------- +// >>> x = np.arange(6) +// >>> x = x.reshape((2, 3)) +// >>> x +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.ones_like(x) +// array([[1, 1, 1], +// [1, 1, 1]]) +// +// >>> y = np.arange(3, dtype=float) +// >>> y +// array([0., 1., 2.]) +// >>> np.ones_like(y) +// array([1., 1., 1.]) +// +// +// +//go:linkname OnesLike py.ones_like +func OnesLike(__llgo_va_list ...interface{}) *py.Object +// +// Cross-correlation of two 1-dimensional sequences. +// +// This function computes the correlation as generally defined in signal +// processing texts: +// +// .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n +// +// with a and v sequences being zero-padded where necessary and +// :math:`\overline x` denoting complex conjugation. +// +// Parameters +// ---------- +// a, v : array_like +// Input sequences. +// mode : {'valid', 'same', 'full'}, optional +// Refer to the `convolve` docstring. Note that the default +// is 'valid', unlike `convolve`, which uses 'full'. +// old_behavior : bool +// `old_behavior` was removed in NumPy 1.10. If you need the old +// behavior, use `multiarray.correlate`. +// +// Returns +// ------- +// out : ndarray +// Discrete cross-correlation of `a` and `v`. +// +// See Also +// -------- +// convolve : Discrete, linear convolution of two one-dimensional sequences. +// multiarray.correlate : Old, no conjugate, version of correlate. +// scipy.signal.correlate : uses FFT which has superior performance on large arrays. +// +// Notes +// ----- +// The definition of correlation above is not unique and sometimes correlation +// may be defined differently. Another common definition is: +// +// .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} +// +// which is related to :math:`c_k` by :math:`c'_k = c_{-k}`. +// +// `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does +// not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might +// be preferable. 
+// +// +// Examples +// -------- +// >>> np.correlate([1, 2, 3], [0, 1, 0.5]) +// array([3.5]) +// >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") +// array([2. , 3.5, 3. ]) +// >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") +// array([0.5, 2. , 3.5, 3. , 0. ]) +// +// Using complex sequences: +// +// >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') +// array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) +// +// Note that you get the time reversed, complex conjugated result +// (:math:`\overline{c_{-k}}`) when the two input sequences a and v change +// places: +// +// >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') +// array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) +// +// +// +//go:linkname Correlate py.correlate +func Correlate(a *py.Object, v *py.Object, mode *py.Object) *py.Object +// +// Returns the discrete, linear convolution of two one-dimensional sequences. +// +// The convolution operator is often seen in signal processing, where it +// models the effect of a linear time-invariant system on a signal [1]_. In +// probability theory, the sum of two independent random variables is +// distributed according to the convolution of their individual +// distributions. +// +// If `v` is longer than `a`, the arrays are swapped before computation. +// +// Parameters +// ---------- +// a : (N,) array_like +// First one-dimensional input array. +// v : (M,) array_like +// Second one-dimensional input array. +// mode : {'full', 'valid', 'same'}, optional +// 'full': +// By default, mode is 'full'. This returns the convolution +// at each point of overlap, with an output shape of (N+M-1,). At +// the end-points of the convolution, the signals do not overlap +// completely, and boundary effects may be seen. +// +// 'same': +// Mode 'same' returns output of length ``max(M, N)``. Boundary +// effects are still visible. +// +// 'valid': +// Mode 'valid' returns output of length +// ``max(M, N) - min(M, N) + 1``. 
The convolution product is only given +// for points where the signals overlap completely. Values outside +// the signal boundary have no effect. +// +// Returns +// ------- +// out : ndarray +// Discrete, linear convolution of `a` and `v`. +// +// See Also +// -------- +// scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier +// Transform. +// scipy.linalg.toeplitz : Used to construct the convolution operator. +// polymul : Polynomial multiplication. Same output as convolve, but also +// accepts poly1d objects as input. +// +// Notes +// ----- +// The discrete convolution operation is defined as +// +// .. math:: (a * v)_n = \sum_{m = -\infty}^{\infty} a_m v_{n - m} +// +// It can be shown that a convolution :math:`x(t) * y(t)` in time/space +// is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier +// domain, after appropriate padding (padding is necessary to prevent +// circular convolution). Since multiplication is more efficient (faster) +// than convolution, the function `scipy.signal.fftconvolve` exploits the +// FFT to calculate the convolution of large data-sets. +// +// References +// ---------- +// .. [1] Wikipedia, "Convolution", +// https://en.wikipedia.org/wiki/Convolution +// +// Examples +// -------- +// Note how the convolution operator flips the second array +// before "sliding" the two across one another: +// +// >>> np.convolve([1, 2, 3], [0, 1, 0.5]) +// array([0. , 1. , 2.5, 4. , 1.5]) +// +// Only return the middle values of the convolution. +// Contains boundary effects, where zeros are taken +// into account: +// +// >>> np.convolve([1,2,3],[0,1,0.5], 'same') +// array([1. , 2.5, 4. 
]) +// +// The two arrays are of the same length, so there +// is only one position where they completely overlap: +// +// >>> np.convolve([1,2,3],[0,1,0.5], 'valid') +// array([2.5]) +// +// +// +//go:linkname Convolve py.convolve +func Convolve(a *py.Object, v *py.Object, mode *py.Object) *py.Object +// +// inner(a, b, /) +// +// Inner product of two arrays. +// +// Ordinary inner product of vectors for 1-D arrays (without complex +// conjugation), in higher dimensions a sum product over the last axes. +// +// Parameters +// ---------- +// a, b : array_like +// If `a` and `b` are nonscalar, their last dimensions must match. +// +// Returns +// ------- +// out : ndarray +// If `a` and `b` are both +// scalars or both 1-D arrays then a scalar is returned; otherwise +// an array is returned. +// ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` +// +// Raises +// ------ +// ValueError +// If both `a` and `b` are nonscalar and their last dimensions have +// different sizes. +// +// See Also +// -------- +// tensordot : Sum products over arbitrary axes. +// dot : Generalised matrix product, using second last dimension of `b`. +// einsum : Einstein summation convention. 
+// +// Notes +// ----- +// For vectors (1-D arrays) it computes the ordinary inner-product:: +// +// np.inner(a, b) = sum(a[:]*b[:]) +// +// More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: +// +// np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) +// +// or explicitly:: +// +// np.inner(a, b)[i0,...,ir-2,j0,...,js-2] +// = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) +// +// In addition `a` or `b` may be scalars, in which case:: +// +// np.inner(a,b) = a*b +// +// Examples +// -------- +// Ordinary inner product for vectors: +// +// >>> a = np.array([1,2,3]) +// >>> b = np.array([0,1,0]) +// >>> np.inner(a, b) +// 2 +// +// Some multidimensional examples: +// +// >>> a = np.arange(24).reshape((2,3,4)) +// >>> b = np.arange(4) +// >>> c = np.inner(a, b) +// >>> c.shape +// (2, 3) +// >>> c +// array([[ 14, 38, 62], +// [ 86, 110, 134]]) +// +// >>> a = np.arange(2).reshape((1,1,2)) +// >>> b = np.arange(6).reshape((3,2)) +// >>> c = np.inner(a, b) +// >>> c.shape +// (1, 1, 3) +// >>> c +// array([[[1, 3, 5]]]) +// +// An example where `b` is a scalar: +// +// >>> np.inner(np.eye(2), 7) +// array([[7., 0.], +// [0., 7.]]) +// +// +// +//go:linkname Inner py.inner +func Inner(a *py.Object, b *py.Object) *py.Object +// +// dot(a, b, out=None) +// +// Dot product of two arrays. Specifically, +// +// - If both `a` and `b` are 1-D arrays, it is inner product of vectors +// (without complex conjugation). +// +// - If both `a` and `b` are 2-D arrays, it is matrix multiplication, +// but using :func:`matmul` or ``a @ b`` is preferred. +// +// - If either `a` or `b` is 0-D (scalar), it is equivalent to +// :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is +// preferred. +// +// - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over +// the last axis of `a` and `b`. 
+// +// - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a +// sum product over the last axis of `a` and the second-to-last axis of +// `b`:: +// +// dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) +// +// It uses an optimized BLAS library when possible (see `numpy.linalg`). +// +// Parameters +// ---------- +// a : array_like +// First argument. +// b : array_like +// Second argument. +// out : ndarray, optional +// Output argument. This must have the exact kind that would be returned +// if it was not used. In particular, it must have the right type, must be +// C-contiguous, and its dtype must be the dtype that would be returned +// for `dot(a,b)`. This is a performance feature. Therefore, if these +// conditions are not met, an exception is raised, instead of attempting +// to be flexible. +// +// Returns +// ------- +// output : ndarray +// Returns the dot product of `a` and `b`. If `a` and `b` are both +// scalars or both 1-D arrays then a scalar is returned; otherwise +// an array is returned. +// If `out` is given, then it is returned. +// +// Raises +// ------ +// ValueError +// If the last dimension of `a` is not the same size as +// the second-to-last dimension of `b`. +// +// See Also +// -------- +// vdot : Complex-conjugating dot product. +// tensordot : Sum products over arbitrary axes. +// einsum : Einstein summation convention. +// matmul : '@' operator as method with out parameter. +// linalg.multi_dot : Chained dot product. 
+// +// Examples +// -------- +// >>> np.dot(3, 4) +// 12 +// +// Neither argument is complex-conjugated: +// +// >>> np.dot([2j, 3j], [2j, 3j]) +// (-13+0j) +// +// For 2-D arrays it is the matrix product: +// +// >>> a = [[1, 0], [0, 1]] +// >>> b = [[4, 1], [2, 2]] +// >>> np.dot(a, b) +// array([[4, 1], +// [2, 2]]) +// +// >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) +// >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) +// >>> np.dot(a, b)[2,3,2,1,2,2] +// 499128 +// >>> sum(a[2,3,2,:] * b[1,2,:,2]) +// 499128 +// +// +// +//go:linkname Dot py.dot +func Dot(a *py.Object, b *py.Object, out *py.Object) *py.Object +// +// Compute the outer product of two vectors. +// +// Given two vectors `a` and `b` of length ``M`` and ``N``, repsectively, +// the outer product [1]_ is:: +// +// [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ] +// [a_1*b_0 . +// [ ... . +// [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]] +// +// Parameters +// ---------- +// a : (M,) array_like +// First input vector. Input is flattened if +// not already 1-dimensional. +// b : (N,) array_like +// Second input vector. Input is flattened if +// not already 1-dimensional. +// out : (M, N) ndarray, optional +// A location where the result is stored +// +// .. versionadded:: 1.9.0 +// +// Returns +// ------- +// out : (M, N) ndarray +// ``out[i, j] = a[i] * b[j]`` +// +// See also +// -------- +// inner +// einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. +// ufunc.outer : A generalization to dimensions other than 1D and other +// operations. ``np.multiply.outer(a.ravel(), b.ravel())`` +// is the equivalent. +// tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` +// is the equivalent. +// +// References +// ---------- +// .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd +// ed., Baltimore, MD, Johns Hopkins University Press, 1996, +// pg. 8. 
+// +// Examples +// -------- +// Make a (*very* coarse) grid for computing a Mandelbrot set: +// +// >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) +// >>> rl +// array([[-2., -1., 0., 1., 2.], +// [-2., -1., 0., 1., 2.], +// [-2., -1., 0., 1., 2.], +// [-2., -1., 0., 1., 2.], +// [-2., -1., 0., 1., 2.]]) +// >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) +// >>> im +// array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], +// [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], +// [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], +// [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], +// [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) +// >>> grid = rl + im +// >>> grid +// array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], +// [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], +// [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], +// [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], +// [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) +// +// An example using a "vector" of letters: +// +// >>> x = np.array(['a', 'b', 'c'], dtype=object) +// >>> np.outer(x, [1, 2, 3]) +// array([['a', 'aa', 'aaa'], +// ['b', 'bb', 'bbb'], +// ['c', 'cc', 'ccc']], dtype=object) +// +// +// +//go:linkname Outer py.outer +func Outer(__llgo_va_list ...interface{}) *py.Object +// +// vdot(a, b, /) +// +// Return the dot product of two vectors. +// +// The vdot(`a`, `b`) function handles complex numbers differently than +// dot(`a`, `b`). If the first argument is complex the complex conjugate +// of the first argument is used for the calculation of the dot product. +// +// Note that `vdot` handles multidimensional arrays differently than `dot`: +// it does *not* perform a matrix product, but flattens input arguments +// to 1-D vectors first. Consequently, it should only be used for vectors. +// +// Parameters +// ---------- +// a : array_like +// If `a` is complex the complex conjugate is taken before calculation +// of the dot product. +// b : array_like +// Second argument to the dot product. 
+// +// Returns +// ------- +// output : ndarray +// Dot product of `a` and `b`. Can be an int, float, or +// complex depending on the types of `a` and `b`. +// +// See Also +// -------- +// dot : Return the dot product without using the complex conjugate of the +// first argument. +// +// Examples +// -------- +// >>> a = np.array([1+2j,3+4j]) +// >>> b = np.array([5+6j,7+8j]) +// >>> np.vdot(a, b) +// (70-8j) +// >>> np.vdot(b, a) +// (70+8j) +// +// Note that higher-dimensional arrays are flattened! +// +// >>> a = np.array([[1, 4], [5, 6]]) +// >>> b = np.array([[4, 1], [2, 2]]) +// >>> np.vdot(a, b) +// 30 +// >>> np.vdot(b, a) +// 30 +// >>> 1*4 + 4*1 + 5*2 + 6*2 +// 30 +// +// +// +//go:linkname Vdot py.vdot +func Vdot(a *py.Object, b *py.Object) *py.Object +// +// Roll array elements along a given axis. +// +// Elements that roll beyond the last position are re-introduced at +// the first. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// shift : int or tuple of ints +// The number of places by which elements are shifted. If a tuple, +// then `axis` must be a tuple of the same size, and each of the +// given axes is shifted by the corresponding number. If an int +// while `axis` is a tuple of ints, then the same value is used for +// all given axes. +// axis : int or tuple of ints, optional +// Axis or axes along which elements are shifted. By default, the +// array is flattened before shifting, after which the original +// shape is restored. +// +// Returns +// ------- +// res : ndarray +// Output array, with the same shape as `a`. +// +// See Also +// -------- +// rollaxis : Roll the specified axis backwards, until it lies in a +// given position. +// +// Notes +// ----- +// .. versionadded:: 1.12.0 +// +// Supports rolling over multiple dimensions simultaneously. 
+// +// Examples +// -------- +// >>> x = np.arange(10) +// >>> np.roll(x, 2) +// array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) +// >>> np.roll(x, -2) +// array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) +// +// >>> x2 = np.reshape(x, (2, 5)) +// >>> x2 +// array([[0, 1, 2, 3, 4], +// [5, 6, 7, 8, 9]]) +// >>> np.roll(x2, 1) +// array([[9, 0, 1, 2, 3], +// [4, 5, 6, 7, 8]]) +// >>> np.roll(x2, -1) +// array([[1, 2, 3, 4, 5], +// [6, 7, 8, 9, 0]]) +// >>> np.roll(x2, 1, axis=0) +// array([[5, 6, 7, 8, 9], +// [0, 1, 2, 3, 4]]) +// >>> np.roll(x2, -1, axis=0) +// array([[5, 6, 7, 8, 9], +// [0, 1, 2, 3, 4]]) +// >>> np.roll(x2, 1, axis=1) +// array([[4, 0, 1, 2, 3], +// [9, 5, 6, 7, 8]]) +// >>> np.roll(x2, -1, axis=1) +// array([[1, 2, 3, 4, 0], +// [6, 7, 8, 9, 5]]) +// >>> np.roll(x2, (1, 1), axis=(1, 0)) +// array([[9, 5, 6, 7, 8], +// [4, 0, 1, 2, 3]]) +// >>> np.roll(x2, (2, 1), axis=(1, 0)) +// array([[8, 9, 5, 6, 7], +// [3, 4, 0, 1, 2]]) +// +// +// +//go:linkname Roll py.roll +func Roll(a *py.Object, shift *py.Object, axis *py.Object) *py.Object +// +// Roll the specified axis backwards, until it lies in a given position. +// +// This function continues to be supported for backward compatibility, but you +// should prefer `moveaxis`. The `moveaxis` function was added in NumPy +// 1.11. +// +// Parameters +// ---------- +// a : ndarray +// Input array. +// axis : int +// The axis to be rolled. The positions of the other axes do not +// change relative to one another. +// start : int, optional +// When ``start <= axis``, the axis is rolled back until it lies in +// this position. When ``start > axis``, the axis is rolled until it +// lies before this position. The default, 0, results in a "complete" +// roll. The following table describes how negative values of ``start`` +// are interpreted: +// +// .. 
table:: +// :align: left +// +// +-------------------+----------------------+ +// | ``start`` | Normalized ``start`` | +// +===================+======================+ +// | ``-(arr.ndim+1)`` | raise ``AxisError`` | +// +-------------------+----------------------+ +// | ``-arr.ndim`` | 0 | +// +-------------------+----------------------+ +// | |vdots| | |vdots| | +// +-------------------+----------------------+ +// | ``-1`` | ``arr.ndim-1`` | +// +-------------------+----------------------+ +// | ``0`` | ``0`` | +// +-------------------+----------------------+ +// | |vdots| | |vdots| | +// +-------------------+----------------------+ +// | ``arr.ndim`` | ``arr.ndim`` | +// +-------------------+----------------------+ +// | ``arr.ndim + 1`` | raise ``AxisError`` | +// +-------------------+----------------------+ +// +// .. |vdots| unicode:: U+22EE .. Vertical Ellipsis +// +// Returns +// ------- +// res : ndarray +// For NumPy >= 1.10.0 a view of `a` is always returned. For earlier +// NumPy versions a view of `a` is returned only if the order of the +// axes is changed, otherwise the input array is returned. +// +// See Also +// -------- +// moveaxis : Move array axes to new positions. +// roll : Roll the elements of an array by a number of positions along a +// given axis. +// +// Examples +// -------- +// >>> a = np.ones((3,4,5,6)) +// >>> np.rollaxis(a, 3, 1).shape +// (3, 6, 4, 5) +// >>> np.rollaxis(a, 2).shape +// (5, 3, 4, 6) +// >>> np.rollaxis(a, 1, 4).shape +// (3, 5, 6, 4) +// +// +// +//go:linkname Rollaxis py.rollaxis +func Rollaxis(a *py.Object, axis *py.Object, start *py.Object) *py.Object +// +// Move axes of an array to new positions. +// +// Other axes remain in their original order. +// +// .. versionadded:: 1.11.0 +// +// Parameters +// ---------- +// a : np.ndarray +// The array whose axes should be reordered. +// source : int or sequence of int +// Original positions of the axes to move. These must be unique. 
+// destination : int or sequence of int +// Destination positions for each of the original axes. These must also be +// unique. +// +// Returns +// ------- +// result : np.ndarray +// Array with moved axes. This array is a view of the input array. +// +// See Also +// -------- +// transpose : Permute the dimensions of an array. +// swapaxes : Interchange two axes of an array. +// +// Examples +// -------- +// >>> x = np.zeros((3, 4, 5)) +// >>> np.moveaxis(x, 0, -1).shape +// (4, 5, 3) +// >>> np.moveaxis(x, -1, 0).shape +// (5, 3, 4) +// +// These all achieve the same result: +// +// >>> np.transpose(x).shape +// (5, 4, 3) +// >>> np.swapaxes(x, 0, -1).shape +// (5, 4, 3) +// >>> np.moveaxis(x, [0, 1], [-1, -2]).shape +// (5, 4, 3) +// >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape +// (5, 4, 3) +// +// +// +//go:linkname Moveaxis py.moveaxis +func Moveaxis(a *py.Object, source *py.Object, destination *py.Object) *py.Object +// +// Return the cross product of two (arrays of) vectors. +// +// The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular +// to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors +// are defined by the last axis of `a` and `b` by default, and these axes +// can have dimensions 2 or 3. Where the dimension of either `a` or `b` is +// 2, the third component of the input vector is assumed to be zero and the +// cross product calculated accordingly. In cases where both input vectors +// have dimension 2, the z-component of the cross product is returned. +// +// Parameters +// ---------- +// a : array_like +// Components of the first vector(s). +// b : array_like +// Components of the second vector(s). +// axisa : int, optional +// Axis of `a` that defines the vector(s). By default, the last axis. +// axisb : int, optional +// Axis of `b` that defines the vector(s). By default, the last axis. +// axisc : int, optional +// Axis of `c` containing the cross product vector(s). 
Ignored if +// both input vectors have dimension 2, as the return is scalar. +// By default, the last axis. +// axis : int, optional +// If defined, the axis of `a`, `b` and `c` that defines the vector(s) +// and cross product(s). Overrides `axisa`, `axisb` and `axisc`. +// +// Returns +// ------- +// c : ndarray +// Vector cross product(s). +// +// Raises +// ------ +// ValueError +// When the dimension of the vector(s) in `a` and/or `b` does not +// equal 2 or 3. +// +// See Also +// -------- +// inner : Inner product +// outer : Outer product. +// ix_ : Construct index arrays. +// +// Notes +// ----- +// .. versionadded:: 1.9.0 +// +// Supports full broadcasting of the inputs. +// +// Examples +// -------- +// Vector cross-product. +// +// >>> x = [1, 2, 3] +// >>> y = [4, 5, 6] +// >>> np.cross(x, y) +// array([-3, 6, -3]) +// +// One vector with dimension 2. +// +// >>> x = [1, 2] +// >>> y = [4, 5, 6] +// >>> np.cross(x, y) +// array([12, -6, -3]) +// +// Equivalently: +// +// >>> x = [1, 2, 0] +// >>> y = [4, 5, 6] +// >>> np.cross(x, y) +// array([12, -6, -3]) +// +// Both vectors with dimension 2. +// +// >>> x = [1,2] +// >>> y = [4,5] +// >>> np.cross(x, y) +// array(-3) +// +// Multiple vector cross-products. Note that the direction of the cross +// product vector is defined by the *right-hand rule*. +// +// >>> x = np.array([[1,2,3], [4,5,6]]) +// >>> y = np.array([[4,5,6], [1,2,3]]) +// >>> np.cross(x, y) +// array([[-3, 6, -3], +// [ 3, -6, 3]]) +// +// The orientation of `c` can be changed using the `axisc` keyword. +// +// >>> np.cross(x, y, axisc=0) +// array([[-3, 3], +// [ 6, -6], +// [-3, 3]]) +// +// Change the vector definition of `x` and `y` using `axisa` and `axisb`. 
+// +// >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) +// >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) +// >>> np.cross(x, y) +// array([[ -6, 12, -6], +// [ 0, 0, 0], +// [ 6, -12, 6]]) +// >>> np.cross(x, y, axisa=0, axisb=0) +// array([[-24, 48, -24], +// [-30, 60, -30], +// [-36, 72, -36]]) +// +// +// +//go:linkname Cross py.cross +func Cross(a *py.Object, b *py.Object, axisa *py.Object, axisb *py.Object, axisc *py.Object, axis *py.Object) *py.Object +// +// Compute tensor dot product along specified axes. +// +// Given two tensors, `a` and `b`, and an array_like object containing +// two array_like objects, ``(a_axes, b_axes)``, sum the products of +// `a`'s and `b`'s elements (components) over the axes specified by +// ``a_axes`` and ``b_axes``. The third argument can be a single non-negative +// integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions +// of `a` and the first ``N`` dimensions of `b` are summed over. +// +// Parameters +// ---------- +// a, b : array_like +// Tensors to "dot". +// +// axes : int or (2,) array_like +// * integer_like +// If an int N, sum over the last N axes of `a` and the first N axes +// of `b` in order. The sizes of the corresponding axes must match. +// * (2,) array_like +// Or, a list of axes to be summed over, first sequence applying to `a`, +// second to `b`. Both elements array_like must be of the same length. +// +// Returns +// ------- +// output : ndarray +// The tensor dot product of the input. +// +// See Also +// -------- +// dot, einsum +// +// Notes +// ----- +// Three common use cases are: +// * ``axes = 0`` : tensor product :math:`a\otimes b` +// * ``axes = 1`` : tensor dot product :math:`a\cdot b` +// * ``axes = 2`` : (default) tensor double contraction :math:`a:b` +// +// When `axes` is integer_like, the sequence for evaluation will be: first +// the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and +// Nth axis in `b` last. 
+// +// When there is more than one axis to sum over - and they are not the last +// (first) axes of `a` (`b`) - the argument `axes` should consist of +// two sequences of the same length, with the first axis to sum over given +// first in both sequences, the second axis second, and so forth. +// +// The shape of the result consists of the non-contracted axes of the +// first tensor, followed by the non-contracted axes of the second. +// +// Examples +// -------- +// A "traditional" example: +// +// >>> a = np.arange(60.).reshape(3,4,5) +// >>> b = np.arange(24.).reshape(4,3,2) +// >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) +// >>> c.shape +// (5, 2) +// >>> c +// array([[4400., 4730.], +// [4532., 4874.], +// [4664., 5018.], +// [4796., 5162.], +// [4928., 5306.]]) +// >>> # A slower but equivalent way of computing the same... +// >>> d = np.zeros((5,2)) +// >>> for i in range(5): +// ... for j in range(2): +// ... for k in range(3): +// ... for n in range(4): +// ... d[i,j] += a[k,n,i] * b[n,k,j] +// >>> c == d +// array([[ True, True], +// [ True, True], +// [ True, True], +// [ True, True], +// [ True, True]]) +// +// An extended example taking advantage of the overloading of + and \*: +// +// >>> a = np.array(range(1, 9)) +// >>> a.shape = (2, 2, 2) +// >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) +// >>> A.shape = (2, 2) +// >>> a; A +// array([[[1, 2], +// [3, 4]], +// [[5, 6], +// [7, 8]]]) +// array([['a', 'b'], +// ['c', 'd']], dtype=object) +// +// >>> np.tensordot(a, A) # third argument default is 2 for double-contraction +// array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) +// +// >>> np.tensordot(a, A, 1) +// array([[['acc', 'bdd'], +// ['aaacccc', 'bbbdddd']], +// [['aaaaacccccc', 'bbbbbdddddd'], +// ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) +// +// >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) +// array([[[[['a', 'b'], +// ['c', 'd']], +// ... 
+// +// >>> np.tensordot(a, A, (0, 1)) +// array([[['abbbbb', 'cddddd'], +// ['aabbbbbb', 'ccdddddd']], +// [['aaabbbbbbb', 'cccddddddd'], +// ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) +// +// >>> np.tensordot(a, A, (2, 1)) +// array([[['abb', 'cdd'], +// ['aaabbbb', 'cccdddd']], +// [['aaaaabbbbbb', 'cccccdddddd'], +// ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) +// +// >>> np.tensordot(a, A, ((0, 1), (0, 1))) +// array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) +// +// >>> np.tensordot(a, A, ((2, 1), (1, 0))) +// array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) +// +// +// +//go:linkname Tensordot py.tensordot +func Tensordot(a *py.Object, b *py.Object, axes *py.Object) *py.Object +// fromiter(iter, dtype, count=-1, *, like=None) +// +// Create a new 1-dimensional array from an iterable object. +// +// Parameters +// ---------- +// iter : iterable object +// An iterable object providing data for the array. +// dtype : data-type +// The data-type of the returned array. +// +// .. versionchanged:: 1.23 +// Object and subarray dtypes are now supported (note that the final +// result is not 1-D for a subarray dtype). +// +// count : int, optional +// The number of items to read from *iterable*. The default is -1, +// which means all data is read. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// The output array. +// +// Notes +// ----- +// Specify `count` to improve performance. It allows ``fromiter`` to +// pre-allocate the output array, instead of resizing it on demand. 
+// +// Examples +// -------- +// >>> iterable = (x*x for x in range(5)) +// >>> np.fromiter(iterable, float) +// array([ 0., 1., 4., 9., 16.]) +// +// A carefully constructed subarray dtype will lead to higher dimensional +// results: +// +// >>> iterable = ((x+1, x+2) for x in range(5)) +// >>> np.fromiter(iterable, dtype=np.dtype((int, 2))) +// array([[1, 2], +// [2, 3], +// [3, 4], +// [4, 5], +// [5, 6]]) +// +//go:linkname Fromiter py.fromiter +func Fromiter(iter *py.Object, dtype *py.Object, count *py.Object) *py.Object +// +// True if two arrays have the same shape and elements, False otherwise. +// +// Parameters +// ---------- +// a1, a2 : array_like +// Input arrays. +// equal_nan : bool +// Whether to compare NaN's as equal. If the dtype of a1 and a2 is +// complex, values will be considered equal if either the real or the +// imaginary component of a given value is ``nan``. +// +// .. versionadded:: 1.19.0 +// +// Returns +// ------- +// b : bool +// Returns True if the arrays are equal. +// +// See Also +// -------- +// allclose: Returns True if two arrays are element-wise equal within a +// tolerance. +// array_equiv: Returns True if input arrays are shape consistent and all +// elements equal. +// +// Examples +// -------- +// >>> np.array_equal([1, 2], [1, 2]) +// True +// >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) +// True +// >>> np.array_equal([1, 2], [1, 2, 3]) +// False +// >>> np.array_equal([1, 2], [1, 4]) +// False +// >>> a = np.array([1, np.nan]) +// >>> np.array_equal(a, a) +// False +// >>> np.array_equal(a, a, equal_nan=True) +// True +// +// When ``equal_nan`` is True, complex values with nan components are +// considered equal if either the real *or* the imaginary components are nan. 
+// +// >>> a = np.array([1 + 1j]) +// >>> b = a.copy() +// >>> a.real = np.nan +// >>> b.imag = np.nan +// >>> np.array_equal(a, b, equal_nan=True) +// True +// +// +//go:linkname ArrayEqual py.array_equal +func ArrayEqual(a1 *py.Object, a2 *py.Object, equalNan *py.Object) *py.Object +// +// Returns True if input arrays are shape consistent and all elements equal. +// +// Shape consistent means they are either the same shape, or one input array +// can be broadcasted to create the same shape as the other one. +// +// Parameters +// ---------- +// a1, a2 : array_like +// Input arrays. +// +// Returns +// ------- +// out : bool +// True if equivalent, False otherwise. +// +// Examples +// -------- +// >>> np.array_equiv([1, 2], [1, 2]) +// True +// >>> np.array_equiv([1, 2], [1, 3]) +// False +// +// Showing the shape equivalence: +// +// >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) +// True +// >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) +// False +// +// >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) +// False +// +// +// +//go:linkname ArrayEquiv py.array_equiv +func ArrayEquiv(a1 *py.Object, a2 *py.Object) *py.Object +// +// Return an array representing the indices of a grid. +// +// Compute an array where the subarrays contain index values 0, 1, ... +// varying only along the corresponding axis. +// +// Parameters +// ---------- +// dimensions : sequence of ints +// The shape of the grid. +// dtype : dtype, optional +// Data type of the result. +// sparse : boolean, optional +// Return a sparse representation of the grid instead of a dense +// representation. Default is False. +// +// .. versionadded:: 1.17 +// +// Returns +// ------- +// grid : one ndarray or tuple of ndarrays +// If sparse is False: +// Returns one array of grid indices, +// ``grid.shape = (len(dimensions),) + tuple(dimensions)``. 
+// If sparse is True: +// Returns a tuple of arrays, with +// ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with +// dimensions[i] in the ith place +// +// See Also +// -------- +// mgrid, ogrid, meshgrid +// +// Notes +// ----- +// The output shape in the dense case is obtained by prepending the number +// of dimensions in front of the tuple of dimensions, i.e. if `dimensions` +// is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is +// ``(N, r0, ..., rN-1)``. +// +// The subarrays ``grid[k]`` contains the N-D array of indices along the +// ``k-th`` axis. Explicitly:: +// +// grid[k, i0, i1, ..., iN-1] = ik +// +// Examples +// -------- +// >>> grid = np.indices((2, 3)) +// >>> grid.shape +// (2, 2, 3) +// >>> grid[0] # row indices +// array([[0, 0, 0], +// [1, 1, 1]]) +// >>> grid[1] # column indices +// array([[0, 1, 2], +// [0, 1, 2]]) +// +// The indices can be used as an index into an array. +// +// >>> x = np.arange(20).reshape(5, 4) +// >>> row, col = np.indices((2, 3)) +// >>> x[row, col] +// array([[0, 1, 2], +// [4, 5, 6]]) +// +// Note that it would be more straightforward in the above example to +// extract the required elements directly with ``x[:2, :3]``. +// +// If sparse is set to true, the grid will be returned in a sparse +// representation. +// +// >>> i, j = np.indices((2, 3), sparse=True) +// >>> i.shape +// (2, 1) +// >>> j.shape +// (1, 3) +// >>> i # row indices +// array([[0], +// [1]]) +// >>> j # column indices +// array([[0, 1, 2]]) +// +// +// +//go:linkname Indices py.indices +func Indices(dimensions *py.Object, dtype *py.Object, sparse *py.Object) *py.Object +// +// Construct an array by executing a function over each coordinate. +// +// The resulting array therefore has a value ``fn(x, y, z)`` at +// coordinate ``(x, y, z)``. +// +// Parameters +// ---------- +// function : callable +// The function is called with N parameters, where N is the rank of +// `shape`. 
Each parameter represents the coordinates of the array +// varying along a specific axis. For example, if `shape` +// were ``(2, 2)``, then the parameters would be +// ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` +// shape : (N,) tuple of ints +// Shape of the output array, which also determines the shape of +// the coordinate arrays passed to `function`. +// dtype : data-type, optional +// Data-type of the coordinate arrays passed to `function`. +// By default, `dtype` is float. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// fromfunction : any +// The result of the call to `function` is passed back directly. +// Therefore the shape of `fromfunction` is completely determined by +// `function`. If `function` returns a scalar value, the shape of +// `fromfunction` would not match the `shape` parameter. +// +// See Also +// -------- +// indices, meshgrid +// +// Notes +// ----- +// Keywords other than `dtype` and `like` are passed to `function`. 
+// +// Examples +// -------- +// >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) +// array([[0., 0.], +// [1., 1.]]) +// +// >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) +// array([[0., 1.], +// [0., 1.]]) +// +// >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) +// array([[ True, False, False], +// [False, True, False], +// [False, False, True]]) +// +// >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) +// array([[0, 1, 2], +// [1, 2, 3], +// [2, 3, 4]]) +// +// +// +//go:linkname Fromfunction py.fromfunction +func Fromfunction(function *py.Object, shape *py.Object) *py.Object +// +// Returns a boolean array where two arrays are element-wise equal within a +// tolerance. +// +// The tolerance values are positive, typically very small numbers. The +// relative difference (`rtol` * abs(`b`)) and the absolute difference +// `atol` are added together to compare against the absolute difference +// between `a` and `b`. +// +// .. warning:: The default `atol` is not appropriate for comparing numbers +// that are much smaller than one (see Notes). +// +// Parameters +// ---------- +// a, b : array_like +// Input arrays to compare. +// rtol : float +// The relative tolerance parameter (see Notes). +// atol : float +// The absolute tolerance parameter (see Notes). +// equal_nan : bool +// Whether to compare NaN's as equal. If True, NaN's in `a` will be +// considered equal to NaN's in `b` in the output array. +// +// Returns +// ------- +// y : array_like +// Returns a boolean array of where `a` and `b` are equal within the +// given tolerance. If both `a` and `b` are scalars, returns a single +// boolean value. +// +// See Also +// -------- +// allclose +// math.isclose +// +// Notes +// ----- +// .. versionadded:: 1.7.0 +// +// For finite values, isclose uses the following equation to test whether +// two floating point values are equivalent. 
+// +// absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) +// +// Unlike the built-in `math.isclose`, the above equation is not symmetric +// in `a` and `b` -- it assumes `b` is the reference value -- so that +// `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, +// the default value of atol is not zero, and is used to determine what +// small values should be considered close to zero. The default value is +// appropriate for expected values of order unity: if the expected values +// are significantly smaller than one, it can result in false positives. +// `atol` should be carefully selected for the use case at hand. A zero value +// for `atol` will result in `False` if either `a` or `b` is zero. +// +// `isclose` is not defined for non-numeric data types. +// `bool` is considered a numeric data-type for this purpose. +// +// Examples +// -------- +// >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) +// array([ True, False]) +// >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) +// array([ True, True]) +// >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) +// array([False, True]) +// >>> np.isclose([1.0, np.nan], [1.0, np.nan]) +// array([ True, False]) +// >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) +// array([ True, True]) +// >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) +// array([ True, False]) +// >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) +// array([False, False]) +// >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) +// array([ True, True]) +// >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) +// array([False, True]) +// +// +//go:linkname Isclose py.isclose +func Isclose(a *py.Object, b *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object +// +// Returns True if the type of `element` is a scalar type. +// +// Parameters +// ---------- +// element : any +// Input argument, can be of any type and shape. 
+// +// Returns +// ------- +// val : bool +// True if `element` is a scalar type, False if it is not. +// +// See Also +// -------- +// ndim : Get the number of dimensions of an array +// +// Notes +// ----- +// If you need a stricter way to identify a *numerical* scalar, use +// ``isinstance(x, numbers.Number)``, as that returns ``False`` for most +// non-numerical elements such as strings. +// +// In most cases ``np.ndim(x) == 0`` should be used instead of this function, +// as that will also return true for 0d arrays. This is how numpy overloads +// functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` +// argument to `histogram`. Some key differences: +// +// +--------------------------------------+---------------+-------------------+ +// | x |``isscalar(x)``|``np.ndim(x) == 0``| +// +======================================+===============+===================+ +// | PEP 3141 numeric objects (including | ``True`` | ``True`` | +// | builtins) | | | +// +--------------------------------------+---------------+-------------------+ +// | builtin string and buffer objects | ``True`` | ``True`` | +// +--------------------------------------+---------------+-------------------+ +// | other builtin objects, like | ``False`` | ``True`` | +// | `pathlib.Path`, `Exception`, | | | +// | the result of `re.compile` | | | +// +--------------------------------------+---------------+-------------------+ +// | third-party objects like | ``False`` | ``True`` | +// | `matplotlib.figure.Figure` | | | +// +--------------------------------------+---------------+-------------------+ +// | zero-dimensional numpy arrays | ``False`` | ``True`` | +// +--------------------------------------+---------------+-------------------+ +// | other numpy arrays | ``False`` | ``False`` | +// +--------------------------------------+---------------+-------------------+ +// | `list`, `tuple`, and other sequence | ``False`` | ``False`` | +// | objects | | | +// 
+--------------------------------------+---------------+-------------------+ +// +// Examples +// -------- +// >>> np.isscalar(3.1) +// True +// >>> np.isscalar(np.array(3.1)) +// False +// >>> np.isscalar([3.1]) +// False +// >>> np.isscalar(False) +// True +// >>> np.isscalar('numpy') +// True +// +// NumPy supports PEP 3141 numbers: +// +// >>> from fractions import Fraction +// >>> np.isscalar(Fraction(5, 17)) +// True +// >>> from numbers import Number +// >>> np.isscalar(Number()) +// True +// +// +// +//go:linkname Isscalar py.isscalar +func Isscalar(element *py.Object) *py.Object +// +// Return the binary representation of the input number as a string. +// +// For negative numbers, if width is not given, a minus sign is added to the +// front. If width is given, the two's complement of the number is +// returned, with respect to that width. +// +// In a two's-complement system negative numbers are represented by the two's +// complement of the absolute value. This is the most common method of +// representing signed integers on computers [1]_. A N-bit two's-complement +// system can represent every integer in the range +// :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. +// +// Parameters +// ---------- +// num : int +// Only an integer decimal number can be used. +// width : int, optional +// The length of the returned string if `num` is positive, or the length +// of the two's complement if `num` is negative, provided that `width` is +// at least a sufficient number of bits for `num` to be represented in the +// designated form. +// +// If the `width` value is insufficient, it will be ignored, and `num` will +// be returned in binary (`num` > 0) or two's complement (`num` < 0) form +// with its width equal to the minimum number of bits needed to represent +// the number in the designated form. This behavior is deprecated and will +// later raise an error. +// +// .. 
deprecated:: 1.12.0 +// +// Returns +// ------- +// bin : str +// Binary representation of `num` or two's complement of `num`. +// +// See Also +// -------- +// base_repr: Return a string representation of a number in the given base +// system. +// bin: Python's built-in binary representation generator of an integer. +// +// Notes +// ----- +// `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x +// faster. +// +// References +// ---------- +// .. [1] Wikipedia, "Two's complement", +// https://en.wikipedia.org/wiki/Two's_complement +// +// Examples +// -------- +// >>> np.binary_repr(3) +// '11' +// >>> np.binary_repr(-3) +// '-11' +// >>> np.binary_repr(3, width=4) +// '0011' +// +// The two's complement is returned when the input number is negative and +// width is specified: +// +// >>> np.binary_repr(-3, width=3) +// '101' +// >>> np.binary_repr(-3, width=5) +// '11101' +// +// +// +//go:linkname BinaryRepr py.binary_repr +func BinaryRepr(num *py.Object, width *py.Object) *py.Object +// +// Return a string representation of a number in the given base system. +// +// Parameters +// ---------- +// number : int +// The value to convert. Positive and negative values are handled. +// base : int, optional +// Convert `number` to the `base` number system. The valid range is 2-36, +// the default value is 2. +// padding : int, optional +// Number of zeros padded on the left. Default is 0 (no padding). +// +// Returns +// ------- +// out : str +// String representation of `number` in `base` system. +// +// See Also +// -------- +// binary_repr : Faster version of `base_repr` for base 2. 
+// +// Examples +// -------- +// >>> np.base_repr(5) +// '101' +// >>> np.base_repr(6, 5) +// '11' +// >>> np.base_repr(7, base=5, padding=3) +// '00012' +// +// >>> np.base_repr(10, base=16) +// 'A' +// >>> np.base_repr(32, base=16) +// '20' +// +// +// +//go:linkname BaseRepr py.base_repr +func BaseRepr(number *py.Object, base *py.Object, padding *py.Object) *py.Object +// +// Return a new array of given shape and type, filled with ones. +// +// Parameters +// ---------- +// shape : int or sequence of ints +// Shape of the new array, e.g., ``(2, 3)`` or ``2``. +// dtype : data-type, optional +// The desired data-type for the array, e.g., `numpy.int8`. Default is +// `numpy.float64`. +// order : {'C', 'F'}, optional, default: C +// Whether to store multi-dimensional data in row-major +// (C-style) or column-major (Fortran-style) order in +// memory. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array of ones with the given shape, dtype, and order. +// +// See Also +// -------- +// ones_like : Return an array of ones with shape and type of input. +// empty : Return a new uninitialized array. +// zeros : Return a new array setting values to zero. +// full : Return a new array of given shape filled with value. 
+// +// +// Examples +// -------- +// >>> np.ones(5) +// array([1., 1., 1., 1., 1.]) +// +// >>> np.ones((5,), dtype=int) +// array([1, 1, 1, 1, 1]) +// +// >>> np.ones((2, 1)) +// array([[1.], +// [1.]]) +// +// >>> s = (2,2) +// >>> np.ones(s) +// array([[1., 1.], +// [1., 1.]]) +// +// +// +//go:linkname Ones py.ones +func Ones(shape *py.Object, dtype *py.Object, order *py.Object) *py.Object +// +// Return the identity array. +// +// The identity array is a square array with ones on +// the main diagonal. +// +// Parameters +// ---------- +// n : int +// Number of rows (and columns) in `n` x `n` output. +// dtype : data-type, optional +// Data-type of the output. Defaults to ``float``. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// `n` x `n` array with its main diagonal set to one, +// and all other elements 0. +// +// Examples +// -------- +// >>> np.identity(3) +// array([[1., 0., 0.], +// [0., 1., 0.], +// [0., 0., 1.]]) +// +// +// +//go:linkname Identity py.identity +func Identity(n *py.Object, dtype *py.Object) *py.Object +// +// Returns True if two arrays are element-wise equal within a tolerance. +// +// The tolerance values are positive, typically very small numbers. The +// relative difference (`rtol` * abs(`b`)) and the absolute difference +// `atol` are added together to compare against the absolute difference +// between `a` and `b`. +// +// NaNs are treated as equal if they are in the same place and if +// ``equal_nan=True``. Infs are treated as equal if they are in the same +// place and of the same sign in both arrays. 
+// +// Parameters +// ---------- +// a, b : array_like +// Input arrays to compare. +// rtol : float +// The relative tolerance parameter (see Notes). +// atol : float +// The absolute tolerance parameter (see Notes). +// equal_nan : bool +// Whether to compare NaN's as equal. If True, NaN's in `a` will be +// considered equal to NaN's in `b` in the output array. +// +// .. versionadded:: 1.10.0 +// +// Returns +// ------- +// allclose : bool +// Returns True if the two arrays are equal within the given +// tolerance; False otherwise. +// +// See Also +// -------- +// isclose, all, any, equal +// +// Notes +// ----- +// If the following equation is element-wise True, then allclose returns +// True. +// +// absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) +// +// The above equation is not symmetric in `a` and `b`, so that +// ``allclose(a, b)`` might be different from ``allclose(b, a)`` in +// some rare cases. +// +// The comparison of `a` and `b` uses standard broadcasting, which +// means that `a` and `b` need not have the same shape in order for +// ``allclose(a, b)`` to evaluate to True. The same is true for +// `equal` but not `array_equal`. +// +// `allclose` is not defined for non-numeric data types. +// `bool` is considered a numeric data-type for this purpose. +// +// Examples +// -------- +// >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) +// False +// >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) +// True +// >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) +// False +// >>> np.allclose([1.0, np.nan], [1.0, np.nan]) +// False +// >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) +// True +// +// +// +//go:linkname Allclose py.allclose +func Allclose(a *py.Object, b *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object +// compare_chararrays(a1, a2, cmp, rstrip) +// +// Performs element-wise comparison of two string arrays using the +// comparison operator specified by `cmp_op`. 
+// +// Parameters +// ---------- +// a1, a2 : array_like +// Arrays to be compared. +// cmp : {"<", "<=", "==", ">=", ">", "!="} +// Type of comparison. +// rstrip : Boolean +// If True, the spaces at the end of Strings are removed before the comparison. +// +// Returns +// ------- +// out : ndarray +// The output array of type Boolean with the same shape as a and b. +// +// Raises +// ------ +// ValueError +// If `cmp_op` is not valid. +// TypeError +// If at least one of `a` or `b` is a non-string array +// +// Examples +// -------- +// >>> a = np.array(["a", "b", "cde"]) +// >>> b = np.array(["a", "a", "dec"]) +// >>> np.compare_chararrays(a, b, ">", True) +// array([False, True, False]) +// +//go:linkname CompareChararrays py.compare_chararrays +func CompareChararrays(a1 *py.Object, a2 *py.Object, cmp *py.Object, rstrip *py.Object) *py.Object +// +// putmask(a, mask, values) +// +// Changes elements of an array based on conditional and input values. +// +// Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. +// +// If `values` is not the same size as `a` and `mask` then it will repeat. +// This gives behavior different from ``a[mask] = values``. +// +// Parameters +// ---------- +// a : ndarray +// Target array. +// mask : array_like +// Boolean mask array. It has to be the same shape as `a`. +// values : array_like +// Values to put into `a` where `mask` is True. If `values` is smaller +// than `a` it will be repeated. 
+// +// See Also +// -------- +// place, put, take, copyto +// +// Examples +// -------- +// >>> x = np.arange(6).reshape(2, 3) +// >>> np.putmask(x, x>2, x**2) +// >>> x +// array([[ 0, 1, 2], +// [ 9, 16, 25]]) +// +// If `values` is smaller than `a` it is repeated: +// +// >>> x = np.arange(5) +// >>> np.putmask(x, x>1, [-33, -44]) +// >>> x +// array([ 0, 1, -33, -44, -33]) +// +// +// +//go:linkname Putmask py.putmask +func Putmask(a *py.Object, mask *py.Object, values *py.Object) *py.Object +// +// Return indices that are non-zero in the flattened version of a. +// +// This is equivalent to ``np.nonzero(np.ravel(a))[0]``. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// +// Returns +// ------- +// res : ndarray +// Output array, containing the indices of the elements of ``a.ravel()`` +// that are non-zero. +// +// See Also +// -------- +// nonzero : Return the indices of the non-zero elements of the input array. +// ravel : Return a 1-D array containing the elements of the input array. +// +// Examples +// -------- +// >>> x = np.arange(-2, 3) +// >>> x +// array([-2, -1, 0, 1, 2]) +// >>> np.flatnonzero(x) +// array([0, 1, 3, 4]) +// +// Use the indices of the non-zero elements as an index array to extract +// these elements: +// +// >>> x.ravel()[np.flatnonzero(x)] +// array([-2, -1, 1, 2]) +// +// +// +//go:linkname Flatnonzero py.flatnonzero +func Flatnonzero(a *py.Object) *py.Object +// invert(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute bit-wise inversion, or bit-wise NOT, element-wise. +// +// Computes the bit-wise NOT of the underlying binary representation of +// the integers in the input arrays. This ufunc implements the C/Python +// operator ``~``. +// +// For signed integer inputs, the two's complement is returned. In a +// two's-complement system negative numbers are represented by the two's +// complement of the absolute value. 
This is the most common method of +// representing signed integers on computers [1]_. A N-bit +// two's-complement system can represent every integer in the range +// :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. +// +// Parameters +// ---------- +// x : array_like +// Only integer and boolean types are handled. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Result. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// bitwise_and, bitwise_or, bitwise_xor +// logical_not +// binary_repr : +// Return the binary representation of the input number as a string. +// +// Notes +// ----- +// `bitwise_not` is an alias for `invert`: +// +// >>> np.bitwise_not is np.invert +// True +// +// References +// ---------- +// .. [1] Wikipedia, "Two's complement", +// https://en.wikipedia.org/wiki/Two's_complement +// +// Examples +// -------- +// We've seen that 13 is represented by ``00001101``. 
+// The invert or bit-wise NOT of 13 is then: +// +// >>> x = np.invert(np.array(13, dtype=np.uint8)) +// >>> x +// 242 +// >>> np.binary_repr(x, width=8) +// '11110010' +// +// The result depends on the bit-width: +// +// >>> x = np.invert(np.array(13, dtype=np.uint16)) +// >>> x +// 65522 +// >>> np.binary_repr(x, width=16) +// '1111111111110010' +// +// When using signed integer types the result is the two's complement of +// the result for the unsigned type: +// +// >>> np.invert(np.array([13], dtype=np.int8)) +// array([-14], dtype=int8) +// >>> np.binary_repr(-14, width=8) +// '11110010' +// +// Booleans are accepted as well: +// +// >>> np.invert(np.array([True, False])) +// array([False, True]) +// +// The ``~`` operator can be used as a shorthand for ``np.invert`` on +// ndarrays. +// +// >>> x1 = np.array([True, False]) +// >>> ~x1 +// array([False, True]) +// +//go:linkname BitwiseNot py.bitwise_not +func BitwiseNot(__llgo_va_list ...interface{}) *py.Object +// +// Return a new array of given shape and type, filled with `fill_value`. +// +// Parameters +// ---------- +// shape : int or sequence of ints +// Shape of the new array, e.g., ``(2, 3)`` or ``2``. +// fill_value : scalar or array_like +// Fill value. +// dtype : data-type, optional +// The desired data-type for the array The default, None, means +// ``np.array(fill_value).dtype``. +// order : {'C', 'F'}, optional +// Whether to store multidimensional data in C- or Fortran-contiguous +// (row- or column-wise) order in memory. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. 
versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array of `fill_value` with the given shape, dtype, and order. +// +// See Also +// -------- +// full_like : Return a new array with shape of input filled with value. +// empty : Return a new uninitialized array. +// ones : Return a new array setting values to one. +// zeros : Return a new array setting values to zero. +// +// Examples +// -------- +// >>> np.full((2, 2), np.inf) +// array([[inf, inf], +// [inf, inf]]) +// >>> np.full((2, 2), 10) +// array([[10, 10], +// [10, 10]]) +// +// >>> np.full((2, 2), [1, 2]) +// array([[1, 2], +// [1, 2]]) +// +// +// +//go:linkname Full py.full +func Full(shape *py.Object, fillValue *py.Object, dtype *py.Object, order *py.Object) *py.Object +// +// Return a full array with the same shape and type as a given array. +// +// Parameters +// ---------- +// a : array_like +// The shape and data-type of `a` define these same attributes of +// the returned array. +// fill_value : array_like +// Fill value. +// dtype : data-type, optional +// Overrides the data type of the result. +// order : {'C', 'F', 'A', or 'K'}, optional +// Overrides the memory layout of the result. 'C' means C-order, +// 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, +// 'C' otherwise. 'K' means match the layout of `a` as closely +// as possible. +// subok : bool, optional. +// If True, then the newly created array will use the sub-class +// type of `a`, otherwise it will be a base-class array. Defaults +// to True. +// shape : int or sequence of ints, optional. +// Overrides the shape of the result. If order='K' and the number of +// dimensions is unchanged, will try to keep order, otherwise, +// order='C' is implied. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// out : ndarray +// Array of `fill_value` with the same shape and type as `a`. +// +// See Also +// -------- +// empty_like : Return an empty array with shape and type of input. 
+// ones_like : Return an array of ones with shape and type of input. +// zeros_like : Return an array of zeros with shape and type of input. +// full : Return a new array of given shape filled with value. +// +// Examples +// -------- +// >>> x = np.arange(6, dtype=int) +// >>> np.full_like(x, 1) +// array([1, 1, 1, 1, 1, 1]) +// >>> np.full_like(x, 0.1) +// array([0, 0, 0, 0, 0, 0]) +// >>> np.full_like(x, 0.1, dtype=np.double) +// array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) +// >>> np.full_like(x, np.nan, dtype=np.double) +// array([nan, nan, nan, nan, nan, nan]) +// +// >>> y = np.arange(6, dtype=np.double) +// >>> np.full_like(y, 0.1) +// array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) +// +// >>> y = np.zeros([2, 2, 3], dtype=int) +// >>> np.full_like(y, [0, 0, 255]) +// array([[[ 0, 0, 255], +// [ 0, 0, 255]], +// [[ 0, 0, 255], +// [ 0, 0, 255]]]) +// +// +//go:linkname FullLike py.full_like +func FullLike(a *py.Object, fillValue *py.Object, dtype *py.Object, order *py.Object, subok *py.Object, shape *py.Object) *py.Object +// matmul(x1, x2, /, out=None, *, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj, axes, axis]) +// +// Matrix product of two arrays. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays, scalars not allowed. +// out : ndarray, optional +// A location into which the result is stored. If provided, it must have +// a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not +// provided or None, a freshly-allocated array is returned. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// .. versionadded:: 1.16 +// Now handles ufunc kwargs +// +// Returns +// ------- +// y : ndarray +// The matrix product of the inputs. +// This is a scalar only when both x1, x2 are 1-d vectors. +// +// Raises +// ------ +// ValueError +// If the last dimension of `x1` is not the same size as +// the second-to-last dimension of `x2`. +// +// If a scalar value is passed in. 
+// +// See Also +// -------- +// vdot : Complex-conjugating dot product. +// tensordot : Sum products over arbitrary axes. +// einsum : Einstein summation convention. +// dot : alternative matrix product with different broadcasting rules. +// +// Notes +// ----- +// +// The behavior depends on the arguments in the following way. +// +// - If both arguments are 2-D they are multiplied like conventional +// matrices. +// - If either argument is N-D, N > 2, it is treated as a stack of +// matrices residing in the last two indexes and broadcast accordingly. +// - If the first argument is 1-D, it is promoted to a matrix by +// prepending a 1 to its dimensions. After matrix multiplication +// the prepended 1 is removed. +// - If the second argument is 1-D, it is promoted to a matrix by +// appending a 1 to its dimensions. After matrix multiplication +// the appended 1 is removed. +// +// ``matmul`` differs from ``dot`` in two important ways: +// +// - Multiplication by scalars is not allowed, use ``*`` instead. +// - Stacks of matrices are broadcast together as if the matrices +// were elements, respecting the signature ``(n,k),(k,m)->(n,m)``: +// +// >>> a = np.ones([9, 5, 7, 4]) +// >>> c = np.ones([9, 5, 4, 3]) +// >>> np.dot(a, c).shape +// (9, 5, 7, 9, 5, 3) +// >>> np.matmul(a, c).shape +// (9, 5, 7, 3) +// >>> # n is 7, k is 4, m is 3 +// +// The matmul function implements the semantics of the ``@`` operator +// introduced in Python 3.5 following :pep:`465`. +// +// It uses an optimized BLAS library when possible (see `numpy.linalg`). +// +// Examples +// -------- +// For 2-D arrays it is the matrix product: +// +// >>> a = np.array([[1, 0], +// ... [0, 1]]) +// >>> b = np.array([[4, 1], +// ... [2, 2]]) +// >>> np.matmul(a, b) +// array([[4, 1], +// [2, 2]]) +// +// For 2-D mixed with 1-D, the result is the usual. +// +// >>> a = np.array([[1, 0], +// ... 
[0, 1]]) +// >>> b = np.array([1, 2]) +// >>> np.matmul(a, b) +// array([1, 2]) +// >>> np.matmul(b, a) +// array([1, 2]) +// +// +// Broadcasting is conventional for stacks of arrays +// +// >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) +// >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) +// >>> np.matmul(a,b).shape +// (2, 2, 2) +// >>> np.matmul(a, b)[0, 1, 1] +// 98 +// >>> sum(a[0, 1, :] * b[0 , :, 1]) +// 98 +// +// Vector, vector returns the scalar inner product, but neither argument +// is complex-conjugated: +// +// >>> np.matmul([2j, 3j], [2j, 3j]) +// (-13+0j) +// +// Scalar multiplication raises an error. +// +// >>> np.matmul([1,2], 3) +// Traceback (most recent call last): +// ... +// ValueError: matmul: Input operand 1 does not have enough dimensions ... +// +// The ``@`` operator can be used as a shorthand for ``np.matmul`` on +// ndarrays. +// +// >>> x1 = np.array([2j, 3j]) +// >>> x2 = np.array([2j, 3j]) +// >>> x1 @ x2 +// (-13+0j) +// +// .. versionadded:: 1.10.0 +// +//go:linkname Matmul py.matmul +func Matmul(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// +// shares_memory(a, b, /, max_work=None) +// +// Determine if two arrays share memory. +// +// .. warning:: +// +// This function can be exponentially slow for some inputs, unless +// `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. +// If in doubt, use `numpy.may_share_memory` instead. +// +// Parameters +// ---------- +// a, b : ndarray +// Input arrays +// max_work : int, optional +// Effort to spend on solving the overlap problem (maximum number +// of candidate solutions to consider). The following special +// values are recognized: +// +// max_work=MAY_SHARE_EXACT (default) +// The problem is solved exactly. In this case, the function returns +// True only if there is an element shared between the arrays. Finding +// the exact solution may take extremely long in some cases. +// max_work=MAY_SHARE_BOUNDS +// Only the memory bounds of a and b are checked. 
+// +// Raises +// ------ +// numpy.exceptions.TooHardError +// Exceeded max_work. +// +// Returns +// ------- +// out : bool +// +// See Also +// -------- +// may_share_memory +// +// Examples +// -------- +// >>> x = np.array([1, 2, 3, 4]) +// >>> np.shares_memory(x, np.array([5, 6, 7])) +// False +// >>> np.shares_memory(x[::2], x) +// True +// >>> np.shares_memory(x[::2], x[1::2]) +// False +// +// Checking whether two arrays share memory is NP-complete, and +// runtime may increase exponentially in the number of +// dimensions. Hence, `max_work` should generally be set to a finite +// number, as it is possible to construct examples that take +// extremely long to run: +// +// >>> from numpy.lib.stride_tricks import as_strided +// >>> x = np.zeros([192163377], dtype=np.int8) +// >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) +// >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) +// >>> np.shares_memory(x1, x2, max_work=1000) +// Traceback (most recent call last): +// ... +// numpy.exceptions.TooHardError: Exceeded max_work +// +// Running ``np.shares_memory(x1, x2)`` without `max_work` set takes +// around 1 minute for this case. It is possible to find problems +// that take still significantly longer. +// +// +// +//go:linkname SharesMemory py.shares_memory +func SharesMemory(a *py.Object, b *py.Object, maxWork *py.Object) *py.Object +// +// may_share_memory(a, b, /, max_work=None) +// +// Determine if two arrays might share memory +// +// A return of True does not necessarily mean that the two arrays +// share any element. It just means that they *might*. +// +// Only the memory bounds of a and b are checked by default. +// +// Parameters +// ---------- +// a, b : ndarray +// Input arrays +// max_work : int, optional +// Effort to spend on solving the overlap problem. See +// `shares_memory` for details. Default for ``may_share_memory`` +// is to do a bounds check. 
+// +// Returns +// ------- +// out : bool +// +// See Also +// -------- +// shares_memory +// +// Examples +// -------- +// >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) +// False +// >>> x = np.zeros([3, 4]) +// >>> np.may_share_memory(x[:,0], x[:,1]) +// True +// +// +// +//go:linkname MayShareMemory py.may_share_memory +func MayShareMemory(a *py.Object, b *py.Object, maxWork *py.Object) *py.Object +// +// Test whether all array elements along a given axis evaluate to True. +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array. +// axis : None or int or tuple of ints, optional +// Axis or axes along which a logical AND reduction is performed. +// The default (``axis=None``) is to perform a logical AND over all +// the dimensions of the input array. `axis` may be negative, in +// which case it counts from the last to the first axis. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, a reduction is performed on multiple +// axes, instead of a single axis or all the axes as before. +// out : ndarray, optional +// Alternate output array in which to place the result. +// It must have the same shape as the expected output and its +// type is preserved (e.g., if ``dtype(out)`` is float, the result +// will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more +// details. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `all` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// where : array_like of bool, optional +// Elements to include in checking for all `True` values. 
+// See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// all : ndarray, bool +// A new boolean or array is returned unless `out` is specified, +// in which case a reference to `out` is returned. +// +// See Also +// -------- +// ndarray.all : equivalent method +// +// any : Test whether any element along a given axis evaluates to True. +// +// Notes +// ----- +// Not a Number (NaN), positive infinity and negative infinity +// evaluate to `True` because these are not equal to zero. +// +// Examples +// -------- +// >>> np.all([[True,False],[True,True]]) +// False +// +// >>> np.all([[True,False],[True,True]], axis=0) +// array([ True, False]) +// +// >>> np.all([-1, 4, 5]) +// True +// +// >>> np.all([1.0, np.nan]) +// True +// +// >>> np.all([[True, True], [False, True]], where=[[True], [False]]) +// True +// +// >>> o=np.array(False) +// >>> z=np.all([-1, 4, 5], out=o) +// >>> id(z), id(o), z +// (28293632, 28293632, array(True)) # may vary +// +// +// +//go:linkname All py.all +func All(__llgo_va_list ...interface{}) *py.Object +// +// Check if all elements of input array are true. +// +// .. deprecated:: 1.25.0 +// ``alltrue`` is deprecated as of NumPy 1.25.0, and will be +// removed in NumPy 2.0. Please use `all` instead. +// +// See Also +// -------- +// numpy.all : Equivalent function; see for details. +// +// +//go:linkname Alltrue py.alltrue +func Alltrue(__llgo_va_list ...interface{}) *py.Object +// +// Return the maximum of an array or maximum along an axis. +// +// `amax` is an alias of `~numpy.max`. +// +// See Also +// -------- +// max : alias of this function +// ndarray.max : equivalent method +// +// +//go:linkname Amax py.amax +func Amax(__llgo_va_list ...interface{}) *py.Object +// +// Return the minimum of an array or minimum along an axis. +// +// `amin` is an alias of `~numpy.min`. 
+// +// See Also +// -------- +// min : alias of this function +// ndarray.min : equivalent method +// +// +//go:linkname Amin py.amin +func Amin(__llgo_va_list ...interface{}) *py.Object +// +// Test whether any array element along a given axis evaluates to True. +// +// Returns single boolean if `axis` is ``None`` +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array. +// axis : None or int or tuple of ints, optional +// Axis or axes along which a logical OR reduction is performed. +// The default (``axis=None``) is to perform a logical OR over all +// the dimensions of the input array. `axis` may be negative, in +// which case it counts from the last to the first axis. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, a reduction is performed on multiple +// axes, instead of a single axis or all the axes as before. +// out : ndarray, optional +// Alternate output array in which to place the result. It must have +// the same shape as the expected output and its type is preserved +// (e.g., if it is of type float, then it will remain so, returning +// 1.0 for True and 0.0 for False, regardless of the type of `a`). +// See :ref:`ufuncs-output-type` for more details. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `any` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// where : array_like of bool, optional +// Elements to include in checking for any `True` values. +// See `~numpy.ufunc.reduce` for details. +// +// .. 
versionadded:: 1.20.0 +// +// Returns +// ------- +// any : bool or ndarray +// A new boolean or `ndarray` is returned unless `out` is specified, +// in which case a reference to `out` is returned. +// +// See Also +// -------- +// ndarray.any : equivalent method +// +// all : Test whether all elements along a given axis evaluate to True. +// +// Notes +// ----- +// Not a Number (NaN), positive infinity and negative infinity evaluate +// to `True` because these are not equal to zero. +// +// Examples +// -------- +// >>> np.any([[True, False], [True, True]]) +// True +// +// >>> np.any([[True, False], [False, False]], axis=0) +// array([ True, False]) +// +// >>> np.any([-1, 0, 5]) +// True +// +// >>> np.any(np.nan) +// True +// +// >>> np.any([[True, False], [False, False]], where=[[False], [True]]) +// False +// +// >>> o=np.array(False) +// >>> z=np.any([-1, 4, 5], out=o) +// >>> z, o +// (array(True), array(True)) +// >>> # Check now that z is a reference to o +// >>> z is o +// True +// >>> id(z), id(o) # identity of z and o # doctest: +SKIP +// (191614240, 191614240) +// +// +// +//go:linkname Any py.any +func Any(__llgo_va_list ...interface{}) *py.Object +// +// Returns the indices of the maximum values along an axis. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// By default, the index is into the flattened array, otherwise +// along the specified axis. +// out : array, optional +// If provided, the result will be inserted into this array. It should +// be of the appropriate shape and dtype. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the array. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// index_array : ndarray of ints +// Array of indices into the array. 
It has the same shape as `a.shape` +// with the dimension along `axis` removed. If `keepdims` is set to True, +// then the size of `axis` will be 1 with the resulting array having same +// shape as `a.shape`. +// +// See Also +// -------- +// ndarray.argmax, argmin +// amax : The maximum value along a given axis. +// unravel_index : Convert a flat index into an index tuple. +// take_along_axis : Apply ``np.expand_dims(index_array, axis)`` +// from argmax to an array as if by calling max. +// +// Notes +// ----- +// In case of multiple occurrences of the maximum values, the indices +// corresponding to the first occurrence are returned. +// +// Examples +// -------- +// >>> a = np.arange(6).reshape(2,3) + 10 +// >>> a +// array([[10, 11, 12], +// [13, 14, 15]]) +// >>> np.argmax(a) +// 5 +// >>> np.argmax(a, axis=0) +// array([1, 1, 1]) +// >>> np.argmax(a, axis=1) +// array([2, 2]) +// +// Indexes of the maximal elements of a N-dimensional array: +// +// >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) +// >>> ind +// (1, 2) +// >>> a[ind] +// 15 +// +// >>> b = np.arange(6) +// >>> b[1] = 5 +// >>> b +// array([0, 5, 2, 3, 4, 5]) +// >>> np.argmax(b) # Only the first occurrence is returned. +// 1 +// +// >>> x = np.array([[4,2,3], [1,0,3]]) +// >>> index_array = np.argmax(x, axis=-1) +// >>> # Same as np.amax(x, axis=-1, keepdims=True) +// >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) +// array([[4], +// [3]]) +// >>> # Same as np.amax(x, axis=-1) +// >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) +// array([4, 3]) +// +// Setting `keepdims` to `True`, +// +// >>> x = np.arange(24).reshape((2, 3, 4)) +// >>> res = np.argmax(x, axis=1, keepdims=True) +// >>> res.shape +// (2, 1, 4) +// +// +//go:linkname Argmax py.argmax +func Argmax(a *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Returns the indices of the minimum values along an axis. 
+// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// By default, the index is into the flattened array, otherwise +// along the specified axis. +// out : array, optional +// If provided, the result will be inserted into this array. It should +// be of the appropriate shape and dtype. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the array. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// index_array : ndarray of ints +// Array of indices into the array. It has the same shape as `a.shape` +// with the dimension along `axis` removed. If `keepdims` is set to True, +// then the size of `axis` will be 1 with the resulting array having same +// shape as `a.shape`. +// +// See Also +// -------- +// ndarray.argmin, argmax +// amin : The minimum value along a given axis. +// unravel_index : Convert a flat index into an index tuple. +// take_along_axis : Apply ``np.expand_dims(index_array, axis)`` +// from argmin to an array as if by calling min. +// +// Notes +// ----- +// In case of multiple occurrences of the minimum values, the indices +// corresponding to the first occurrence are returned. +// +// Examples +// -------- +// >>> a = np.arange(6).reshape(2,3) + 10 +// >>> a +// array([[10, 11, 12], +// [13, 14, 15]]) +// >>> np.argmin(a) +// 0 +// >>> np.argmin(a, axis=0) +// array([0, 0, 0]) +// >>> np.argmin(a, axis=1) +// array([0, 0]) +// +// Indices of the minimum elements of a N-dimensional array: +// +// >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) +// >>> ind +// (0, 0) +// >>> a[ind] +// 10 +// +// >>> b = np.arange(6) + 10 +// >>> b[4] = 10 +// >>> b +// array([10, 11, 12, 13, 10, 15]) +// >>> np.argmin(b) # Only the first occurrence is returned. 
+// 0 +// +// >>> x = np.array([[4,2,3], [1,0,3]]) +// >>> index_array = np.argmin(x, axis=-1) +// >>> # Same as np.amin(x, axis=-1, keepdims=True) +// >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) +// array([[2], +// [0]]) +// >>> # Same as np.amax(x, axis=-1) +// >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) +// array([2, 0]) +// +// Setting `keepdims` to `True`, +// +// >>> x = np.arange(24).reshape((2, 3, 4)) +// >>> res = np.argmin(x, axis=1, keepdims=True) +// >>> res.shape +// (2, 1, 4) +// +// +//go:linkname Argmin py.argmin +func Argmin(a *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Perform an indirect partition along the given axis using the +// algorithm specified by the `kind` keyword. It returns an array of +// indices of the same shape as `a` that index data along the given +// axis in partitioned order. +// +// .. versionadded:: 1.8.0 +// +// Parameters +// ---------- +// a : array_like +// Array to sort. +// kth : int or sequence of ints +// Element index to partition by. The k-th element will be in its +// final sorted position and all smaller elements will be moved +// before it and all larger elements behind it. The order of all +// elements in the partitions is undefined. If provided with a +// sequence of k-th it will partition all of them into their sorted +// position at once. +// +// .. deprecated:: 1.22.0 +// Passing booleans as index is deprecated. +// axis : int or None, optional +// Axis along which to sort. The default is -1 (the last axis). If +// None, the flattened array is used. +// kind : {'introselect'}, optional +// Selection algorithm. Default is 'introselect' +// order : str or list of str, optional +// When `a` is an array with fields defined, this argument +// specifies which fields to compare first, second, etc. 
A single +// field can be specified as a string, and not all fields need be +// specified, but unspecified fields will still be used, in the +// order in which they come up in the dtype, to break ties. +// +// Returns +// ------- +// index_array : ndarray, int +// Array of indices that partition `a` along the specified axis. +// If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. +// More generally, ``np.take_along_axis(a, index_array, axis=axis)`` +// always yields the partitioned `a`, irrespective of dimensionality. +// +// See Also +// -------- +// partition : Describes partition algorithms used. +// ndarray.partition : Inplace partition. +// argsort : Full indirect sort. +// take_along_axis : Apply ``index_array`` from argpartition +// to an array as if by calling partition. +// +// Notes +// ----- +// See `partition` for notes on the different selection algorithms. +// +// Examples +// -------- +// One dimensional array: +// +// >>> x = np.array([3, 4, 2, 1]) +// >>> x[np.argpartition(x, 3)] +// array([2, 1, 3, 4]) +// >>> x[np.argpartition(x, (1, 3))] +// array([1, 2, 3, 4]) +// +// >>> x = [3, 4, 2, 1] +// >>> np.array(x)[np.argpartition(x, 3)] +// array([2, 1, 3, 4]) +// +// Multi-dimensional array: +// +// >>> x = np.array([[3, 4, 2], [1, 3, 1]]) +// >>> index_array = np.argpartition(x, kth=1, axis=-1) +// >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1) +// array([[2, 3, 4], +// [1, 1, 3]]) +// +// +// +//go:linkname Argpartition py.argpartition +func Argpartition(a *py.Object, kth *py.Object, axis *py.Object, kind *py.Object, order *py.Object) *py.Object +// +// Returns the indices that would sort an array. +// +// Perform an indirect sort along the given axis using the algorithm specified +// by the `kind` keyword. It returns an array of indices of the same shape as +// `a` that index data along the given axis in sorted order. +// +// Parameters +// ---------- +// a : array_like +// Array to sort. 
+// axis : int or None, optional +// Axis along which to sort. The default is -1 (the last axis). If None, +// the flattened array is used. +// kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional +// Sorting algorithm. The default is 'quicksort'. Note that both 'stable' +// and 'mergesort' use timsort under the covers and, in general, the +// actual implementation will vary with data type. The 'mergesort' option +// is retained for backwards compatibility. +// +// .. versionchanged:: 1.15.0. +// The 'stable' option was added. +// order : str or list of str, optional +// When `a` is an array with fields defined, this argument specifies +// which fields to compare first, second, etc. A single field can +// be specified as a string, and not all fields need be specified, +// but unspecified fields will still be used, in the order in which +// they come up in the dtype, to break ties. +// +// Returns +// ------- +// index_array : ndarray, int +// Array of indices that sort `a` along the specified `axis`. +// If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`. +// More generally, ``np.take_along_axis(a, index_array, axis=axis)`` +// always yields the sorted `a`, irrespective of dimensionality. +// +// See Also +// -------- +// sort : Describes sorting algorithms used. +// lexsort : Indirect stable sort with multiple keys. +// ndarray.sort : Inplace sort. +// argpartition : Indirect partial sort. +// take_along_axis : Apply ``index_array`` from argsort +// to an array as if by calling sort. +// +// Notes +// ----- +// See `sort` for notes on the different sorting algorithms. +// +// As of NumPy 1.4.0 `argsort` works with real/complex arrays containing +// nan values. The enhanced sort order is documented in `sort`. 
+// +// Examples +// -------- +// One dimensional array: +// +// >>> x = np.array([3, 1, 2]) +// >>> np.argsort(x) +// array([1, 2, 0]) +// +// Two-dimensional array: +// +// >>> x = np.array([[0, 3], [2, 2]]) +// >>> x +// array([[0, 3], +// [2, 2]]) +// +// >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) +// >>> ind +// array([[0, 1], +// [1, 0]]) +// >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) +// array([[0, 2], +// [2, 3]]) +// +// >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) +// >>> ind +// array([[0, 1], +// [0, 1]]) +// >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) +// array([[0, 3], +// [2, 2]]) +// +// Indices of the sorted elements of a N-dimensional array: +// +// >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) +// >>> ind +// (array([0, 1, 1, 0]), array([0, 0, 1, 1])) +// >>> x[ind] # same as np.sort(x, axis=None) +// array([0, 2, 2, 3]) +// +// Sorting with keys: +// +// >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x +// array([(1, 0), (0, 1)], +// dtype=[('x', '>> np.argsort(x, order=('x','y')) +// array([1, 0]) +// +// >>> np.argsort(x, order=('y','x')) +// array([0, 1]) +// +// +// +//go:linkname Argsort py.argsort +func Argsort(a *py.Object, axis *py.Object, kind *py.Object, order *py.Object) *py.Object +// +// Round an array to the given number of decimals. +// +// `around` is an alias of `~numpy.round`. +// +// See Also +// -------- +// ndarray.round : equivalent method +// round : alias for this function +// ceil, fix, floor, rint, trunc +// +// +// +//go:linkname Around py.around +func Around(a *py.Object, decimals *py.Object, out *py.Object) *py.Object +// +// Construct an array from an index array and a list of arrays to choose from. 
+// +// First of all, if confused or uncertain, definitely look at the Examples - +// in its full generality, this function is less simple than it might +// seem from the following code description (below ndi = +// `numpy.lib.index_tricks`): +// +// ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. +// +// But this omits some subtleties. Here is a fully general summary: +// +// Given an "index" array (`a`) of integers and a sequence of ``n`` arrays +// (`choices`), `a` and each choice array are first broadcast, as necessary, +// to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = +// 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` +// for each ``i``. Then, a new array with shape ``Ba.shape`` is created as +// follows: +// +// * if ``mode='raise'`` (the default), then, first of all, each element of +// ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose +// that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` +// position in ``Ba`` - then the value at the same position in the new array +// is the value in ``Bchoices[i]`` at that same position; +// +// * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) +// integer; modular arithmetic is used to map integers outside the range +// `[0, n-1]` back into that range; and then the new array is constructed +// as above; +// +// * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) +// integer; negative integers are mapped to 0; values greater than ``n-1`` +// are mapped to ``n-1``; and then the new array is constructed as above. +// +// Parameters +// ---------- +// a : int array +// This array must contain integers in ``[0, n-1]``, where ``n`` is the +// number of choices, unless ``mode=wrap`` or ``mode=clip``, in which +// cases any integers are permissible. +// choices : sequence of arrays +// Choice arrays. `a` and all of the choices must be broadcastable to the +// same shape. 
If `choices` is itself an array (not recommended), then +// its outermost dimension (i.e., the one corresponding to +// ``choices.shape[0]``) is taken as defining the "sequence". +// out : array, optional +// If provided, the result will be inserted into this array. It should +// be of the appropriate shape and dtype. Note that `out` is always +// buffered if ``mode='raise'``; use other modes for better performance. +// mode : {'raise' (default), 'wrap', 'clip'}, optional +// Specifies how indices outside ``[0, n-1]`` will be treated: +// +// * 'raise' : an exception is raised +// * 'wrap' : value becomes value mod ``n`` +// * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 +// +// Returns +// ------- +// merged_array : array +// The merged result. +// +// Raises +// ------ +// ValueError: shape mismatch +// If `a` and each choice array are not all broadcastable to the same +// shape. +// +// See Also +// -------- +// ndarray.choose : equivalent method +// numpy.take_along_axis : Preferable if `choices` is an array +// +// Notes +// ----- +// To reduce the chance of misinterpretation, even though the following +// "abuse" is nominally supported, `choices` should neither be, nor be +// thought of as, a single array, i.e., the outermost sequence-like container +// should be either a list or a tuple. +// +// Examples +// -------- +// +// >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], +// ... [20, 21, 22, 23], [30, 31, 32, 33]] +// >>> np.choose([2, 3, 1, 0], choices +// ... # the first element of the result will be the first element of the +// ... # third (2+1) "array" in choices, namely, 20; the second element +// ... # will be the second element of the fourth (3+1) choice array, i.e., +// ... # 31, etc. +// ... 
) +// array([20, 31, 12, 3]) +// >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) +// array([20, 31, 12, 3]) +// >>> # because there are 4 choice arrays +// >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) +// array([20, 1, 12, 3]) +// >>> # i.e., 0 +// +// A couple examples illustrating how choose broadcasts: +// +// >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] +// >>> choices = [-10, 10] +// >>> np.choose(a, choices) +// array([[ 10, -10, 10], +// [-10, 10, -10], +// [ 10, -10, 10]]) +// +// >>> # With thanks to Anne Archibald +// >>> a = np.array([0, 1]).reshape((2,1,1)) +// >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) +// >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) +// >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 +// array([[[ 1, 1, 1, 1, 1], +// [ 2, 2, 2, 2, 2], +// [ 3, 3, 3, 3, 3]], +// [[-1, -2, -3, -4, -5], +// [-1, -2, -3, -4, -5], +// [-1, -2, -3, -4, -5]]]) +// +// +// +//go:linkname Choose py.choose +func Choose(a *py.Object, choices *py.Object, out *py.Object, mode *py.Object) *py.Object +// +// Clip (limit) the values in an array. +// +// Given an interval, values outside the interval are clipped to +// the interval edges. For example, if an interval of ``[0, 1]`` +// is specified, values smaller than 0 become 0, and values larger +// than 1 become 1. +// +// Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. +// +// No check is performed to ensure ``a_min < a_max``. +// +// Parameters +// ---------- +// a : array_like +// Array containing elements to clip. +// a_min, a_max : array_like or None +// Minimum and maximum value. If ``None``, clipping is not performed on +// the corresponding edge. Only one of `a_min` and `a_max` may be +// ``None``. Both are broadcast against `a`. +// out : ndarray, optional +// The results will be placed in this array. It may be the input +// array for in-place clipping. 
`out` must be of the right shape +// to hold the output. Its type is preserved. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs <ufuncs.kwargs>`. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// clipped_array : ndarray +// An array with the elements of `a`, but where values +// < `a_min` are replaced with `a_min`, and those > `a_max` +// with `a_max`. +// +// See Also +// -------- +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// When `a_min` is greater than `a_max`, `clip` returns an +// array in which all values are equal to `a_max`, +// as shown in the second example. +// +// Examples +// -------- +// >>> a = np.arange(10) +// >>> a +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// >>> np.clip(a, 1, 8) +// array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) +// >>> np.clip(a, 8, 1) +// array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) +// >>> np.clip(a, 3, 6, out=a) +// array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) +// >>> a +// array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) +// >>> a = np.arange(10) +// >>> a +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) +// array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) +// +// +// +//go:linkname Clip py.clip +func Clip(a *py.Object, aMin *py.Object, aMax *py.Object, out *py.Object) *py.Object +// +// Return selected slices of an array along given axis. +// +// When working along a given axis, a slice along that axis is returned in +// `output` for each index where `condition` evaluates to True. When +// working on a 1-D array, `compress` is equivalent to `extract`. +// +// Parameters +// ---------- +// condition : 1-D array of bools +// Array that selects which entries to return. If len(condition) +// is less than the size of `a` along the given axis, then output is +// truncated to the length of the condition array. +// a : array_like +// Array from which to extract a part. +// axis : int, optional +// Axis along which to take slices. If None (default), work on the +// flattened array. 
+// out : ndarray, optional +// Output array. Its type is preserved and it must be of the right +// shape to hold the output. +// +// Returns +// ------- +// compressed_array : ndarray +// A copy of `a` without the slices along axis for which `condition` +// is false. +// +// See Also +// -------- +// take, choose, diag, diagonal, select +// ndarray.compress : Equivalent method in ndarray +// extract : Equivalent method when working on 1-D arrays +// :ref:`ufuncs-output-type` +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4], [5, 6]]) +// >>> a +// array([[1, 2], +// [3, 4], +// [5, 6]]) +// >>> np.compress([0, 1], a, axis=0) +// array([[3, 4]]) +// >>> np.compress([False, True, True], a, axis=0) +// array([[3, 4], +// [5, 6]]) +// >>> np.compress([False, True], a, axis=1) +// array([[2], +// [4], +// [6]]) +// +// Working on the flattened array does not return slices along an axis but +// selects elements. +// +// >>> np.compress([False, True], a) +// array([2]) +// +// +// +//go:linkname Compress py.compress +func Compress(condition *py.Object, a *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Return the cumulative product of elements along a given axis. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// Axis along which the cumulative product is computed. By default +// the input is flattened. +// dtype : dtype, optional +// Type of the returned array, as well as of the accumulator in which +// the elements are multiplied. If *dtype* is not specified, it +// defaults to the dtype of `a`, unless `a` has an integer dtype with +// a precision less than that of the default platform integer. In +// that case, the default platform integer is used instead. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output +// but the type of the resulting values will be cast if necessary. 
+// +// Returns +// ------- +// cumprod : ndarray +// A new array holding the result is returned unless `out` is +// specified, in which case a reference to out is returned. +// +// See Also +// -------- +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// Arithmetic is modular when using integer types, and no error is +// raised on overflow. +// +// Examples +// -------- +// >>> a = np.array([1,2,3]) +// >>> np.cumprod(a) # intermediate results 1, 1*2 +// ... # total product 1*2*3 = 6 +// array([1, 2, 6]) +// >>> a = np.array([[1, 2, 3], [4, 5, 6]]) +// >>> np.cumprod(a, dtype=float) # specify type of output +// array([ 1., 2., 6., 24., 120., 720.]) +// +// The cumulative product for each column (i.e., over the rows) of `a`: +// +// >>> np.cumprod(a, axis=0) +// array([[ 1, 2, 3], +// [ 4, 10, 18]]) +// +// The cumulative product for each row (i.e. over the columns) of `a`: +// +// >>> np.cumprod(a,axis=1) +// array([[ 1, 2, 6], +// [ 4, 20, 120]]) +// +// +// +//go:linkname Cumprod py.cumprod +func Cumprod(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object) *py.Object +// +// Return the cumulative product over the given axis. +// +// .. deprecated:: 1.25.0 +// ``cumproduct`` is deprecated as of NumPy 1.25.0, and will be +// removed in NumPy 2.0. Please use `cumprod` instead. +// +// See Also +// -------- +// cumprod : equivalent function; see for details. +// +// +//go:linkname Cumproduct py.cumproduct +func Cumproduct(__llgo_va_list ...interface{}) *py.Object +// +// Return the cumulative sum of the elements along a given axis. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// Axis along which the cumulative sum is computed. The default +// (None) is to compute the cumsum over the flattened array. +// dtype : dtype, optional +// Type of the returned array and of the accumulator in which the +// elements are summed. 
If `dtype` is not specified, it defaults +// to the dtype of `a`, unless `a` has an integer dtype with a +// precision less than that of the default platform integer. In +// that case, the default platform integer is used. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output +// but the type will be cast if necessary. See :ref:`ufuncs-output-type` for +// more details. +// +// Returns +// ------- +// cumsum_along_axis : ndarray. +// A new array holding the result is returned unless `out` is +// specified, in which case a reference to `out` is returned. The +// result has the same size as `a`, and the same shape as `a` if +// `axis` is not None or `a` is a 1-d array. +// +// See Also +// -------- +// sum : Sum array elements. +// trapz : Integration of array values using the composite trapezoidal rule. +// diff : Calculate the n-th discrete difference along given axis. +// +// Notes +// ----- +// Arithmetic is modular when using integer types, and no error is +// raised on overflow. +// +// ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point +// values since ``sum`` may use a pairwise summation routine, reducing +// the roundoff-error. See `sum` for more information. 
+// +// Examples +// -------- +// >>> a = np.array([[1,2,3], [4,5,6]]) +// >>> a +// array([[1, 2, 3], +// [4, 5, 6]]) +// >>> np.cumsum(a) +// array([ 1, 3, 6, 10, 15, 21]) +// >>> np.cumsum(a, dtype=float) # specifies type of output value(s) +// array([ 1., 3., 6., 10., 15., 21.]) +// +// >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns +// array([[1, 2, 3], +// [5, 7, 9]]) +// >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows +// array([[ 1, 3, 6], +// [ 4, 9, 15]]) +// +// ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` +// +// >>> b = np.array([1, 2e-9, 3e-9] * 1000000) +// >>> b.cumsum()[-1] +// 1000000.0050045159 +// >>> b.sum() +// 1000000.0050000029 +// +// +// +//go:linkname Cumsum py.cumsum +func Cumsum(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object) *py.Object +// +// Return specified diagonals. +// +// If `a` is 2-D, returns the diagonal of `a` with the given offset, +// i.e., the collection of elements of the form ``a[i, i+offset]``. If +// `a` has more than two dimensions, then the axes specified by `axis1` +// and `axis2` are used to determine the 2-D sub-array whose diagonal is +// returned. The shape of the resulting array can be determined by +// removing `axis1` and `axis2` and appending an index to the right equal +// to the size of the resulting diagonals. +// +// In versions of NumPy prior to 1.7, this function always returned a new, +// independent array containing a copy of the values in the diagonal. +// +// In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, +// but depending on this fact is deprecated. Writing to the resulting +// array continues to work as it used to, but a FutureWarning is issued. +// +// Starting in NumPy 1.9 it returns a read-only view on the original array. +// Attempting to write to the resulting array will produce an error. 
+// +// In some future release, it will return a read/write view and writing to +// the returned array will alter your original array. The returned array +// will have the same type as the input array. +// +// If you don't write to the array returned by this function, then you can +// just ignore all of the above. +// +// If you depend on the current behavior, then we suggest copying the +// returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead +// of just ``np.diagonal(a)``. This will work with both past and future +// versions of NumPy. +// +// Parameters +// ---------- +// a : array_like +// Array from which the diagonals are taken. +// offset : int, optional +// Offset of the diagonal from the main diagonal. Can be positive or +// negative. Defaults to main diagonal (0). +// axis1 : int, optional +// Axis to be used as the first axis of the 2-D sub-arrays from which +// the diagonals should be taken. Defaults to first axis (0). +// axis2 : int, optional +// Axis to be used as the second axis of the 2-D sub-arrays from +// which the diagonals should be taken. Defaults to second axis (1). +// +// Returns +// ------- +// array_of_diagonals : ndarray +// If `a` is 2-D, then a 1-D array containing the diagonal and of the +// same type as `a` is returned unless `a` is a `matrix`, in which case +// a 1-D array rather than a (2-D) `matrix` is returned in order to +// maintain backward compatibility. +// +// If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` +// are removed, and a new axis inserted at the end corresponding to the +// diagonal. +// +// Raises +// ------ +// ValueError +// If the dimension of `a` is less than 2. +// +// See Also +// -------- +// diag : MATLAB work-a-like for 1-D and 2-D arrays. +// diagflat : Create diagonal arrays. +// trace : Sum along diagonals. 
+// +// Examples +// -------- +// >>> a = np.arange(4).reshape(2,2) +// >>> a +// array([[0, 1], +// [2, 3]]) +// >>> a.diagonal() +// array([0, 3]) +// >>> a.diagonal(1) +// array([1]) +// +// A 3-D example: +// +// >>> a = np.arange(8).reshape(2,2,2); a +// array([[[0, 1], +// [2, 3]], +// [[4, 5], +// [6, 7]]]) +// >>> a.diagonal(0, # Main diagonals of two arrays created by skipping +// ... 0, # across the outer(left)-most axis last and +// ... 1) # the "middle" (row) axis first. +// array([[0, 6], +// [1, 7]]) +// +// The sub-arrays whose main diagonals we just obtained; note that each +// corresponds to fixing the right-most (column) axis, and that the +// diagonals are "packed" in rows. +// +// >>> a[:,:,0] # main diagonal is [0 6] +// array([[0, 2], +// [4, 6]]) +// >>> a[:,:,1] # main diagonal is [1 7] +// array([[1, 3], +// [5, 7]]) +// +// The anti-diagonal can be obtained by reversing the order of elements +// using either `numpy.flipud` or `numpy.fliplr`. +// +// >>> a = np.arange(9).reshape(3, 3) +// >>> a +// array([[0, 1, 2], +// [3, 4, 5], +// [6, 7, 8]]) +// >>> np.fliplr(a).diagonal() # Horizontal flip +// array([2, 4, 6]) +// >>> np.flipud(a).diagonal() # Vertical flip +// array([6, 4, 2]) +// +// Note that the order in which the diagonal is retrieved varies depending +// on the flip function. +// +// +//go:linkname Diagonal py.diagonal +func Diagonal(a *py.Object, offset *py.Object, axis1 *py.Object, axis2 *py.Object) *py.Object +// +// Compute the arithmetic mean along the specified axis. +// +// Returns the average of the array elements. The average is taken over +// the flattened array by default, otherwise over the specified axis. +// `float64` intermediate and return values are used for integer inputs. +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose mean is desired. If `a` is not an +// array, a conversion is attempted. 
+// axis : None or int or tuple of ints, optional +// Axis or axes along which the means are computed. The default is to +// compute the mean of the flattened array. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, a mean is performed over multiple axes, +// instead of a single axis or all the axes as before. +// dtype : data-type, optional +// Type to use in computing the mean. For integer inputs, the default +// is `float64`; for floating point inputs, it is the same as the +// input dtype. +// out : ndarray, optional +// Alternate output array in which to place the result. The default +// is ``None``; if provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. +// See :ref:`ufuncs-output-type` for more details. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `mean` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// where : array_like of bool, optional +// Elements to include in the mean. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// m : ndarray, see dtype parameter above +// If `out=None`, returns a new array containing the mean values, +// otherwise a reference to the output array is returned. +// +// See Also +// -------- +// average : Weighted average +// std, var, nanmean, nanstd, nanvar +// +// Notes +// ----- +// The arithmetic mean is the sum of the elements along the axis divided +// by the number of elements. +// +// Note that for floating-point input, the mean is computed using the +// same precision the input has. 
Depending on the input data, this can +// cause the results to be inaccurate, especially for `float32` (see +// example below). Specifying a higher-precision accumulator using the +// `dtype` keyword can alleviate this issue. +// +// By default, `float16` results are computed using `float32` intermediates +// for extra precision. +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4]]) +// >>> np.mean(a) +// 2.5 +// >>> np.mean(a, axis=0) +// array([2., 3.]) +// >>> np.mean(a, axis=1) +// array([1.5, 3.5]) +// +// In single precision, `mean` can be inaccurate: +// +// >>> a = np.zeros((2, 512*512), dtype=np.float32) +// >>> a[0, :] = 1.0 +// >>> a[1, :] = 0.1 +// >>> np.mean(a) +// 0.54999924 +// +// Computing the mean in float64 is more accurate: +// +// >>> np.mean(a, dtype=np.float64) +// 0.55000000074505806 # may vary +// +// Specifying a where argument: +// +// >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) +// >>> np.mean(a) +// 12.0 +// >>> np.mean(a, where=[[True], [False], [False]]) +// 9.0 +// +// +// +//go:linkname Mean py.mean +func Mean(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object) *py.Object +// +// Return the maximum of an array or maximum along an axis. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : None or int or tuple of ints, optional +// Axis or axes along which to operate. By default, flattened input is +// used. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, the maximum is selected over multiple axes, +// instead of a single axis or all the axes as before. +// out : ndarray, optional +// Alternative output array in which to place the result. Must +// be of the same shape and buffer length as the expected output. +// See :ref:`ufuncs-output-type` for more details. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. 
With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the ``max`` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// initial : scalar, optional +// The minimum value of an output element. Must be present to allow +// computation on empty slice. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.15.0 +// +// where : array_like of bool, optional +// Elements to compare for the maximum. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// max : ndarray or scalar +// Maximum of `a`. If `axis` is None, the result is a scalar value. +// If `axis` is an int, the result is an array of dimension +// ``a.ndim - 1``. If `axis` is a tuple, the result is an array of +// dimension ``a.ndim - len(axis)``. +// +// See Also +// -------- +// amin : +// The minimum value of an array along a given axis, propagating any NaNs. +// nanmax : +// The maximum value of an array along a given axis, ignoring any NaNs. +// maximum : +// Element-wise maximum of two arrays, propagating any NaNs. +// fmax : +// Element-wise maximum of two arrays, ignoring any NaNs. +// argmax : +// Return the indices of the maximum values. +// +// nanmin, minimum, fmin +// +// Notes +// ----- +// NaN values are propagated, that is if at least one item is NaN, the +// corresponding max value will be NaN as well. To ignore NaN values +// (MATLAB behavior), please use nanmax. +// +// Don't use `~numpy.max` for element-wise comparison of 2 arrays; when +// ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than +// ``max(a, axis=0)``. 
+// +// Examples +// -------- +// >>> a = np.arange(4).reshape((2,2)) +// >>> a +// array([[0, 1], +// [2, 3]]) +// >>> np.max(a) # Maximum of the flattened array +// 3 +// >>> np.max(a, axis=0) # Maxima along the first axis +// array([2, 3]) +// >>> np.max(a, axis=1) # Maxima along the second axis +// array([1, 3]) +// >>> np.max(a, where=[False, True], initial=-1, axis=0) +// array([-1, 3]) +// >>> b = np.arange(5, dtype=float) +// >>> b[2] = np.NaN +// >>> np.max(b) +// nan +// >>> np.max(b, where=~np.isnan(b), initial=-1) +// 4.0 +// >>> np.nanmax(b) +// 4.0 +// +// You can use an initial value to compute the maximum of an empty slice, or +// to initialize it to a different value: +// +// >>> np.max([[-50], [10]], axis=-1, initial=0) +// array([ 0, 10]) +// +// Notice that the initial value is used as one of the elements for which the +// maximum is determined, unlike for the default argument Python's max +// function, which is only used for empty iterables. +// +// >>> np.max([5], initial=6) +// 6 +// >>> max([5], default=6) +// 5 +// +// +//go:linkname Max py.max +func Max(a *py.Object, axis *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the minimum of an array or minimum along an axis. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : None or int or tuple of ints, optional +// Axis or axes along which to operate. By default, flattened input is +// used. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, the minimum is selected over multiple axes, +// instead of a single axis or all the axes as before. +// out : ndarray, optional +// Alternative output array in which to place the result. Must +// be of the same shape and buffer length as the expected output. +// See :ref:`ufuncs-output-type` for more details. 
+// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the ``min`` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// initial : scalar, optional +// The maximum value of an output element. Must be present to allow +// computation on empty slice. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.15.0 +// +// where : array_like of bool, optional +// Elements to compare for the minimum. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// min : ndarray or scalar +// Minimum of `a`. If `axis` is None, the result is a scalar value. +// If `axis` is an int, the result is an array of dimension +// ``a.ndim - 1``. If `axis` is a tuple, the result is an array of +// dimension ``a.ndim - len(axis)``. +// +// See Also +// -------- +// amax : +// The maximum value of an array along a given axis, propagating any NaNs. +// nanmin : +// The minimum value of an array along a given axis, ignoring any NaNs. +// minimum : +// Element-wise minimum of two arrays, propagating any NaNs. +// fmin : +// Element-wise minimum of two arrays, ignoring any NaNs. +// argmin : +// Return the indices of the minimum values. +// +// nanmax, maximum, fmax +// +// Notes +// ----- +// NaN values are propagated, that is if at least one item is NaN, the +// corresponding min value will be NaN as well. To ignore NaN values +// (MATLAB behavior), please use nanmin. +// +// Don't use `~numpy.min` for element-wise comparison of 2 arrays; when +// ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than +// ``min(a, axis=0)``. 
+// +// Examples +// -------- +// >>> a = np.arange(4).reshape((2,2)) +// >>> a +// array([[0, 1], +// [2, 3]]) +// >>> np.min(a) # Minimum of the flattened array +// 0 +// >>> np.min(a, axis=0) # Minima along the first axis +// array([0, 1]) +// >>> np.min(a, axis=1) # Minima along the second axis +// array([0, 2]) +// >>> np.min(a, where=[False, True], initial=10, axis=0) +// array([10, 1]) +// +// >>> b = np.arange(5, dtype=float) +// >>> b[2] = np.NaN +// >>> np.min(b) +// nan +// >>> np.min(b, where=~np.isnan(b), initial=10) +// 0.0 +// >>> np.nanmin(b) +// 0.0 +// +// >>> np.min([[-50], [10]], axis=-1, initial=0) +// array([-50, 0]) +// +// Notice that the initial value is used as one of the elements for which the +// minimum is determined, unlike for the default argument Python's max +// function, which is only used for empty iterables. +// +// Notice that this isn't the same as Python's ``default`` argument. +// +// >>> np.min([6], initial=5) +// 5 +// >>> min([6], default=5) +// 6 +// +// +//go:linkname Min py.min +func Min(a *py.Object, axis *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the number of dimensions of an array. +// +// Parameters +// ---------- +// a : array_like +// Input array. If it is not already an ndarray, a conversion is +// attempted. +// +// Returns +// ------- +// number_of_dimensions : int +// The number of dimensions in `a`. Scalars are zero-dimensional. +// +// See Also +// -------- +// ndarray.ndim : equivalent method +// shape : dimensions of array +// ndarray.shape : dimensions of array +// +// Examples +// -------- +// >>> np.ndim([[1,2,3],[4,5,6]]) +// 2 +// >>> np.ndim(np.array([[1,2,3],[4,5,6]])) +// 2 +// >>> np.ndim(1) +// 0 +// +// +// +//go:linkname Ndim py.ndim +func Ndim(a *py.Object) *py.Object +// +// Return the indices of the elements that are non-zero. 
+// +// Returns a tuple of arrays, one for each dimension of `a`, +// containing the indices of the non-zero elements in that +// dimension. The values in `a` are always tested and returned in +// row-major, C-style order. +// +// To group the indices by element, rather than dimension, use `argwhere`, +// which returns a row for each non-zero element. +// +// .. note:: +// +// When called on a zero-d array or scalar, ``nonzero(a)`` is treated +// as ``nonzero(atleast_1d(a))``. +// +// .. deprecated:: 1.17.0 +// +// Use `atleast_1d` explicitly if this behavior is deliberate. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// +// Returns +// ------- +// tuple_of_arrays : tuple +// Indices of elements that are non-zero. +// +// See Also +// -------- +// flatnonzero : +// Return indices that are non-zero in the flattened version of the input +// array. +// ndarray.nonzero : +// Equivalent ndarray method. +// count_nonzero : +// Counts the number of non-zero elements in the input array. +// +// Notes +// ----- +// While the nonzero values can be obtained with ``a[nonzero(a)]``, it is +// recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which +// will correctly handle 0-d arrays. +// +// Examples +// -------- +// >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) +// >>> x +// array([[3, 0, 0], +// [0, 4, 0], +// [5, 6, 0]]) +// >>> np.nonzero(x) +// (array([0, 1, 2, 2]), array([0, 1, 0, 1])) +// +// >>> x[np.nonzero(x)] +// array([3, 4, 5, 6]) +// >>> np.transpose(np.nonzero(x)) +// array([[0, 0], +// [1, 1], +// [2, 0], +// [2, 1]]) +// +// A common use for ``nonzero`` is to find the indices of an array, where +// a condition is True. Given an array `a`, the condition `a` > 3 is a +// boolean array and since False is interpreted as 0, np.nonzero(a > 3) +// yields the indices of the `a` where the condition is true. 
+// +// >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) +// >>> a > 3 +// array([[False, False, False], +// [ True, True, True], +// [ True, True, True]]) +// >>> np.nonzero(a > 3) +// (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) +// +// Using this result to index `a` is equivalent to using the mask directly: +// +// >>> a[np.nonzero(a > 3)] +// array([4, 5, 6, 7, 8, 9]) +// >>> a[a > 3] # prefer this spelling +// array([4, 5, 6, 7, 8, 9]) +// +// ``nonzero`` can also be called as a method of the array. +// +// >>> (a > 3).nonzero() +// (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) +// +// +// +//go:linkname Nonzero py.nonzero +func Nonzero(a *py.Object) *py.Object +// +// Return a partitioned copy of an array. +// +// Creates a copy of the array with its elements rearranged in such a +// way that the value of the element in k-th position is in the position +// the value would be in a sorted array. In the partitioned array, all +// elements before the k-th element are less than or equal to that +// element, and all the elements after the k-th element are greater than +// or equal to that element. The ordering of the elements in the two +// partitions is undefined. +// +// .. versionadded:: 1.8.0 +// +// Parameters +// ---------- +// a : array_like +// Array to be sorted. +// kth : int or sequence of ints +// Element index to partition by. The k-th value of the element +// will be in its final sorted position and all smaller elements +// will be moved before it and all equal or greater elements behind +// it. The order of all elements in the partitions is undefined. If +// provided with a sequence of k-th it will partition all elements +// indexed by k-th of them into their sorted position at once. +// +// .. deprecated:: 1.22.0 +// Passing booleans as index is deprecated. +// axis : int or None, optional +// Axis along which to sort. If None, the array is flattened before +// sorting. The default is -1, which sorts along the last axis. 
+// kind : {'introselect'}, optional +// Selection algorithm. Default is 'introselect'. +// order : str or list of str, optional +// When `a` is an array with fields defined, this argument +// specifies which fields to compare first, second, etc. A single +// field can be specified as a string. Not all fields need be +// specified, but unspecified fields will still be used, in the +// order in which they come up in the dtype, to break ties. +// +// Returns +// ------- +// partitioned_array : ndarray +// Array of the same type and shape as `a`. +// +// See Also +// -------- +// ndarray.partition : Method to sort an array in-place. +// argpartition : Indirect partition. +// sort : Full sorting +// +// Notes +// ----- +// The various selection algorithms are characterized by their average +// speed, worst case performance, work space size, and whether they are +// stable. A stable sort keeps items with the same key in the same +// relative order. The available algorithms have the following +// properties: +// +// ================= ======= ============= ============ ======= +// kind speed worst case work space stable +// ================= ======= ============= ============ ======= +// 'introselect' 1 O(n) 0 no +// ================= ======= ============= ============ ======= +// +// All the partition algorithms make temporary copies of the data when +// partitioning along any but the last axis. Consequently, +// partitioning along the last axis is faster and uses less space than +// partitioning along any other axis. +// +// The sort order for complex numbers is lexicographic. If both the +// real and imaginary parts are non-nan then the order is determined by +// the real parts except when they are equal, in which case the order +// is determined by the imaginary parts. 
+// +// Examples +// -------- +// >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) +// >>> p = np.partition(a, 4) +// >>> p +// array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) +// +// ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal +// to ``p[4]``, and all elements in ``p[5:]`` are greater than or +// equal to ``p[4]``. The partition is:: +// +// [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7] +// +// The next example shows the use of multiple values passed to `kth`. +// +// >>> p2 = np.partition(a, (4, 8)) +// >>> p2 +// array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7]) +// +// ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]`` +// are less than or equal to ``p2[4]``, all elements in ``p2[5:8]`` +// are greater than or equal to ``p2[4]`` and less than or equal to +// ``p2[8]``, and all elements in ``p2[9:]`` are greater than or +// equal to ``p2[8]``. The partition is:: +// +// [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7] +// +// +//go:linkname Partition py.partition +func Partition(a *py.Object, kth *py.Object, axis *py.Object, kind *py.Object, order *py.Object) *py.Object +// +// Return the product of array elements over a given axis. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : None or int or tuple of ints, optional +// Axis or axes along which a product is performed. The default, +// axis=None, will calculate the product of all the elements in the +// input array. If axis is negative it counts from the last to the +// first axis. +// +// .. versionadded:: 1.7.0 +// +// If axis is a tuple of ints, a product is performed on all of the +// axes specified in the tuple instead of a single axis or all the +// axes as before. +// dtype : dtype, optional +// The type of the returned array, as well as of the accumulator in +// which the elements are multiplied. The dtype of `a` is used by +// default unless `a` has an integer dtype of less precision than the +// default platform integer. 
In that case, if `a` is signed then the +// platform integer is used while if `a` is unsigned then an unsigned +// integer of the same precision as the platform integer is used. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape as the expected output, but the type of the output +// values will be cast if necessary. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left in the +// result as dimensions with size one. With this option, the result +// will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `prod` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// initial : scalar, optional +// The starting value for this product. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.15.0 +// +// where : array_like of bool, optional +// Elements to include in the product. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// product_along_axis : ndarray, see `dtype` parameter above. +// An array shaped as `a` but with the specified axis removed. +// Returns a reference to `out` if specified. +// +// See Also +// -------- +// ndarray.prod : equivalent method +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// Arithmetic is modular when using integer types, and no error is +// raised on overflow. 
That means that, on a 32-bit platform: +// +// >>> x = np.array([536870910, 536870910, 536870910, 536870910]) +// >>> np.prod(x) +// 16 # may vary +// +// The product of an empty array is the neutral element 1: +// +// >>> np.prod([]) +// 1.0 +// +// Examples +// -------- +// By default, calculate the product of all elements: +// +// >>> np.prod([1.,2.]) +// 2.0 +// +// Even when the input array is two-dimensional: +// +// >>> a = np.array([[1., 2.], [3., 4.]]) +// >>> np.prod(a) +// 24.0 +// +// But we can also specify the axis over which to multiply: +// +// >>> np.prod(a, axis=1) +// array([ 2., 12.]) +// >>> np.prod(a, axis=0) +// array([3., 8.]) +// +// Or select specific elements to include: +// +// >>> np.prod([1., np.nan, 3.], where=[True, False, True]) +// 3.0 +// +// If the type of `x` is unsigned, then the output type is +// the unsigned platform integer: +// +// >>> x = np.array([1, 2, 3], dtype=np.uint8) +// >>> np.prod(x).dtype == np.uint +// True +// +// If `x` is of a signed integer type, then the output type +// is the default platform integer: +// +// >>> x = np.array([1, 2, 3], dtype=np.int8) +// >>> np.prod(x).dtype == int +// True +// +// You can also start the product with a value other than one: +// +// >>> np.prod([1, 2], initial=5) +// 10 +// +// +//go:linkname Prod py.prod +func Prod(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the product of array elements over a given axis. +// +// .. deprecated:: 1.25.0 +// ``product`` is deprecated as of NumPy 1.25.0, and will be +// removed in NumPy 2.0. Please use `prod` instead. +// +// See Also +// -------- +// prod : equivalent function; see for details. +// +// +//go:linkname Product py.product +func Product(__llgo_va_list ...interface{}) *py.Object +// +// Range of values (maximum - minimum) along an axis. +// +// The name of the function comes from the acronym for 'peak to peak'. +// +// .. 
warning:: +// `ptp` preserves the data type of the array. This means the +// return value for an input of signed integers with n bits +// (e.g. `np.int8`, `np.int16`, etc) is also a signed integer +// with n bits. In that case, peak-to-peak values greater than +// ``2**(n-1)-1`` will be returned as negative values. An example +// with a work-around is shown below. +// +// Parameters +// ---------- +// a : array_like +// Input values. +// axis : None or int or tuple of ints, optional +// Axis along which to find the peaks. By default, flatten the +// array. `axis` may be negative, in +// which case it counts from the last to the first axis. +// +// .. versionadded:: 1.15.0 +// +// If this is a tuple of ints, a reduction is performed on multiple +// axes, instead of a single axis or all the axes as before. +// out : array_like +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output, +// but the type of the output values will be cast if necessary. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `ptp` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// Returns +// ------- +// ptp : ndarray or scalar +// The range of a given array - `scalar` if array is one-dimensional +// or a new array holding the result along the given axis +// +// Examples +// -------- +// >>> x = np.array([[4, 9, 2, 10], +// ... 
[6, 9, 7, 12]]) +// +// >>> np.ptp(x, axis=1) +// array([8, 6]) +// +// >>> np.ptp(x, axis=0) +// array([2, 0, 5, 2]) +// +// >>> np.ptp(x) +// 10 +// +// This example shows that a negative value can be returned when +// the input is an array of signed integers. +// +// >>> y = np.array([[1, 127], +// ... [0, 127], +// ... [-1, 127], +// ... [-2, 127]], dtype=np.int8) +// >>> np.ptp(y, axis=1) +// array([ 126, 127, -128, -127], dtype=int8) +// +// A work-around is to use the `view()` method to view the result as +// unsigned integers with the same bit width: +// +// >>> np.ptp(y, axis=1).view(np.uint8) +// array([126, 127, 128, 129], dtype=uint8) +// +// +// +//go:linkname Ptp py.ptp +func Ptp(a *py.Object, axis *py.Object, out *py.Object, keepdims *py.Object) *py.Object +// +// Replaces specified elements of an array with given values. +// +// The indexing works on the flattened target array. `put` is roughly +// equivalent to: +// +// :: +// +// a.flat[ind] = v +// +// Parameters +// ---------- +// a : ndarray +// Target array. +// ind : array_like +// Target indices, interpreted as integers. +// v : array_like +// Values to place in `a` at target indices. If `v` is shorter than +// `ind` it will be repeated as necessary. +// mode : {'raise', 'wrap', 'clip'}, optional +// Specifies how out-of-bounds indices will behave. +// +// * 'raise' -- raise an error (default) +// * 'wrap' -- wrap around +// * 'clip' -- clip to the range +// +// 'clip' mode means that all indices that are too large are replaced +// by the index that addresses the last element along that axis. Note +// that this disables indexing with negative numbers. In 'raise' mode, +// if an exception occurs the target array may still be modified. 
+// +// See Also +// -------- +// putmask, place +// put_along_axis : Put elements by matching the array and the index arrays +// +// Examples +// -------- +// >>> a = np.arange(5) +// >>> np.put(a, [0, 2], [-44, -55]) +// >>> a +// array([-44, 1, -55, 3, 4]) +// +// >>> a = np.arange(5) +// >>> np.put(a, 22, -5, mode='clip') +// >>> a +// array([ 0, 1, 2, 3, -5]) +// +// +// +//go:linkname Put py.put +func Put(a *py.Object, ind *py.Object, v *py.Object, mode *py.Object) *py.Object +// Return a contiguous flattened array. +// +// A 1-D array, containing the elements of the input, is returned. A copy is +// made only if needed. +// +// As of NumPy 1.10, the returned array will have the same type as the input +// array. (for example, a masked array will be returned for a masked array +// input) +// +// Parameters +// ---------- +// a : array_like +// Input array. The elements in `a` are read in the order specified by +// `order`, and packed as a 1-D array. +// order : {'C','F', 'A', 'K'}, optional +// +// The elements of `a` are read using this index order. 'C' means +// to index the elements in row-major, C-style order, +// with the last axis index changing fastest, back to the first +// axis index changing slowest. 'F' means to index the elements +// in column-major, Fortran-style order, with the +// first index changing fastest, and the last index changing +// slowest. Note that the 'C' and 'F' options take no account of +// the memory layout of the underlying array, and only refer to +// the order of axis indexing. 'A' means to read the elements in +// Fortran-like index order if `a` is Fortran *contiguous* in +// memory, C-like order otherwise. 'K' means to read the +// elements in the order they occur in memory, except for +// reversing the data when strides are negative. By default, 'C' +// index order is used. +// +// Returns +// ------- +// y : array_like +// y is a contiguous 1-D array of the same subtype as `a`, +// with shape ``(a.size,)``. 
+// Note that matrices are special cased for backward compatibility, +// if `a` is a matrix, then y is a 1-D ndarray. +// +// See Also +// -------- +// ndarray.flat : 1-D iterator over an array. +// ndarray.flatten : 1-D array copy of the elements of an array +// in row-major order. +// ndarray.reshape : Change the shape of an array without changing its data. +// +// Notes +// ----- +// In row-major, C-style order, in two dimensions, the row index +// varies the slowest, and the column index the quickest. This can +// be generalized to multiple dimensions, where row-major order +// implies that the index along the first axis varies slowest, and +// the index along the last quickest. The opposite holds for +// column-major, Fortran-style index ordering. +// +// When a view is desired in as many cases as possible, ``arr.reshape(-1)`` +// may be preferable. However, ``ravel`` supports ``K`` in the optional +// ``order`` argument while ``reshape`` does not. +// +// Examples +// -------- +// It is equivalent to ``reshape(-1, order=order)``. 
+// +// >>> x = np.array([[1, 2, 3], [4, 5, 6]]) +// >>> np.ravel(x) +// array([1, 2, 3, 4, 5, 6]) +// +// >>> x.reshape(-1) +// array([1, 2, 3, 4, 5, 6]) +// +// >>> np.ravel(x, order='F') +// array([1, 4, 2, 5, 3, 6]) +// +// When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: +// +// >>> np.ravel(x.T) +// array([1, 4, 2, 5, 3, 6]) +// >>> np.ravel(x.T, order='A') +// array([1, 2, 3, 4, 5, 6]) +// +// When ``order`` is 'K', it will preserve orderings that are neither 'C' +// nor 'F', but won't reverse axes: +// +// >>> a = np.arange(3)[::-1]; a +// array([2, 1, 0]) +// >>> a.ravel(order='C') +// array([2, 1, 0]) +// >>> a.ravel(order='K') +// array([2, 1, 0]) +// +// >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a +// array([[[ 0, 2, 4], +// [ 1, 3, 5]], +// [[ 6, 8, 10], +// [ 7, 9, 11]]]) +// >>> a.ravel(order='C') +// array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) +// >>> a.ravel(order='K') +// array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) +// +// +// +//go:linkname Ravel py.ravel +func Ravel(a *py.Object, order *py.Object) *py.Object +// +// Repeat each element of an array after themselves +// +// Parameters +// ---------- +// a : array_like +// Input array. +// repeats : int or array of ints +// The number of repetitions for each element. `repeats` is broadcasted +// to fit the shape of the given axis. +// axis : int, optional +// The axis along which to repeat values. By default, use the +// flattened input array, and return a flat output array. +// +// Returns +// ------- +// repeated_array : ndarray +// Output array which has the same shape as `a`, except along +// the given axis. +// +// See Also +// -------- +// tile : Tile an array. +// unique : Find the unique elements of an array. 
+// +// Examples +// -------- +// >>> np.repeat(3, 4) +// array([3, 3, 3, 3]) +// >>> x = np.array([[1,2],[3,4]]) +// >>> np.repeat(x, 2) +// array([1, 1, 2, 2, 3, 3, 4, 4]) +// >>> np.repeat(x, 3, axis=1) +// array([[1, 1, 1, 2, 2, 2], +// [3, 3, 3, 4, 4, 4]]) +// >>> np.repeat(x, [1, 2], axis=0) +// array([[1, 2], +// [3, 4], +// [3, 4]]) +// +// +// +//go:linkname Repeat py.repeat +func Repeat(a *py.Object, repeats *py.Object, axis *py.Object) *py.Object +// +// Gives a new shape to an array without changing its data. +// +// Parameters +// ---------- +// a : array_like +// Array to be reshaped. +// newshape : int or tuple of ints +// The new shape should be compatible with the original shape. If +// an integer, then the result will be a 1-D array of that length. +// One shape dimension can be -1. In this case, the value is +// inferred from the length of the array and remaining dimensions. +// order : {'C', 'F', 'A'}, optional +// Read the elements of `a` using this index order, and place the +// elements into the reshaped array using this index order. 'C' +// means to read / write the elements using C-like index order, +// with the last axis index changing fastest, back to the first +// axis index changing slowest. 'F' means to read / write the +// elements using Fortran-like index order, with the first index +// changing fastest, and the last index changing slowest. Note that +// the 'C' and 'F' options take no account of the memory layout of +// the underlying array, and only refer to the order of indexing. +// 'A' means to read / write the elements in Fortran-like index +// order if `a` is Fortran *contiguous* in memory, C-like order +// otherwise. +// +// Returns +// ------- +// reshaped_array : ndarray +// This will be a new view object if possible; otherwise, it will +// be a copy. Note there is no guarantee of the *memory layout* (C- or +// Fortran- contiguous) of the returned array. +// +// See Also +// -------- +// ndarray.reshape : Equivalent method. 
+// +// Notes +// ----- +// It is not always possible to change the shape of an array without copying +// the data. +// +// The `order` keyword gives the index ordering both for *fetching* the values +// from `a`, and then *placing* the values into the output array. +// For example, let's say you have an array: +// +// >>> a = np.arange(6).reshape((3, 2)) +// >>> a +// array([[0, 1], +// [2, 3], +// [4, 5]]) +// +// You can think of reshaping as first raveling the array (using the given +// index order), then inserting the elements from the raveled array into the +// new array using the same kind of index ordering as was used for the +// raveling. +// +// >>> np.reshape(a, (2, 3)) # C-like index ordering +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering +// array([[0, 4, 3], +// [2, 1, 5]]) +// >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') +// array([[0, 4, 3], +// [2, 1, 5]]) +// +// Examples +// -------- +// >>> a = np.array([[1,2,3], [4,5,6]]) +// >>> np.reshape(a, 6) +// array([1, 2, 3, 4, 5, 6]) +// >>> np.reshape(a, 6, order='F') +// array([1, 4, 2, 5, 3, 6]) +// +// >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 +// array([[1, 2], +// [3, 4], +// [5, 6]]) +// +// +//go:linkname Reshape py.reshape +func Reshape(a *py.Object, newshape *py.Object, order *py.Object) *py.Object +// +// Return a new array with the specified shape. +// +// If the new array is larger than the original array, then the new +// array is filled with repeated copies of `a`. Note that this behavior +// is different from a.resize(new_shape) which fills with zeros instead +// of repeated copies of `a`. +// +// Parameters +// ---------- +// a : array_like +// Array to be resized. +// +// new_shape : int or tuple of int +// Shape of resized array. 
+// +// Returns +// ------- +// reshaped_array : ndarray +// The new array is formed from the data in the old array, repeated +// if necessary to fill out the required number of elements. The +// data are repeated iterating over the array in C-order. +// +// See Also +// -------- +// numpy.reshape : Reshape an array without changing the total size. +// numpy.pad : Enlarge and pad an array. +// numpy.repeat : Repeat elements of an array. +// ndarray.resize : resize an array in-place. +// +// Notes +// ----- +// When the total size of the array does not change `~numpy.reshape` should +// be used. In most other cases either indexing (to reduce the size) +// or padding (to increase the size) may be a more appropriate solution. +// +// Warning: This functionality does **not** consider axes separately, +// i.e. it does not apply interpolation/extrapolation. +// It fills the return array with the required number of elements, iterating +// over `a` in C-order, disregarding axes (and cycling back from the start if +// the new shape is larger). This functionality is therefore not suitable to +// resize images, or data where each axis represents a separate and distinct +// entity. +// +// Examples +// -------- +// >>> a=np.array([[0,1],[2,3]]) +// >>> np.resize(a,(2,3)) +// array([[0, 1, 2], +// [3, 0, 1]]) +// >>> np.resize(a,(1,4)) +// array([[0, 1, 2, 3]]) +// >>> np.resize(a,(2,4)) +// array([[0, 1, 2, 3], +// [0, 1, 2, 3]]) +// +// +// +//go:linkname Resize py.resize +func Resize(a *py.Object, newShape *py.Object) *py.Object +// +// Evenly round to the given number of decimals. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// decimals : int, optional +// Number of decimal places to round to (default: 0). If +// decimals is negative, it specifies the number of positions to +// the left of the decimal point. +// out : ndarray, optional +// Alternative output array in which to place the result. 
It must have +// the same shape as the expected output, but the type of the output +// values will be cast if necessary. See :ref:`ufuncs-output-type` for more +// details. +// +// Returns +// ------- +// rounded_array : ndarray +// An array of the same type as `a`, containing the rounded values. +// Unless `out` was specified, a new array is created. A reference to +// the result is returned. +// +// The real and imaginary parts of complex numbers are rounded +// separately. The result of rounding a float is a float. +// +// See Also +// -------- +// ndarray.round : equivalent method +// around : an alias for this function +// ceil, fix, floor, rint, trunc +// +// +// Notes +// ----- +// For values exactly halfway between rounded decimal values, NumPy +// rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, +// -0.5 and 0.5 round to 0.0, etc. +// +// ``np.round`` uses a fast but sometimes inexact algorithm to round +// floating-point datatypes. For positive `decimals` it is equivalent to +// ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has +// error due to the inexact representation of decimal fractions in the IEEE +// floating point standard [1]_ and errors introduced when scaling by powers +// of ten. For instance, note the extra "1" in the following: +// +// >>> np.round(56294995342131.5, 3) +// 56294995342131.51 +// +// If your goal is to print such values with a fixed number of decimals, it is +// preferable to use numpy's float printing routines to limit the number of +// printed decimals: +// +// >>> np.format_float_positional(56294995342131.5, precision=3) +// '56294995342131.5' +// +// The float printing routines use an accurate but much more computationally +// demanding algorithm to compute the number of digits after the decimal +// point. 
+// +// Alternatively, Python's builtin `round` function uses a more accurate +// but slower algorithm for 64-bit floating point values: +// +// >>> round(56294995342131.5, 3) +// 56294995342131.5 +// >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 +// (16.06, 16.05) +// +// +// References +// ---------- +// .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, +// https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF +// +// Examples +// -------- +// >>> np.round([0.37, 1.64]) +// array([0., 2.]) +// >>> np.round([0.37, 1.64], decimals=1) +// array([0.4, 1.6]) +// >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value +// array([0., 2., 2., 4., 4.]) +// >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned +// array([ 1, 2, 3, 11]) +// >>> np.round([1,2,3,11], decimals=-1) +// array([ 0, 0, 0, 10]) +// +// +// +//go:linkname Round py.round +func Round(a *py.Object, decimals *py.Object, out *py.Object) *py.Object +// +// Round an array to the given number of decimals. +// +// `~numpy.round_` is a disrecommended backwards-compatibility +// alias of `~numpy.around` and `~numpy.round`. +// +// .. deprecated:: 1.25.0 +// ``round_`` is deprecated as of NumPy 1.25.0, and will be +// removed in NumPy 2.0. Please use `round` instead. +// +// See Also +// -------- +// around : equivalent function; see for details. +// +// +//go:linkname Round_ py.round_ +func Round_(a *py.Object, decimals *py.Object, out *py.Object) *py.Object +// +// Find indices where elements should be inserted to maintain order. +// +// Find the indices into a sorted array `a` such that, if the +// corresponding elements in `v` were inserted before the indices, the +// order of `a` would be preserved. 
+// +// Assuming that `a` is sorted: +// +// ====== ============================ +// `side` returned index `i` satisfies +// ====== ============================ +// left ``a[i-1] < v <= a[i]`` +// right ``a[i-1] <= v < a[i]`` +// ====== ============================ +// +// Parameters +// ---------- +// a : 1-D array_like +// Input array. If `sorter` is None, then it must be sorted in +// ascending order, otherwise `sorter` must be an array of indices +// that sort it. +// v : array_like +// Values to insert into `a`. +// side : {'left', 'right'}, optional +// If 'left', the index of the first suitable location found is given. +// If 'right', return the last such index. If there is no suitable +// index, return either 0 or N (where N is the length of `a`). +// sorter : 1-D array_like, optional +// Optional array of integer indices that sort array a into ascending +// order. They are typically the result of argsort. +// +// .. versionadded:: 1.7.0 +// +// Returns +// ------- +// indices : int or array of ints +// Array of insertion points with the same shape as `v`, +// or an integer if `v` is a scalar. +// +// See Also +// -------- +// sort : Return a sorted copy of an array. +// histogram : Produce histogram from 1-D data. +// +// Notes +// ----- +// Binary search is used to find the required insertion points. +// +// As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing +// `nan` values. The enhanced sort order is documented in `sort`. +// +// This function uses the same algorithm as the builtin python `bisect.bisect_left` +// (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, +// which is also vectorized in the `v` argument. 
+// +// Examples +// -------- +// >>> np.searchsorted([1,2,3,4,5], 3) +// 2 +// >>> np.searchsorted([1,2,3,4,5], 3, side='right') +// 3 +// >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) +// array([0, 5, 1, 2]) +// +// +// +//go:linkname Searchsorted py.searchsorted +func Searchsorted(a *py.Object, v *py.Object, side *py.Object, sorter *py.Object) *py.Object +// +// Return the shape of an array. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// +// Returns +// ------- +// shape : tuple of ints +// The elements of the shape tuple give the lengths of the +// corresponding array dimensions. +// +// See Also +// -------- +// len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with +// ``N>=1``. +// ndarray.shape : Equivalent array method. +// +// Examples +// -------- +// >>> np.shape(np.eye(3)) +// (3, 3) +// >>> np.shape([[1, 3]]) +// (1, 2) +// >>> np.shape([0]) +// (1,) +// >>> np.shape(0) +// () +// +// >>> a = np.array([(1, 2), (3, 4), (5, 6)], +// ... dtype=[('x', 'i4'), ('y', 'i4')]) +// >>> np.shape(a) +// (3,) +// >>> a.shape +// (3,) +// +// +// +//go:linkname Shape py.shape +func Shape(a *py.Object) *py.Object +// +// Return the number of elements along a given axis. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : int, optional +// Axis along which the elements are counted. By default, give +// the total number of elements. +// +// Returns +// ------- +// element_count : int +// Number of elements along the specified axis. +// +// See Also +// -------- +// shape : dimensions of array +// ndarray.shape : dimensions of array +// ndarray.size : number of elements in array +// +// Examples +// -------- +// >>> a = np.array([[1,2,3],[4,5,6]]) +// >>> np.size(a) +// 6 +// >>> np.size(a,1) +// 3 +// >>> np.size(a,0) +// 2 +// +// +// +//go:linkname Size py.size +func Size(a *py.Object, axis *py.Object) *py.Object +// +// Check whether some values are true. 
+// +// Refer to `any` for full documentation. +// +// .. deprecated:: 1.25.0 +// ``sometrue`` is deprecated as of NumPy 1.25.0, and will be +// removed in NumPy 2.0. Please use `any` instead. +// +// See Also +// -------- +// any : equivalent function; see for details. +// +// +//go:linkname Sometrue py.sometrue +func Sometrue(__llgo_va_list ...interface{}) *py.Object +// +// Return a sorted copy of an array. +// +// Parameters +// ---------- +// a : array_like +// Array to be sorted. +// axis : int or None, optional +// Axis along which to sort. If None, the array is flattened before +// sorting. The default is -1, which sorts along the last axis. +// kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional +// Sorting algorithm. The default is 'quicksort'. Note that both 'stable' +// and 'mergesort' use timsort or radix sort under the covers and, in general, +// the actual implementation will vary with data type. The 'mergesort' option +// is retained for backwards compatibility. +// +// .. versionchanged:: 1.15.0. +// The 'stable' option was added. +// +// order : str or list of str, optional +// When `a` is an array with fields defined, this argument specifies +// which fields to compare first, second, etc. A single field can +// be specified as a string, and not all fields need be specified, +// but unspecified fields will still be used, in the order in which +// they come up in the dtype, to break ties. +// +// Returns +// ------- +// sorted_array : ndarray +// Array of the same type and shape as `a`. +// +// See Also +// -------- +// ndarray.sort : Method to sort an array in-place. +// argsort : Indirect sort. +// lexsort : Indirect stable sort on multiple keys. +// searchsorted : Find elements in a sorted array. +// partition : Partial sort. +// +// Notes +// ----- +// The various sorting algorithms are characterized by their average speed, +// worst case performance, work space size, and whether they are stable. 
A +// stable sort keeps items with the same key in the same relative +// order. The four algorithms implemented in NumPy have the following +// properties: +// +// =========== ======= ============= ============ ======== +// kind speed worst case work space stable +// =========== ======= ============= ============ ======== +// 'quicksort' 1 O(n^2) 0 no +// 'heapsort' 3 O(n*log(n)) 0 no +// 'mergesort' 2 O(n*log(n)) ~n/2 yes +// 'timsort' 2 O(n*log(n)) ~n/2 yes +// =========== ======= ============= ============ ======== +// +// .. note:: The datatype determines which of 'mergesort' or 'timsort' +// is actually used, even if 'mergesort' is specified. User selection +// at a finer scale is not currently available. +// +// All the sort algorithms make temporary copies of the data when +// sorting along any but the last axis. Consequently, sorting along +// the last axis is faster and uses less space than sorting along +// any other axis. +// +// The sort order for complex numbers is lexicographic. If both the real +// and imaginary parts are non-nan then the order is determined by the +// real parts except when they are equal, in which case the order is +// determined by the imaginary parts. +// +// Previous to numpy 1.4.0 sorting real and complex arrays containing nan +// values led to undefined behaviour. In numpy versions >= 1.4.0 nan +// values are sorted to the end. The extended sort order is: +// +// * Real: [R, nan] +// * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] +// +// where R is a non-nan real value. Complex values with the same nan +// placements are sorted according to the non-nan part if it exists. +// Non-nan values are sorted as before. +// +// .. versionadded:: 1.12.0 +// +// quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_. +// When sorting does not make enough progress it switches to +// `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_. +// This implementation makes quicksort O(n*log(n)) in the worst case. 
+// +// 'stable' automatically chooses the best stable sorting algorithm +// for the data type being sorted. +// It, along with 'mergesort' is currently mapped to +// `timsort <https://en.wikipedia.org/wiki/Timsort>`_ +// or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_ +// depending on the data type. +// API forward compatibility currently limits the +// ability to select the implementation and it is hardwired for the different +// data types. +// +// .. versionadded:: 1.17.0 +// +// Timsort is added for better performance on already or nearly +// sorted data. On random data timsort is almost identical to +// mergesort. It is now used for stable sort while quicksort is still the +// default sort if none is chosen. For timsort details, refer to +// `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_. +// 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an +// O(n) sort instead of O(n log n). +// +// .. versionchanged:: 1.18.0 +// +// NaT now sorts to the end of arrays for consistency with NaN. +// +// Examples +// -------- +// >>> a = np.array([[1,4],[3,1]]) +// >>> np.sort(a) # sort along the last axis +// array([[1, 4], +// [1, 3]]) +// >>> np.sort(a, axis=None) # sort the flattened array +// array([1, 1, 3, 4]) +// >>> np.sort(a, axis=0) # sort along the first axis +// array([[1, 1], +// [3, 4]]) +// +// Use the `order` keyword to specify a field to use when sorting a +// structured array: +// +// >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] +// >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), +// ... 
('Galahad', 1.7, 38)] +// >>> a = np.array(values, dtype=dtype) # create a structured array +// >>> np.sort(a, order='height') # doctest: +SKIP +// array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), +// ('Lancelot', 1.8999999999999999, 38)], +// dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP +// array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), +// ('Arthur', 1.8, 41)], +// dtype=[('name', '|S10'), ('height', '>> x = np.array([[[0], [1], [2]]]) +// >>> x.shape +// (1, 3, 1) +// >>> np.squeeze(x).shape +// (3,) +// >>> np.squeeze(x, axis=0).shape +// (3, 1) +// >>> np.squeeze(x, axis=1).shape +// Traceback (most recent call last): +// ... +// ValueError: cannot select an axis to squeeze out which has size not equal to one +// >>> np.squeeze(x, axis=2).shape +// (1, 3) +// >>> x = np.array([[1234]]) +// >>> x.shape +// (1, 1) +// >>> np.squeeze(x) +// array(1234) # 0d array +// >>> np.squeeze(x).shape +// () +// >>> np.squeeze(x)[()] +// 1234 +// +// +// +//go:linkname Squeeze py.squeeze +func Squeeze(a *py.Object, axis *py.Object) *py.Object +// +// Compute the standard deviation along the specified axis. +// +// Returns the standard deviation, a measure of the spread of a distribution, +// of the array elements. The standard deviation is computed for the +// flattened array by default, otherwise over the specified axis. +// +// Parameters +// ---------- +// a : array_like +// Calculate the standard deviation of these values. +// axis : None or int or tuple of ints, optional +// Axis or axes along which the standard deviation is computed. The +// default is to compute the standard deviation of the flattened array. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, a standard deviation is performed over +// multiple axes, instead of a single axis or all the axes as before. +// dtype : dtype, optional +// Type to use in computing the standard deviation. 
For arrays of +// integer type the default is float64, for arrays of float types it is +// the same as the array type. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape as the expected output but the type (of the calculated +// values) will be cast if necessary. +// ddof : int, optional +// Means Delta Degrees of Freedom. The divisor used in calculations +// is ``N - ddof``, where ``N`` represents the number of elements. +// By default `ddof` is zero. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `std` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// where : array_like of bool, optional +// Elements to include in the standard deviation. +// See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// standard_deviation : ndarray, see dtype parameter above. +// If `out` is None, return a new array containing the standard deviation, +// otherwise return a reference to the output array. +// +// See Also +// -------- +// var, mean, nanmean, nanstd, nanvar +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// The standard deviation is the square root of the average of the squared +// deviations from the mean, i.e., ``std = sqrt(mean(x))``, where +// ``x = abs(a - a.mean())**2``. +// +// The average squared deviation is typically calculated as ``x.sum() / N``, +// where ``N = len(x)``. If, however, `ddof` is specified, the divisor +// ``N - ddof`` is used instead. 
In standard statistical practice, ``ddof=1`` +// provides an unbiased estimator of the variance of the infinite population. +// ``ddof=0`` provides a maximum likelihood estimate of the variance for +// normally distributed variables. The standard deviation computed in this +// function is the square root of the estimated variance, so even with +// ``ddof=1``, it will not be an unbiased estimate of the standard deviation +// per se. +// +// Note that, for complex numbers, `std` takes the absolute +// value before squaring, so that the result is always real and nonnegative. +// +// For floating-point input, the *std* is computed using the same +// precision the input has. Depending on the input data, this can cause +// the results to be inaccurate, especially for float32 (see example below). +// Specifying a higher-accuracy accumulator using the `dtype` keyword can +// alleviate this issue. +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4]]) +// >>> np.std(a) +// 1.1180339887498949 # may vary +// >>> np.std(a, axis=0) +// array([1., 1.]) +// >>> np.std(a, axis=1) +// array([0.5, 0.5]) +// +// In single precision, std() can be inaccurate: +// +// >>> a = np.zeros((2, 512*512), dtype=np.float32) +// >>> a[0, :] = 1.0 +// >>> a[1, :] = 0.1 +// >>> np.std(a) +// 0.45000005 +// +// Computing the standard deviation in float64 is more accurate: +// +// >>> np.std(a, dtype=np.float64) +// 0.44999999925494177 # may vary +// +// Specifying a where argument: +// +// >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) +// >>> np.std(a) +// 2.614064523559687 # may vary +// >>> np.std(a, where=[[True], [True], [False]]) +// 2.0 +// +// +// +//go:linkname Std py.std +func Std(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, ddof *py.Object, keepdims *py.Object) *py.Object +// +// Sum of array elements over a given axis. +// +// Parameters +// ---------- +// a : array_like +// Elements to sum. 
+// axis : None or int or tuple of ints, optional +// Axis or axes along which a sum is performed. The default, +// axis=None, will sum all of the elements of the input array. If +// axis is negative it counts from the last to the first axis. +// +// .. versionadded:: 1.7.0 +// +// If axis is a tuple of ints, a sum is performed on all of the axes +// specified in the tuple instead of a single axis or all the axes as +// before. +// dtype : dtype, optional +// The type of the returned array and of the accumulator in which the +// elements are summed. The dtype of `a` is used by default unless `a` +// has an integer dtype of less precision than the default platform +// integer. In that case, if `a` is signed then the platform integer +// is used while if `a` is unsigned then an unsigned integer of the +// same precision as the platform integer is used. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape as the expected output, but the type of the output +// values will be cast if necessary. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `sum` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// initial : scalar, optional +// Starting value for the sum. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.15.0 +// +// where : array_like of bool, optional +// Elements to include in the sum. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// sum_along_axis : ndarray +// An array with the same shape as `a`, with the specified +// axis removed. 
If `a` is a 0-d array, or if `axis` is None, a scalar +// is returned. If an output array is specified, a reference to +// `out` is returned. +// +// See Also +// -------- +// ndarray.sum : Equivalent method. +// +// add.reduce : Equivalent functionality of `add`. +// +// cumsum : Cumulative sum of array elements. +// +// trapz : Integration of array values using the composite trapezoidal rule. +// +// mean, average +// +// Notes +// ----- +// Arithmetic is modular when using integer types, and no error is +// raised on overflow. +// +// The sum of an empty array is the neutral element 0: +// +// >>> np.sum([]) +// 0.0 +// +// For floating point numbers the numerical precision of sum (and +// ``np.add.reduce``) is in general limited by directly adding each number +// individually to the result causing rounding errors in every step. +// However, often numpy will use a numerically better approach (partial +// pairwise summation) leading to improved precision in many use-cases. +// This improved precision is always provided when no ``axis`` is given. +// When ``axis`` is given, it will depend on which axis is summed. +// Technically, to provide the best speed possible, the improved precision +// is only used when the summation is along the fast axis in memory. +// Note that the exact precision may vary depending on other parameters. +// In contrast to NumPy, Python's ``math.fsum`` function uses a slower but +// more precise approach to summation. +// Especially when summing a large number of lower precision floating point +// numbers, such as ``float32``, numerical errors can become significant. +// In such cases it can be advisable to use `dtype="float64"` to use a higher +// precision for the output. 
+// +// Examples +// -------- +// >>> np.sum([0.5, 1.5]) +// 2.0 +// >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) +// 1 +// >>> np.sum([[0, 1], [0, 5]]) +// 6 +// >>> np.sum([[0, 1], [0, 5]], axis=0) +// array([0, 6]) +// >>> np.sum([[0, 1], [0, 5]], axis=1) +// array([1, 5]) +// >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) +// array([1., 5.]) +// +// If the accumulator is too small, overflow occurs: +// +// >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) +// -128 +// +// You can also start the sum with a value other than zero: +// +// >>> np.sum([10], initial=5) +// 15 +// +// +//go:linkname Sum py.sum +func Sum(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Interchange two axes of an array. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis1 : int +// First axis. +// axis2 : int +// Second axis. +// +// Returns +// ------- +// a_swapped : ndarray +// For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is +// returned; otherwise a new array is created. For earlier NumPy +// versions a view of `a` is returned only if the order of the +// axes is changed, otherwise the input array is returned. +// +// Examples +// -------- +// >>> x = np.array([[1,2,3]]) +// >>> np.swapaxes(x,0,1) +// array([[1], +// [2], +// [3]]) +// +// >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) +// >>> x +// array([[[0, 1], +// [2, 3]], +// [[4, 5], +// [6, 7]]]) +// +// >>> np.swapaxes(x,0,2) +// array([[[0, 4], +// [2, 6]], +// [[1, 5], +// [3, 7]]]) +// +// +// +//go:linkname Swapaxes py.swapaxes +func Swapaxes(a *py.Object, axis1 *py.Object, axis2 *py.Object) *py.Object +// +// Take elements from an array along an axis. +// +// When axis is not None, this function does the same thing as "fancy" +// indexing (indexing arrays using arrays); however, it can be easier to use +// if you need elements along a given axis. 
A call such as +// ``np.take(arr, indices, axis=3)`` is equivalent to +// ``arr[:,:,:,indices,...]``. +// +// Explained without fancy indexing, this is equivalent to the following use +// of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of +// indices:: +// +// Ni, Nk = a.shape[:axis], a.shape[axis+1:] +// Nj = indices.shape +// for ii in ndindex(Ni): +// for jj in ndindex(Nj): +// for kk in ndindex(Nk): +// out[ii + jj + kk] = a[ii + (indices[jj],) + kk] +// +// Parameters +// ---------- +// a : array_like (Ni..., M, Nk...) +// The source array. +// indices : array_like (Nj...) +// The indices of the values to extract. +// +// .. versionadded:: 1.8.0 +// +// Also allow scalars for indices. +// axis : int, optional +// The axis over which to select values. By default, the flattened +// input array is used. +// out : ndarray, optional (Ni..., Nj..., Nk...) +// If provided, the result will be placed in this array. It should +// be of the appropriate shape and dtype. Note that `out` is always +// buffered if `mode='raise'`; use other modes for better performance. +// mode : {'raise', 'wrap', 'clip'}, optional +// Specifies how out-of-bounds indices will behave. +// +// * 'raise' -- raise an error (default) +// * 'wrap' -- wrap around +// * 'clip' -- clip to the range +// +// 'clip' mode means that all indices that are too large are replaced +// by the index that addresses the last element along that axis. Note +// that this disables indexing with negative numbers. +// +// Returns +// ------- +// out : ndarray (Ni..., Nj..., Nk...) +// The returned array has the same type as `a`. 
+// +// See Also +// -------- +// compress : Take elements using a boolean mask +// ndarray.take : equivalent method +// take_along_axis : Take elements by matching the array and the index arrays +// +// Notes +// ----- +// +// By eliminating the inner loop in the description above, and using `s_` to +// build simple slice objects, `take` can be expressed in terms of applying +// fancy indexing to each 1-d slice:: +// +// Ni, Nk = a.shape[:axis], a.shape[axis+1:] +// for ii in ndindex(Ni): +// for kk in ndindex(Nj): +// out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] +// +// For this reason, it is equivalent to (but faster than) the following use +// of `apply_along_axis`:: +// +// out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) +// +// Examples +// -------- +// >>> a = [4, 3, 5, 7, 6, 8] +// >>> indices = [0, 1, 4] +// >>> np.take(a, indices) +// array([4, 3, 6]) +// +// In this example if `a` is an ndarray, "fancy" indexing can be used. +// +// >>> a = np.array(a) +// >>> a[indices] +// array([4, 3, 6]) +// +// If `indices` is not one dimensional, the output also has these dimensions. +// +// >>> np.take(a, [[0, 1], [2, 3]]) +// array([[4, 3], +// [5, 7]]) +// +// +//go:linkname Take py.take +func Take(a *py.Object, indices *py.Object, axis *py.Object, out *py.Object, mode *py.Object) *py.Object +// +// Return the sum along diagonals of the array. +// +// If `a` is 2-D, the sum along its diagonal with the given offset +// is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. +// +// If `a` has more than two dimensions, then the axes specified by axis1 and +// axis2 are used to determine the 2-D sub-arrays whose traces are returned. +// The shape of the resulting array is the same as that of `a` with `axis1` +// and `axis2` removed. +// +// Parameters +// ---------- +// a : array_like +// Input array, from which the diagonals are taken. +// offset : int, optional +// Offset of the diagonal from the main diagonal. 
Can be both positive +// and negative. Defaults to 0. +// axis1, axis2 : int, optional +// Axes to be used as the first and second axis of the 2-D sub-arrays +// from which the diagonals should be taken. Defaults are the first two +// axes of `a`. +// dtype : dtype, optional +// Determines the data-type of the returned array and of the accumulator +// where the elements are summed. If dtype has the value None and `a` is +// of integer type of precision less than the default integer +// precision, then the default integer precision is used. Otherwise, +// the precision is the same as that of `a`. +// out : ndarray, optional +// Array into which the output is placed. Its type is preserved and +// it must be of the right shape to hold the output. +// +// Returns +// ------- +// sum_along_diagonals : ndarray +// If `a` is 2-D, the sum along the diagonal is returned. If `a` has +// larger dimensions, then an array of sums along diagonals is returned. +// +// See Also +// -------- +// diag, diagonal, diagflat +// +// Examples +// -------- +// >>> np.trace(np.eye(3)) +// 3.0 +// >>> a = np.arange(8).reshape((2,2,2)) +// >>> np.trace(a) +// array([6, 8]) +// +// >>> a = np.arange(24).reshape((2,2,2,3)) +// >>> np.trace(a).shape +// (2, 3) +// +// +// +//go:linkname Trace py.trace +func Trace(a *py.Object, offset *py.Object, axis1 *py.Object, axis2 *py.Object, dtype *py.Object, out *py.Object) *py.Object +// +// Returns an array with axes transposed. +// +// For a 1-D array, this returns an unchanged view of the original array, as a +// transposed vector is simply the same vector. +// To convert a 1-D array into a 2-D column vector, an additional dimension +// must be added, e.g., ``np.atleast2d(a).T`` achieves this, as does +// ``a[:, np.newaxis]``. +// For a 2-D array, this is the standard matrix transpose. +// For an n-D array, if axes are given, their order indicates how the +// axes are permuted (see Examples). 
If axes are not provided, then +// ``transpose(a).shape == a.shape[::-1]``. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axes : tuple or list of ints, optional +// If specified, it must be a tuple or list which contains a permutation +// of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis +// of the returned array will correspond to the axis numbered ``axes[i]`` +// of the input. If not specified, defaults to ``range(a.ndim)[::-1]``, +// which reverses the order of the axes. +// +// Returns +// ------- +// p : ndarray +// `a` with its axes permuted. A view is returned whenever possible. +// +// See Also +// -------- +// ndarray.transpose : Equivalent method. +// moveaxis : Move axes of an array to new positions. +// argsort : Return the indices that would sort an array. +// +// Notes +// ----- +// Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors +// when using the `axes` keyword argument. +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4]]) +// >>> a +// array([[1, 2], +// [3, 4]]) +// >>> np.transpose(a) +// array([[1, 3], +// [2, 4]]) +// +// >>> a = np.array([1, 2, 3, 4]) +// >>> a +// array([1, 2, 3, 4]) +// >>> np.transpose(a) +// array([1, 2, 3, 4]) +// +// >>> a = np.ones((1, 2, 3)) +// >>> np.transpose(a, (1, 0, 2)).shape +// (2, 1, 3) +// +// >>> a = np.ones((2, 3, 4, 5)) +// >>> np.transpose(a).shape +// (5, 4, 3, 2) +// +// +// +//go:linkname Transpose py.transpose +func Transpose(a *py.Object, axes *py.Object) *py.Object +// +// Compute the variance along the specified axis. +// +// Returns the variance of the array elements, a measure of the spread of a +// distribution. The variance is computed for the flattened array by +// default, otherwise over the specified axis. +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose variance is desired. If `a` is not an +// array, a conversion is attempted. 
+// axis : None or int or tuple of ints, optional +// Axis or axes along which the variance is computed. The default is to +// compute the variance of the flattened array. +// +// .. versionadded:: 1.7.0 +// +// If this is a tuple of ints, a variance is performed over multiple axes, +// instead of a single axis or all the axes as before. +// dtype : data-type, optional +// Type to use in computing the variance. For arrays of integer type +// the default is `float64`; for arrays of float types it is the same as +// the array type. +// out : ndarray, optional +// Alternate output array in which to place the result. It must have +// the same shape as the expected output, but the type is cast if +// necessary. +// ddof : int, optional +// "Delta Degrees of Freedom": the divisor used in the calculation is +// ``N - ddof``, where ``N`` represents the number of elements. By +// default `ddof` is zero. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the input array. +// +// If the default value is passed, then `keepdims` will not be +// passed through to the `var` method of sub-classes of +// `ndarray`, however any non-default value will be. If the +// sub-class' method does not implement `keepdims` any +// exceptions will be raised. +// +// where : array_like of bool, optional +// Elements to include in the variance. See `~numpy.ufunc.reduce` for +// details. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// variance : ndarray, see dtype parameter above +// If ``out=None``, returns a new array containing the variance; +// otherwise, a reference to the output array is returned. 
+// +// See Also +// -------- +// std, mean, nanmean, nanstd, nanvar +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// The variance is the average of the squared deviations from the mean, +// i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``. +// +// The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``. +// If, however, `ddof` is specified, the divisor ``N - ddof`` is used +// instead. In standard statistical practice, ``ddof=1`` provides an +// unbiased estimator of the variance of a hypothetical infinite population. +// ``ddof=0`` provides a maximum likelihood estimate of the variance for +// normally distributed variables. +// +// Note that for complex numbers, the absolute value is taken before +// squaring, so that the result is always real and nonnegative. +// +// For floating-point input, the variance is computed using the same +// precision the input has. Depending on the input data, this can cause +// the results to be inaccurate, especially for `float32` (see example +// below). Specifying a higher-accuracy accumulator using the ``dtype`` +// keyword can alleviate this issue. 
+// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, 4]]) +// >>> np.var(a) +// 1.25 +// >>> np.var(a, axis=0) +// array([1., 1.]) +// >>> np.var(a, axis=1) +// array([0.25, 0.25]) +// +// In single precision, var() can be inaccurate: +// +// >>> a = np.zeros((2, 512*512), dtype=np.float32) +// >>> a[0, :] = 1.0 +// >>> a[1, :] = 0.1 +// >>> np.var(a) +// 0.20250003 +// +// Computing the variance in float64 is more accurate: +// +// >>> np.var(a, dtype=np.float64) +// 0.20249999932944759 # may vary +// >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 +// 0.2025 +// +// Specifying a where argument: +// +// >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) +// >>> np.var(a) +// 6.833333333333333 # may vary +// >>> np.var(a, where=[[True], [True], [False]]) +// 4.0 +// +// +// +//go:linkname Var py.var +func Var(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, ddof *py.Object, keepdims *py.Object) *py.Object +// absolute(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Calculate the absolute value element-wise. +// +// ``np.abs`` is a shorthand for this function. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// absolute : ndarray +// An ndarray containing the absolute value of +// each element in `x`. For complex input, ``a + ib``, the +// absolute value is :math:`\sqrt{ a^2 + b^2 }`. +// This is a scalar if `x` is a scalar. +// +// Examples +// -------- +// >>> x = np.array([-1.2, 1.2]) +// >>> np.absolute(x) +// array([ 1.2, 1.2]) +// >>> np.absolute(1.2 + 1j) +// 1.5620499351813308 +// +// Plot the function over ``[-10, 10]``: +// +// >>> import matplotlib.pyplot as plt +// +// >>> x = np.linspace(start=-10, stop=10, num=101) +// >>> plt.plot(x, np.absolute(x)) +// >>> plt.show() +// +// Plot the function over the complex plane: +// +// >>> xx = x + 1j * x[:, np.newaxis] +// >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray') +// >>> plt.show() +// +// The `abs` function can be used as a shorthand for ``np.absolute`` on +// ndarrays. +// +// >>> x = np.array([-1.2, 1.2]) +// >>> abs(x) +// array([1.2, 1.2]) +// +//go:linkname Absolute py.absolute +func Absolute(x *py.Object, out *py.Object) *py.Object +// add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Add arguments element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays to be added. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// add : ndarray or scalar +// The sum of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// Notes +// ----- +// Equivalent to `x1` + `x2` in terms of array broadcasting. +// +// Examples +// -------- +// >>> np.add(1.0, 4.0) +// 5.0 +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> np.add(x1, x2) +// array([[ 0., 2., 4.], +// [ 3., 5., 7.], +// [ 6., 8., 10.]]) +// +// The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays. +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> x1 + x2 +// array([[ 0., 2., 4.], +// [ 3., 5., 7.], +// [ 6., 8., 10.]]) +// +//go:linkname Add py.add +func Add(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// arccos(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Trigonometric inverse cosine, element-wise. +// +// The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``. +// +// Parameters +// ---------- +// x : array_like +// `x`-coordinate on the unit circle. +// For real arguments, the domain is [-1, 1]. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. 
If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// angle : ndarray +// The angle of the ray intersecting the unit circle at the given +// `x`-coordinate in radians [0, pi]. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// cos, arctan, arcsin, emath.arccos +// +// Notes +// ----- +// `arccos` is a multivalued function: for each `x` there are infinitely +// many numbers `z` such that ``cos(z) = x``. The convention is to return +// the angle `z` whose real part lies in `[0, pi]`. +// +// For real-valued input data types, `arccos` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arccos` is a complex analytic function that +// has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from +// above on the former and from below on the latter. +// +// The inverse `cos` is also known as `acos` or cos^-1. +// +// References +// ---------- +// M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 79. +// https://personal.math.ubc.ca/~cbm/aands/page_79.htm +// +// Examples +// -------- +// We expect the arccos of 1 to be 0, and of -1 to be pi: +// +// >>> np.arccos([1, -1]) +// array([ 0. 
, 3.14159265]) +// +// Plot arccos: +// +// >>> import matplotlib.pyplot as plt +// >>> x = np.linspace(-1, 1, num=100) +// >>> plt.plot(x, np.arccos(x)) +// >>> plt.axis('tight') +// >>> plt.show() +// +//go:linkname Arccos py.arccos +func Arccos(x *py.Object, out *py.Object) *py.Object +// arccosh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Inverse hyperbolic cosine, element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// arccosh : ndarray +// Array of the same shape as `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// +// cosh, arcsinh, sinh, arctanh, tanh +// +// Notes +// ----- +// `arccosh` is a multivalued function: for each `x` there are infinitely +// many numbers `z` such that `cosh(z) = x`. The convention is to return the +// `z` whose imaginary part lies in ``[-pi, pi]`` and the real part in +// ``[0, inf]``. +// +// For real-valued input data types, `arccosh` always returns real output. 
+// For each value that cannot be expressed as a real number or infinity, it +// yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arccosh` is a complex analytical function that +// has a branch cut `[-inf, 1]` and is continuous from above on it. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 86. +// https://personal.math.ubc.ca/~cbm/aands/page_86.htm +// .. [2] Wikipedia, "Inverse hyperbolic function", +// https://en.wikipedia.org/wiki/Arccosh +// +// Examples +// -------- +// >>> np.arccosh([np.e, 10.0]) +// array([ 1.65745445, 2.99322285]) +// >>> np.arccosh(1) +// 0.0 +// +//go:linkname Arccosh py.arccosh +func Arccosh(x *py.Object, out *py.Object) *py.Object +// arcsin(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Inverse sine, element-wise. +// +// Parameters +// ---------- +// x : array_like +// `y`-coordinate on the unit circle. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// angle : ndarray +// The inverse sine of each element in `x`, in radians and in the +// closed interval ``[-pi/2, pi/2]``. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// sin, cos, arccos, tan, arctan, arctan2, emath.arcsin +// +// Notes +// ----- +// `arcsin` is a multivalued function: for each `x` there are infinitely +// many numbers `z` such that :math:`sin(z) = x`. The convention is to +// return the angle `z` whose real part lies in [-pi/2, pi/2]. +// +// For real-valued input data types, *arcsin* always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arcsin` is a complex analytic function that +// has, by convention, the branch cuts [-inf, -1] and [1, inf] and is +// continuous from above on the former and from below on the latter. +// +// The inverse sine is also known as `asin` or sin^{-1}. +// +// References +// ---------- +// Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, +// 10th printing, New York: Dover, 1964, pp. 79ff. +// https://personal.math.ubc.ca/~cbm/aands/page_79.htm +// +// Examples +// -------- +// >>> np.arcsin(1) # pi/2 +// 1.5707963267948966 +// >>> np.arcsin(-1) # -pi/2 +// -1.5707963267948966 +// >>> np.arcsin(0) +// 0.0 +// +//go:linkname Arcsin py.arcsin +func Arcsin(x *py.Object, out *py.Object) *py.Object +// arcsinh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Inverse hyperbolic sine element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Array of the same shape as `x`. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// `arcsinh` is a multivalued function: for each `x` there are infinitely +// many numbers `z` such that `sinh(z) = x`. The convention is to return the +// `z` whose imaginary part lies in `[-pi/2, pi/2]`. +// +// For real-valued input data types, `arcsinh` always returns real output. +// For each value that cannot be expressed as a real number or infinity, it +// returns ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arccos` is a complex analytical function that +// has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from +// the right on the former and from the left on the latter. +// +// The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 86. +// https://personal.math.ubc.ca/~cbm/aands/page_86.htm +// .. 
[2] Wikipedia, "Inverse hyperbolic function", +// https://en.wikipedia.org/wiki/Arcsinh +// +// Examples +// -------- +// >>> np.arcsinh(np.array([np.e, 10.0])) +// array([ 1.72538256, 2.99822295]) +// +//go:linkname Arcsinh py.arcsinh +func Arcsinh(x *py.Object, out *py.Object) *py.Object +// arctan(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Trigonometric inverse tangent, element-wise. +// +// The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``. +// +// Parameters +// ---------- +// x : array_like +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Out has the same shape as `x`. Its real part is in +// ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``). +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`) +// and the positive `x`-axis. +// angle : Argument of complex values. +// +// Notes +// ----- +// `arctan` is a multi-valued function: for each `x` there are infinitely +// many numbers `z` such that tan(`z`) = `x`. 
The convention is to return +// the angle `z` whose real part lies in [-pi/2, pi/2]. +// +// For real-valued input data types, `arctan` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arctan` is a complex analytic function that +// has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous +// from the left on the former and from the right on the latter. +// +// The inverse tangent is also known as `atan` or tan^{-1}. +// +// References +// ---------- +// Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, +// 10th printing, New York: Dover, 1964, pp. 79. +// https://personal.math.ubc.ca/~cbm/aands/page_79.htm +// +// Examples +// -------- +// We expect the arctan of 0 to be 0, and of 1 to be pi/4: +// +// >>> np.arctan([0, 1]) +// array([ 0. , 0.78539816]) +// +// >>> np.pi/4 +// 0.78539816339744828 +// +// Plot arctan: +// +// >>> import matplotlib.pyplot as plt +// >>> x = np.linspace(-10, 10) +// >>> plt.plot(x, np.arctan(x)) +// >>> plt.axis('tight') +// >>> plt.show() +// +//go:linkname Arctan py.arctan +func Arctan(x *py.Object, out *py.Object) *py.Object +// arctan2(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly. +// +// The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is +// the signed angle in radians between the ray ending at the origin and +// passing through the point (1,0), and the ray ending at the origin and +// passing through the point (`x2`, `x1`). (Note the role reversal: the +// "`y`-coordinate" is the first function parameter, the "`x`-coordinate" +// is the second.) 
By IEEE convention, this function is defined for +// `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see +// Notes for specific values). +// +// This function is not defined for complex-valued arguments; for the +// so-called argument of complex values, use `angle`. +// +// Parameters +// ---------- +// x1 : array_like, real-valued +// `y`-coordinates. +// x2 : array_like, real-valued +// `x`-coordinates. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// angle : ndarray +// Array of angles in radians, in the range ``[-pi, pi]``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// arctan, tan, angle +// +// Notes +// ----- +// *arctan2* is identical to the `atan2` function of the underlying +// C library. 
The following special values are defined in the C +// standard: [1]_ +// +// ====== ====== ================ +// `x1` `x2` `arctan2(x1,x2)` +// ====== ====== ================ +// +/- 0 +0 +/- 0 +// +/- 0 -0 +/- pi +// > 0 +/-inf +0 / +pi +// < 0 +/-inf -0 / -pi +// +/-inf +inf +/- (pi/4) +// +/-inf -inf +/- (3*pi/4) +// ====== ====== ================ +// +// Note that +0 and -0 are distinct floating point numbers, as are +inf +// and -inf. +// +// References +// ---------- +// .. [1] ISO/IEC standard 9899:1999, "Programming language C." +// +// Examples +// -------- +// Consider four points in different quadrants: +// +// >>> x = np.array([-1, +1, +1, -1]) +// >>> y = np.array([-1, -1, +1, +1]) +// >>> np.arctan2(y, x) * 180 / np.pi +// array([-135., -45., 45., 135.]) +// +// Note the order of the parameters. `arctan2` is defined also when `x2` = 0 +// and at several other special points, obtaining values in +// the range ``[-pi, pi]``: +// +// >>> np.arctan2([1., -1.], [0., 0.]) +// array([ 1.57079633, -1.57079633]) +// >>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf]) +// array([0. , 3.14159265, 0.78539816]) +// +//go:linkname Arctan2 py.arctan2 +func Arctan2(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// arctanh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Inverse hyperbolic tangent element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. 
At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Array of the same shape as `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// emath.arctanh +// +// Notes +// ----- +// `arctanh` is a multivalued function: for each `x` there are infinitely +// many numbers `z` such that ``tanh(z) = x``. The convention is to return +// the `z` whose imaginary part lies in `[-pi/2, pi/2]`. +// +// For real-valued input data types, `arctanh` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `arctanh` is a complex analytical function +// that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from +// above on the former and from below on the latter. +// +// The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 86. +// https://personal.math.ubc.ca/~cbm/aands/page_86.htm +// .. [2] Wikipedia, "Inverse hyperbolic function", +// https://en.wikipedia.org/wiki/Arctanh +// +// Examples +// -------- +// >>> np.arctanh([0, -0.5]) +// array([ 0. 
, -0.54930614]) +// +//go:linkname Arctanh py.arctanh +func Arctanh(x *py.Object, out *py.Object) *py.Object +// bitwise_and(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the bit-wise AND of two arrays element-wise. +// +// Computes the bit-wise AND of the underlying binary representation of +// the integers in the input arrays. This ufunc implements the C/Python +// operator ``&``. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Only integer and boolean types are handled. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Result. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_and +// bitwise_or +// bitwise_xor +// binary_repr : +// Return the binary representation of the input number as a string. +// +// Examples +// -------- +// The number 13 is represented by ``00001101``. Likewise, 17 is +// represented by ``00010001``. 
The bit-wise AND of 13 and 17 is +// therefore ``000000001``, or 1: +// +// >>> np.bitwise_and(13, 17) +// 1 +// +// >>> np.bitwise_and(14, 13) +// 12 +// >>> np.binary_repr(12) +// '1100' +// >>> np.bitwise_and([14,3], 13) +// array([12, 1]) +// +// >>> np.bitwise_and([11,7], [4,25]) +// array([0, 1]) +// >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) +// array([ 2, 4, 16]) +// >>> np.bitwise_and([True, True], [False, True]) +// array([False, True]) +// +// The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on +// ndarrays. +// +// >>> x1 = np.array([2, 5, 255]) +// >>> x2 = np.array([3, 14, 16]) +// >>> x1 & x2 +// array([ 2, 4, 16]) +// +//go:linkname BitwiseAnd py.bitwise_and +func BitwiseAnd(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// bitwise_or(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the bit-wise OR of two arrays element-wise. +// +// Computes the bit-wise OR of the underlying binary representation of +// the integers in the input arrays. This ufunc implements the C/Python +// operator ``|``. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Only integer and boolean types are handled. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Result. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_or +// bitwise_and +// bitwise_xor +// binary_repr : +// Return the binary representation of the input number as a string. +// +// Examples +// -------- +// The number 13 has the binary representation ``00001101``. Likewise, +// 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is +// then ``00011101``, or 29: +// +// >>> np.bitwise_or(13, 16) +// 29 +// >>> np.binary_repr(29) +// '11101' +// +// >>> np.bitwise_or(32, 2) +// 34 +// >>> np.bitwise_or([33, 4], 1) +// array([33, 5]) +// >>> np.bitwise_or([33, 4], [1, 2]) +// array([33, 6]) +// +// >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4])) +// array([ 6, 5, 255]) +// >>> np.array([2, 5, 255]) | np.array([4, 4, 4]) +// array([ 6, 5, 255]) +// >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32), +// ... np.array([4, 4, 4, 2147483647], dtype=np.int32)) +// array([ 6, 5, 255, 2147483647]) +// >>> np.bitwise_or([True, True], [False, True]) +// array([ True, True]) +// +// The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on +// ndarrays. +// +// >>> x1 = np.array([2, 5, 255]) +// >>> x2 = np.array([4, 4, 4]) +// >>> x1 | x2 +// array([ 6, 5, 255]) +// +//go:linkname BitwiseOr py.bitwise_or +func BitwiseOr(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// bitwise_xor(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the bit-wise XOR of two arrays element-wise. 
+// +// Computes the bit-wise XOR of the underlying binary representation of +// the integers in the input arrays. This ufunc implements the C/Python +// operator ``^``. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Only integer and boolean types are handled. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Result. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_xor +// bitwise_and +// bitwise_or +// binary_repr : +// Return the binary representation of the input number as a string. +// +// Examples +// -------- +// The number 13 is represented by ``00001101``. Likewise, 17 is +// represented by ``00010001``. 
The bit-wise XOR of 13 and 17 is +// therefore ``00011100``, or 28: +// +// >>> np.bitwise_xor(13, 17) +// 28 +// >>> np.binary_repr(28) +// '11100' +// +// >>> np.bitwise_xor(31, 5) +// 26 +// >>> np.bitwise_xor([31,3], 5) +// array([26, 6]) +// +// >>> np.bitwise_xor([31,3], [5,6]) +// array([26, 5]) +// >>> np.bitwise_xor([True, True], [False, True]) +// array([ True, False]) +// +// The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on +// ndarrays. +// +// >>> x1 = np.array([True, True]) +// >>> x2 = np.array([False, True]) +// >>> x1 ^ x2 +// array([ True, False]) +// +//go:linkname BitwiseXor py.bitwise_xor +func BitwiseXor(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// cbrt(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the cube-root of an array, element-wise. +// +// .. versionadded:: 1.10.0 +// +// Parameters +// ---------- +// x : array_like +// The values whose cube-roots are required. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// An array of the same shape as `x`, containing the cube +// cube-root of each element in `x`. 
+// If `out` was provided, `y` is a reference to it. +// This is a scalar if `x` is a scalar. +// +// +// Examples +// -------- +// >>> np.cbrt([1,8,27]) +// array([ 1., 2., 3.]) +// +//go:linkname Cbrt py.cbrt +func Cbrt(x *py.Object, out *py.Object) *py.Object +// ceil(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the ceiling of the input, element-wise. +// +// The ceil of the scalar `x` is the smallest integer `i`, such that +// ``i >= x``. It is often denoted as :math:`\lceil x \rceil`. +// +// Parameters +// ---------- +// x : array_like +// Input data. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The ceiling of each element in `x`, with `float` dtype. +// This is a scalar if `x` is a scalar. 
+// +// See Also +// -------- +// floor, trunc, rint, fix +// +// Examples +// -------- +// >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) +// >>> np.ceil(a) +// array([-1., -1., -0., 1., 2., 2., 2.]) +// +//go:linkname Ceil py.ceil +func Ceil(x *py.Object, out *py.Object) *py.Object +// conjugate(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the complex conjugate, element-wise. +// +// The complex conjugate of a complex number is obtained by changing the +// sign of its imaginary part. +// +// Parameters +// ---------- +// x : array_like +// Input value. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The complex conjugate of `x`, with same dtype as `y`. +// This is a scalar if `x` is a scalar. 
+// +// Notes +// ----- +// `conj` is an alias for `conjugate`: +// +// >>> np.conj is np.conjugate +// True +// +// Examples +// -------- +// >>> np.conjugate(1+2j) +// (1-2j) +// +// >>> x = np.eye(2) + 1j * np.eye(2) +// >>> np.conjugate(x) +// array([[ 1.-1.j, 0.-0.j], +// [ 0.-0.j, 1.-1.j]]) +// +//go:linkname Conj py.conj +func Conj(__llgo_va_list ...interface{}) *py.Object +// conjugate(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the complex conjugate, element-wise. +// +// The complex conjugate of a complex number is obtained by changing the +// sign of its imaginary part. +// +// Parameters +// ---------- +// x : array_like +// Input value. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The complex conjugate of `x`, with same dtype as `y`. +// This is a scalar if `x` is a scalar. 
+// +// Notes +// ----- +// `conj` is an alias for `conjugate`: +// +// >>> np.conj is np.conjugate +// True +// +// Examples +// -------- +// >>> np.conjugate(1+2j) +// (1-2j) +// +// >>> x = np.eye(2) + 1j * np.eye(2) +// >>> np.conjugate(x) +// array([[ 1.-1.j, 0.-0.j], +// [ 0.-0.j, 1.-1.j]]) +// +//go:linkname Conjugate py.conjugate +func Conjugate(x *py.Object, out *py.Object) *py.Object +// copysign(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Change the sign of x1 to that of x2, element-wise. +// +// If `x2` is a scalar, its sign will be copied to all elements of `x1`. +// +// Parameters +// ---------- +// x1 : array_like +// Values to change the sign of. +// x2 : array_like +// The sign of `x2` is copied to `x1`. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// The values of `x1` with the sign of `x2`. +// This is a scalar if both `x1` and `x2` are scalars. 
+// +// Examples +// -------- +// >>> np.copysign(1.3, -1) +// -1.3 +// >>> 1/np.copysign(0, 1) +// inf +// >>> 1/np.copysign(0, -1) +// -inf +// +// >>> np.copysign([-1, 0, 1], -1.1) +// array([-1., -0., -1.]) +// >>> np.copysign([-1, 0, 1], np.arange(3)-1) +// array([-1., 0., 1.]) +// +//go:linkname Copysign py.copysign +func Copysign(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// cos(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Cosine element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input array in radians. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding cosine values. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// If `out` is provided, the function writes the result into it, +// and returns a reference to `out`. (See Examples) +// +// References +// ---------- +// M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. +// New York, NY: Dover, 1972. 
+// +// Examples +// -------- +// >>> np.cos(np.array([0, np.pi/2, np.pi])) +// array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) +// >>> +// >>> # Example of providing the optional output parameter +// >>> out1 = np.array([0], dtype='d') +// >>> out2 = np.cos([0.1], out1) +// >>> out2 is out1 +// True +// >>> +// >>> # Example of ValueError due to provision of shape mis-matched `out` +// >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) +// Traceback (most recent call last): +// File "", line 1, in +// ValueError: operands could not be broadcast together with shapes (3,3) (2,2) +// +//go:linkname Cos py.cos +func Cos(x *py.Object, out *py.Object) *py.Object +// cosh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Hyperbolic cosine, element-wise. +// +// Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array of same shape as `x`. +// This is a scalar if `x` is a scalar. 
+// +// Examples +// -------- +// >>> np.cosh(0) +// 1.0 +// +// The hyperbolic cosine describes the shape of a hanging cable: +// +// >>> import matplotlib.pyplot as plt +// >>> x = np.linspace(-4, 4, 1000) +// >>> plt.plot(x, np.cosh(x)) +// >>> plt.show() +// +//go:linkname Cosh py.cosh +func Cosh(x *py.Object, out *py.Object) *py.Object +// deg2rad(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Convert angles from degrees to radians. +// +// Parameters +// ---------- +// x : array_like +// Angles in degrees. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding angle in radians. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// rad2deg : Convert angles from radians to degrees. +// unwrap : Remove large jumps in angle by wrapping. +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// ``deg2rad(x)`` is ``x * pi / 180``. 
+// +// Examples +// -------- +// >>> np.deg2rad(180) +// 3.1415926535897931 +// +//go:linkname Deg2rad py.deg2rad +func Deg2rad(x *py.Object, out *py.Object) *py.Object +// degrees(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Convert angles from radians to degrees. +// +// Parameters +// ---------- +// x : array_like +// Input array in radians. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray of floats +// The corresponding degree values; if `out` was supplied this is a +// reference to it. +// This is a scalar if `x` is a scalar. 
+// +// See Also +// -------- +// rad2deg : equivalent function +// +// Examples +// -------- +// Convert a radian array to degrees +// +// >>> rad = np.arange(12.)*np.pi/6 +// >>> np.degrees(rad) +// array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., +// 270., 300., 330.]) +// +// >>> out = np.zeros((rad.shape)) +// >>> r = np.degrees(rad, out) +// >>> np.all(r == out) +// True +// +//go:linkname Degrees py.degrees +func Degrees(x *py.Object, out *py.Object) *py.Object +// divide(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Divide arguments element-wise. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend array. +// x2 : array_like +// Divisor array. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The quotient ``x1/x2``, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// seterr : Set whether to raise or warn on overflow, underflow and +// division by zero. 
+// +// Notes +// ----- +// Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting. +// +// The ``true_divide(x1, x2)`` function is an alias for +// ``divide(x1, x2)``. +// +// Examples +// -------- +// >>> np.divide(2.0, 4.0) +// 0.5 +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> np.divide(x1, x2) +// array([[nan, 1. , 1. ], +// [inf, 4. , 2.5], +// [inf, 7. , 4. ]]) +// +// The ``/`` operator can be used as a shorthand for ``np.divide`` on +// ndarrays. +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = 2 * np.ones(3) +// >>> x1 / x2 +// array([[0. , 0.5, 1. ], +// [1.5, 2. , 2.5], +// [3. , 3.5, 4. ]]) +// +//go:linkname Divide py.divide +func Divide(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// divmod(x1, x2[, out1, out2], / [, out=(None, None)], *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return element-wise quotient and remainder simultaneously. +// +// .. versionadded:: 1.13.0 +// +// ``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster +// because it avoids redundant work. It is used to implement the Python +// built-in function ``divmod`` on NumPy arrays. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend array. +// x2 : array_like +// Divisor array. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. 
+// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out1 : ndarray +// Element-wise quotient resulting from floor division. +// This is a scalar if both `x1` and `x2` are scalars. +// out2 : ndarray +// Element-wise remainder from floor division. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// floor_divide : Equivalent to Python's ``//`` operator. +// remainder : Equivalent to Python's ``%`` operator. +// modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return +// values switched. +// +// Examples +// -------- +// >>> np.divmod(np.arange(5), 3) +// (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) +// +// The `divmod` function can be used as a shorthand for ``np.divmod`` on +// ndarrays. +// +// >>> x = np.arange(5) +// >>> divmod(x, 3) +// (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) +// +//go:linkname Divmod py.divmod +func Divmod(x1 *py.Object, x2 *py.Object, out1 *py.Object, out2 *py.Object, out *py.Object) *py.Object +// equal(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return (x1 == x2) element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array, element-wise comparison of `x1` and `x2`. +// Typically of type bool, unless ``dtype=object`` is passed. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// not_equal, greater_equal, less_equal, greater, less +// +// Examples +// -------- +// >>> np.equal([0, 1, 3], np.arange(3)) +// array([ True, True, False]) +// +// What is compared are values, not types. So an int (1) and an array of +// length one can evaluate as True: +// +// >>> np.equal(1, np.ones(1)) +// array([ True]) +// +// The ``==`` operator can be used as a shorthand for ``np.equal`` on +// ndarrays. +// +// >>> a = np.array([2, 4, 6]) +// >>> b = np.array([2, 4, 2]) +// >>> a == b +// array([ True, True, False]) +// +//go:linkname Equal py.equal +func Equal(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// exp(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Calculate the exponential of all elements in the input array. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array, element-wise exponential of `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// expm1 : Calculate ``exp(x) - 1`` for all elements in the array. +// exp2 : Calculate ``2**x`` for all elements in the array. +// +// Notes +// ----- +// The irrational number ``e`` is also known as Euler's number. It is +// approximately 2.718281, and is the base of the natural logarithm, +// ``ln`` (this means that, if :math:`x = \ln y = \log_e y`, +// then :math:`e^x = y`. For real input, ``exp(x)`` is always positive. +// +// For complex arguments, ``x = a + ib``, we can write +// :math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already +// known (it is the real argument, described above). The second term, +// :math:`e^{ib}`, is :math:`\cos b + i \sin b`, a function with +// magnitude 1 and a periodic phase. +// +// References +// ---------- +// .. [1] Wikipedia, "Exponential function", +// https://en.wikipedia.org/wiki/Exponential_function +// .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions +// with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 
69, +// https://personal.math.ubc.ca/~cbm/aands/page_69.htm +// +// Examples +// -------- +// Plot the magnitude and phase of ``exp(x)`` in the complex plane: +// +// >>> import matplotlib.pyplot as plt +// +// >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) +// >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane +// >>> out = np.exp(xx) +// +// >>> plt.subplot(121) +// >>> plt.imshow(np.abs(out), +// ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray') +// >>> plt.title('Magnitude of exp(x)') +// +// >>> plt.subplot(122) +// >>> plt.imshow(np.angle(out), +// ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv') +// >>> plt.title('Phase (angle) of exp(x)') +// >>> plt.show() +// +//go:linkname Exp py.exp +func Exp(x *py.Object, out *py.Object) *py.Object +// exp2(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Calculate `2**p` for all `p` in the input array. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Element-wise 2 to the power `x`. 
+// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// power +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// +// +// Examples +// -------- +// >>> np.exp2([2, 3]) +// array([ 4., 8.]) +// +//go:linkname Exp2 py.exp2 +func Exp2(x *py.Object, out *py.Object) *py.Object +// expm1(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Calculate ``exp(x) - 1`` for all elements in the array. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Element-wise exponential minus one: ``out = exp(x) - 1``. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// log1p : ``log(1 + x)``, the inverse of expm1. +// +// +// Notes +// ----- +// This function provides greater precision than ``exp(x) - 1`` +// for small values of ``x``. +// +// Examples +// -------- +// The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to +// about 32 significant digits. This example shows the superiority of +// expm1 in this case. 
+// +// >>> np.expm1(1e-10) +// 1.00000000005e-10 +// >>> np.exp(1e-10) - 1 +// 1.000000082740371e-10 +// +//go:linkname Expm1 py.expm1 +func Expm1(x *py.Object, out *py.Object) *py.Object +// fabs(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the absolute values element-wise. +// +// This function returns the absolute values (positive magnitude) of the +// data in `x`. Complex values are not handled, use `absolute` to find the +// absolute values of complex data. +// +// Parameters +// ---------- +// x : array_like +// The array of numbers for which the absolute values are required. If +// `x` is a scalar, the result `y` will also be a scalar. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The absolute values of `x`, the returned values are always floats. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// absolute : Absolute values including `complex` types. 
+// +// Examples +// -------- +// >>> np.fabs(-1) +// 1.0 +// >>> np.fabs([-1.2, 1.2]) +// array([ 1.2, 1.2]) +// +//go:linkname Fabs py.fabs +func Fabs(x *py.Object, out *py.Object) *py.Object +// floor(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the floor of the input, element-wise. +// +// The floor of the scalar `x` is the largest integer `i`, such that +// `i <= x`. It is often denoted as :math:`\lfloor x \rfloor`. +// +// Parameters +// ---------- +// x : array_like +// Input data. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The floor of each element in `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// ceil, trunc, rint, fix +// +// Notes +// ----- +// Some spreadsheet programs calculate the "floor-towards-zero", where +// ``floor(-2.5) == -2``. NumPy instead uses the definition of +// `floor` where `floor(-2.5) == -3`. The "floor-towards-zero" +// function is called ``fix`` in NumPy. 
+// +// Examples +// -------- +// >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) +// >>> np.floor(a) +// array([-2., -2., -1., 0., 1., 1., 2.]) +// +//go:linkname Floor py.floor +func Floor(x *py.Object, out *py.Object) *py.Object +// floor_divide(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the largest integer smaller or equal to the division of the inputs. +// It is equivalent to the Python ``//`` operator and pairs with the +// Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)`` +// up to roundoff. +// +// Parameters +// ---------- +// x1 : array_like +// Numerator. +// x2 : array_like +// Denominator. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// y = floor(`x1`/`x2`) +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// remainder : Remainder complementary to floor_divide. +// divmod : Simultaneous floor division and remainder. +// divide : Standard division. 
+// floor : Round a number to the nearest integer toward minus infinity. +// ceil : Round a number to the nearest integer toward infinity. +// +// Examples +// -------- +// >>> np.floor_divide(7,3) +// 2 +// >>> np.floor_divide([1., 2., 3., 4.], 2.5) +// array([ 0., 0., 1., 1.]) +// +// The ``//`` operator can be used as a shorthand for ``np.floor_divide`` +// on ndarrays. +// +// >>> x1 = np.array([1., 2., 3., 4.]) +// >>> x1 // 2.5 +// array([0., 0., 1., 1.]) +// +//go:linkname FloorDivide py.floor_divide +func FloorDivide(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// float_power(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// First array elements raised to powers from second array, element-wise. +// +// Raise each base in `x1` to the positionally-corresponding power in `x2`. +// `x1` and `x2` must be broadcastable to the same shape. This differs from +// the power function in that integers, float16, and float32 are promoted to +// floats with a minimum precision of float64 so that the result is always +// inexact. The intent is that the function will return a usable result for +// negative powers and seldom overflow for positive powers. +// +// Negative values raised to a non-integral value will return ``nan``. +// To get complex results, cast the input to complex, or specify the +// ``dtype`` to be ``complex`` (see the example below). +// +// .. versionadded:: 1.12.0 +// +// Parameters +// ---------- +// x1 : array_like +// The bases. +// x2 : array_like +// The exponents. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The bases in `x1` raised to the exponents in `x2`. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// power : power function that preserves type +// +// Examples +// -------- +// Cube each element in a list. +// +// >>> x1 = range(6) +// >>> x1 +// [0, 1, 2, 3, 4, 5] +// >>> np.float_power(x1, 3) +// array([ 0., 1., 8., 27., 64., 125.]) +// +// Raise the bases to different exponents. +// +// >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] +// >>> np.float_power(x1, x2) +// array([ 0., 1., 8., 27., 16., 5.]) +// +// The effect of broadcasting. +// +// >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) +// >>> x2 +// array([[1, 2, 3, 3, 2, 1], +// [1, 2, 3, 3, 2, 1]]) +// >>> np.float_power(x1, x2) +// array([[ 0., 1., 8., 27., 16., 5.], +// [ 0., 1., 8., 27., 16., 5.]]) +// +// Negative values raised to a non-integral value will result in ``nan`` +// (and a warning will be generated). +// +// >>> x3 = np.array([-1, -4]) +// >>> with np.errstate(invalid='ignore'): +// ... p = np.float_power(x3, 1.5) +// ... +// >>> p +// array([nan, nan]) +// +// To get complex results, give the argument ``dtype=complex``. 
+// +// >>> np.float_power(x3, 1.5, dtype=complex) +// array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) +// +//go:linkname FloatPower py.float_power +func FloatPower(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// fmax(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Element-wise maximum of array elements. +// +// Compare two arrays and return a new array containing the element-wise +// maxima. If one of the elements being compared is a NaN, then the +// non-nan element is returned. If both elements are NaNs then the first +// is returned. The latter distinction is important for complex NaNs, +// which are defined as at least one of the real or imaginary parts being +// a NaN. The net effect is that NaNs are ignored when possible. +// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays holding the elements to be compared. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// y : ndarray or scalar +// The maximum of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// fmin : +// Element-wise minimum of two arrays, ignores NaNs. +// maximum : +// Element-wise maximum of two arrays, propagates NaNs. +// amax : +// The maximum value of an array along a given axis, propagates NaNs. +// nanmax : +// The maximum value of an array along a given axis, ignores NaNs. +// +// minimum, amin, nanmin +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither +// x1 nor x2 are NaNs, but it is faster and does proper broadcasting. +// +// Examples +// -------- +// >>> np.fmax([2, 3, 4], [1, 5, 2]) +// array([ 2., 5., 4.]) +// +// >>> np.fmax(np.eye(2), [0.5, 2]) +// array([[ 1. , 2. ], +// [ 0.5, 2. ]]) +// +// >>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan]) +// array([ 0., 0., nan]) +// +//go:linkname Fmax py.fmax +func Fmax(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// fmin(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Element-wise minimum of array elements. +// +// Compare two arrays and return a new array containing the element-wise +// minima. If one of the elements being compared is a NaN, then the +// non-nan element is returned. If both elements are NaNs then the first +// is returned. The latter distinction is important for complex NaNs, +// which are defined as at least one of the real or imaginary parts being +// a NaN. The net effect is that NaNs are ignored when possible. +// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays holding the elements to be compared. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). 
+// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The minimum of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// fmax : +// Element-wise maximum of two arrays, ignores NaNs. +// minimum : +// Element-wise minimum of two arrays, propagates NaNs. +// amin : +// The minimum value of an array along a given axis, propagates NaNs. +// nanmin : +// The minimum value of an array along a given axis, ignores NaNs. +// +// maximum, amax, nanmax +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither +// x1 nor x2 are NaNs, but it is faster and does proper broadcasting. +// +// Examples +// -------- +// >>> np.fmin([2, 3, 4], [1, 5, 2]) +// array([1, 3, 2]) +// +// >>> np.fmin(np.eye(2), [0.5, 2]) +// array([[ 0.5, 0. ], +// [ 0. , 1. 
]]) +// +// >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan]) +// array([ 0., 0., nan]) +// +//go:linkname Fmin py.fmin +func Fmin(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// fmod(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns the element-wise remainder of division. +// +// This is the NumPy implementation of the C library function fmod, the +// remainder has the same sign as the dividend `x1`. It is equivalent to +// the Matlab(TM) ``rem`` function and should not be confused with the +// Python modulus operator ``x1 % x2``. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend. +// x2 : array_like +// Divisor. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : array_like +// The remainder of the division of `x1` by `x2`. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// remainder : Equivalent to the Python ``%`` operator. 
+// divide +// +// Notes +// ----- +// The result of the modulo operation for negative dividend and divisors +// is bound by conventions. For `fmod`, the sign of result is the sign of +// the dividend, while for `remainder` the sign of the result is the sign +// of the divisor. The `fmod` function is equivalent to the Matlab(TM) +// ``rem`` function. +// +// Examples +// -------- +// >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) +// array([-1, 0, -1, 1, 0, 1]) +// >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) +// array([1, 0, 1, 1, 0, 1]) +// +// >>> np.fmod([5, 3], [2, 2.]) +// array([ 1., 1.]) +// >>> a = np.arange(-3, 3).reshape(3, 2) +// >>> a +// array([[-3, -2], +// [-1, 0], +// [ 1, 2]]) +// >>> np.fmod(a, [2,2]) +// array([[-1, 0], +// [-1, 0], +// [ 1, 0]]) +// +//go:linkname Fmod py.fmod +func Fmod(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// frexp(x[, out1, out2], / [, out=(None, None)], *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Decompose the elements of x into mantissa and twos exponent. +// +// Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``. +// The mantissa lies in the open interval(-1, 1), while the twos +// exponent is a signed integer. +// +// Parameters +// ---------- +// x : array_like +// Array of numbers to be decomposed. +// out1 : ndarray, optional +// Output array for the mantissa. Must have the same shape as `x`. +// out2 : ndarray, optional +// Output array for the exponent. Must have the same shape as `x`. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. 
At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// mantissa : ndarray +// Floating values between -1 and 1. +// This is a scalar if `x` is a scalar. +// exponent : ndarray +// Integer exponents of 2. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`. +// +// Notes +// ----- +// Complex dtypes are not supported, they will raise a TypeError. +// +// Examples +// -------- +// >>> x = np.arange(9) +// >>> y1, y2 = np.frexp(x) +// >>> y1 +// array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, +// 0.5 ]) +// >>> y2 +// array([0, 1, 2, 2, 3, 3, 3, 3, 4]) +// >>> y1 * 2**y2 +// array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) +// +//go:linkname Frexp py.frexp +func Frexp(x *py.Object, out1 *py.Object, out2 *py.Object, out *py.Object) *py.Object +// frompyfunc(func, /, nin, nout, *[, identity]) +// +// Takes an arbitrary Python function and returns a NumPy ufunc. +// +// Can be used, for example, to add broadcasting to a built-in Python +// function (see Examples section). +// +// Parameters +// ---------- +// func : Python function object +// An arbitrary Python function. +// nin : int +// The number of input arguments. +// nout : int +// The number of objects returned by `func`. +// identity : object, optional +// The value to use for the `~numpy.ufunc.identity` attribute of the resulting +// object. If specified, this is equivalent to setting the underlying +// C ``identity`` field to ``PyUFunc_IdentityValue``. +// If omitted, the identity is set to ``PyUFunc_None``. 
Note that this is +// _not_ equivalent to setting the identity to ``None``, which implies the +// operation is reorderable. +// +// Returns +// ------- +// out : ufunc +// Returns a NumPy universal function (``ufunc``) object. +// +// See Also +// -------- +// vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy. +// +// Notes +// ----- +// The returned ufunc always returns PyObject arrays. +// +// Examples +// -------- +// Use frompyfunc to add broadcasting to the Python function ``oct``: +// +// >>> oct_array = np.frompyfunc(oct, 1, 1) +// >>> oct_array(np.array((10, 30, 100))) +// array(['0o12', '0o36', '0o144'], dtype=object) +// >>> np.array((oct(10), oct(30), oct(100))) # for comparison +// array(['0o12', '0o36', '0o144'], dtype='>> np.gcd(12, 20) +// 4 +// >>> np.gcd.reduce([15, 25, 35]) +// 5 +// >>> np.gcd(np.arange(6), 20) +// array([20, 1, 2, 1, 4, 5]) +// +//go:linkname Gcd py.gcd +func Gcd(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// geterrobj() +// +// Return the current object that defines floating-point error handling. +// +// The error object contains all information that defines the error handling +// behavior in NumPy. `geterrobj` is used internally by the other +// functions that get and set error handling behavior (`geterr`, `seterr`, +// `geterrcall`, `seterrcall`). +// +// Returns +// ------- +// errobj : list +// The error object, a list containing three elements: +// [internal numpy buffer size, error mask, error callback function]. +// +// The error mask is a single integer that holds the treatment information +// on all four floating point errors. The information for each error type +// is contained in three bits of the integer. If we print it in base 8, we +// can see what treatment is set for "invalid", "under", "over", and +// "divide" (in that order). 
The printed string can be interpreted with +// +// * 0 : 'ignore' +// * 1 : 'warn' +// * 2 : 'raise' +// * 3 : 'call' +// * 4 : 'print' +// * 5 : 'log' +// +// See Also +// -------- +// seterrobj, seterr, geterr, seterrcall, geterrcall +// getbufsize, setbufsize +// +// Notes +// ----- +// For complete documentation of the types of floating-point exceptions and +// treatment options, see `seterr`. +// +// Examples +// -------- +// >>> np.geterrobj() # first get the defaults +// [8192, 521, None] +// +// >>> def err_handler(type, flag): +// ... print("Floating point error (%s), with flag %s" % (type, flag)) +// ... +// >>> old_bufsize = np.setbufsize(20000) +// >>> old_err = np.seterr(divide='raise') +// >>> old_handler = np.seterrcall(err_handler) +// >>> np.geterrobj() +// [8192, 521, ] +// +// >>> old_err = np.seterr(all='ignore') +// >>> np.base_repr(np.geterrobj()[1], 8) +// '0' +// >>> old_err = np.seterr(divide='warn', over='log', under='call', +// ... invalid='print') +// >>> np.base_repr(np.geterrobj()[1], 8) +// '4351' +// +//go:linkname Geterrobj py.geterrobj +func Geterrobj() *py.Object +//go:linkname Greater py.greater +func Greater(__llgo_va_list ...interface{}) *py.Object +//go:linkname GreaterEqual py.greater_equal +func GreaterEqual(__llgo_va_list ...interface{}) *py.Object +// heaviside(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the Heaviside step function. +// +// The Heaviside step function is defined as:: +// +// 0 if x1 < 0 +// heaviside(x1, x2) = x2 if x1 == 0 +// 1 if x1 > 0 +// +// where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used. +// +// Parameters +// ---------- +// x1 : array_like +// Input values. +// x2 : array_like +// The value of the function when x1 is 0. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). 
+// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// The output array, element-wise Heaviside step function of `x1`. +// This is a scalar if both `x1` and `x2` are scalars. +// +// Notes +// ----- +// .. versionadded:: 1.13.0 +// +// References +// ---------- +// .. Wikipedia, "Heaviside step function", +// https://en.wikipedia.org/wiki/Heaviside_step_function +// +// Examples +// -------- +// >>> np.heaviside([-1.5, 0, 2.0], 0.5) +// array([ 0. , 0.5, 1. ]) +// >>> np.heaviside([-1.5, 0, 2.0], 1) +// array([ 0., 1., 1.]) +// +//go:linkname Heaviside py.heaviside +func Heaviside(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// hypot(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Given the "legs" of a right triangle, return its hypotenuse. +// +// Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or +// `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), +// it is broadcast for use with each element of the other argument. 
+// (See Examples) +// +// Parameters +// ---------- +// x1, x2 : array_like +// Leg of the triangle(s). +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// z : ndarray +// The hypotenuse of the triangle(s). +// This is a scalar if both `x1` and `x2` are scalars. +// +// Examples +// -------- +// >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) +// array([[ 5., 5., 5.], +// [ 5., 5., 5.], +// [ 5., 5., 5.]]) +// +// Example showing broadcast of scalar_like argument: +// +// >>> np.hypot(3*np.ones((3, 3)), [4]) +// array([[ 5., 5., 5.], +// [ 5., 5., 5.], +// [ 5., 5., 5.]]) +// +//go:linkname Hypot py.hypot +func Hypot(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// invert(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute bit-wise inversion, or bit-wise NOT, element-wise. +// +// Computes the bit-wise NOT of the underlying binary representation of +// the integers in the input arrays. This ufunc implements the C/Python +// operator ``~``. 
+// +// For signed integer inputs, the two's complement is returned. In a +// two's-complement system negative numbers are represented by the two's +// complement of the absolute value. This is the most common method of +// representing signed integers on computers [1]_. A N-bit +// two's-complement system can represent every integer in the range +// :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. +// +// Parameters +// ---------- +// x : array_like +// Only integer and boolean types are handled. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Result. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// bitwise_and, bitwise_or, bitwise_xor +// logical_not +// binary_repr : +// Return the binary representation of the input number as a string. +// +// Notes +// ----- +// `bitwise_not` is an alias for `invert`: +// +// >>> np.bitwise_not is np.invert +// True +// +// References +// ---------- +// .. [1] Wikipedia, "Two's complement", +// https://en.wikipedia.org/wiki/Two's_complement +// +// Examples +// -------- +// We've seen that 13 is represented by ``00001101``. 
+// The invert or bit-wise NOT of 13 is then: +// +// >>> x = np.invert(np.array(13, dtype=np.uint8)) +// >>> x +// 242 +// >>> np.binary_repr(x, width=8) +// '11110010' +// +// The result depends on the bit-width: +// +// >>> x = np.invert(np.array(13, dtype=np.uint16)) +// >>> x +// 65522 +// >>> np.binary_repr(x, width=16) +// '1111111111110010' +// +// When using signed integer types the result is the two's complement of +// the result for the unsigned type: +// +// >>> np.invert(np.array([13], dtype=np.int8)) +// array([-14], dtype=int8) +// >>> np.binary_repr(-14, width=8) +// '11110010' +// +// Booleans are accepted as well: +// +// >>> np.invert(np.array([True, False])) +// array([False, True]) +// +// The ``~`` operator can be used as a shorthand for ``np.invert`` on +// ndarrays. +// +// >>> x1 = np.array([True, False]) +// >>> ~x1 +// array([False, True]) +// +//go:linkname Invert py.invert +func Invert(x *py.Object, out *py.Object) *py.Object +// isfinite(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Test element-wise for finiteness (not infinity and not Not a Number). +// +// The result is returned as a boolean array. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray, bool +// True where ``x`` is not positive infinity, negative infinity, +// or NaN; false otherwise. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// isinf, isneginf, isposinf, isnan +// +// Notes +// ----- +// Not a Number, positive infinity and negative infinity are considered +// to be non-finite. +// +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). This means that Not a Number is not equivalent to infinity. +// Also that positive infinity is not equivalent to negative infinity. But +// infinity is equivalent to positive infinity. Errors result if the +// second argument is also supplied when `x` is a scalar input, or if +// first and second arguments have different shapes. +// +// Examples +// -------- +// >>> np.isfinite(1) +// True +// >>> np.isfinite(0) +// True +// >>> np.isfinite(np.nan) +// False +// >>> np.isfinite(np.inf) +// False +// >>> np.isfinite(np.NINF) +// False +// >>> np.isfinite([np.log(-1.),1.,np.log(0)]) +// array([False, True, False]) +// +// >>> x = np.array([-np.inf, 0., np.inf]) +// >>> y = np.array([2, 2, 2]) +// >>> np.isfinite(x, y) +// array([0, 1, 0]) +// >>> y +// array([0, 1, 0]) +// +//go:linkname Isfinite py.isfinite +func Isfinite(x *py.Object, out *py.Object) *py.Object +// isinf(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Test element-wise for positive or negative infinity. +// +// Returns a boolean array of the same shape as `x`, True where ``x == +// +/-inf``, otherwise False. 
+// +// Parameters +// ---------- +// x : array_like +// Input values +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : bool (scalar) or boolean ndarray +// True where ``x`` is positive or negative infinity, false otherwise. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// isneginf, isposinf, isnan, isfinite +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). +// +// Errors result if the second argument is supplied when the first +// argument is a scalar, or if the first and second arguments have +// different shapes. 
+// +// Examples +// -------- +// >>> np.isinf(np.inf) +// True +// >>> np.isinf(np.nan) +// False +// >>> np.isinf(np.NINF) +// True +// >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) +// array([ True, True, False, False]) +// +// >>> x = np.array([-np.inf, 0., np.inf]) +// >>> y = np.array([2, 2, 2]) +// >>> np.isinf(x, y) +// array([1, 0, 1]) +// >>> y +// array([1, 0, 1]) +// +//go:linkname Isinf py.isinf +func Isinf(x *py.Object, out *py.Object) *py.Object +// isnan(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Test element-wise for NaN and return result as a boolean array. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or bool +// True where ``x`` is NaN, false otherwise. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// isinf, isneginf, isposinf, isfinite, isnat +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). This means that Not a Number is not equivalent to infinity. 
+// +// Examples +// -------- +// >>> np.isnan(np.nan) +// True +// >>> np.isnan(np.inf) +// False +// >>> np.isnan([np.log(-1.),1.,np.log(0)]) +// array([ True, False, False]) +// +//go:linkname Isnan py.isnan +func Isnan(x *py.Object, out *py.Object) *py.Object +// isnat(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Test element-wise for NaT (not a time) and return result as a boolean array. +// +// .. versionadded:: 1.13.0 +// +// Parameters +// ---------- +// x : array_like +// Input array with datetime or timedelta data type. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or bool +// True where ``x`` is NaT, false otherwise. +// This is a scalar if `x` is a scalar. 
+// +// See Also +// -------- +// isnan, isinf, isneginf, isposinf, isfinite +// +// Examples +// -------- +// >>> np.isnat(np.datetime64("NaT")) +// True +// >>> np.isnat(np.datetime64("2016-01-01")) +// False +// >>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]")) +// array([ True, False]) +// +//go:linkname Isnat py.isnat +func Isnat(x *py.Object, out *py.Object) *py.Object +// lcm(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns the lowest common multiple of ``|x1|`` and ``|x2|`` +// +// Parameters +// ---------- +// x1, x2 : array_like, int +// Arrays of values. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// +// Returns +// ------- +// y : ndarray or scalar +// The lowest common multiple of the absolute value of the inputs +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// gcd : The greatest common divisor +// +// Examples +// -------- +// >>> np.lcm(12, 20) +// 60 +// >>> np.lcm.reduce([3, 12, 20]) +// 60 +// >>> np.lcm.reduce([40, 12, 20]) +// 120 +// >>> np.lcm(np.arange(6), 20) +// array([ 0, 20, 20, 60, 20, 20]) +// +//go:linkname Lcm py.lcm +func Lcm(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// ldexp(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns x1 * 2**x2, element-wise. +// +// The mantissas `x1` and twos exponents `x2` are used to construct +// floating point numbers ``x1 * 2**x2``. +// +// Parameters +// ---------- +// x1 : array_like +// Array of multipliers. +// x2 : array_like, int +// Array of twos exponents. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). 
+// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The result of ``x1 * 2**x2``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`. +// +// Notes +// ----- +// Complex dtypes are not supported, they will raise a TypeError. +// +// `ldexp` is useful as the inverse of `frexp`, if used by itself it is +// more clear to simply use the expression ``x1 * 2**x2``. +// +// Examples +// -------- +// >>> np.ldexp(5, np.arange(4)) +// array([ 5., 10., 20., 40.], dtype=float16) +// +// >>> x = np.arange(6) +// >>> np.ldexp(*np.frexp(x)) +// array([ 0., 1., 2., 3., 4., 5.]) +// +//go:linkname Ldexp py.ldexp +func Ldexp(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// left_shift(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Shift the bits of an integer to the left. +// +// Bits are shifted to the left by appending `x2` 0s at the right of `x1`. 
+// Since the internal representation of numbers is in binary format, this +// operation is equivalent to multiplying `x1` by ``2**x2``. +// +// Parameters +// ---------- +// x1 : array_like of integer type +// Input values. +// x2 : array_like of integer type +// Number of zeros to append to `x1`. Has to be non-negative. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : array of integer type +// Return `x1` with bits shifted `x2` times to the left. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// right_shift : Shift the bits of an integer to the right. +// binary_repr : Return the binary representation of the input number +// as a string. 
+// +// Examples +// -------- +// >>> np.binary_repr(5) +// '101' +// >>> np.left_shift(5, 2) +// 20 +// >>> np.binary_repr(20) +// '10100' +// +// >>> np.left_shift(5, [1,2,3]) +// array([10, 20, 40]) +// +// Note that the dtype of the second argument may change the dtype of the +// result and can lead to unexpected results in some cases (see +// :ref:`Casting Rules `): +// +// >>> a = np.left_shift(np.uint8(255), 1) # Expect 254 +// >>> print(a, type(a)) # Unexpected result due to upcasting +// 510 +// >>> b = np.left_shift(np.uint8(255), np.uint8(1)) +// >>> print(b, type(b)) +// 254 +// +// The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on +// ndarrays. +// +// >>> x1 = 5 +// >>> x2 = np.array([1, 2, 3]) +// >>> x1 << x2 +// array([10, 20, 40]) +// +//go:linkname LeftShift py.left_shift +func LeftShift(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// less(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the truth value of (x1 < x2) element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array, element-wise comparison of `x1` and `x2`. +// Typically of type bool, unless ``dtype=object`` is passed. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// greater, less_equal, greater_equal, equal, not_equal +// +// Examples +// -------- +// >>> np.less([1, 2], [2, 2]) +// array([ True, False]) +// +// The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays. +// +// >>> a = np.array([1, 2]) +// >>> b = np.array([2, 2]) +// >>> a < b +// array([ True, False]) +// +//go:linkname Less py.less +func Less(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// less_equal(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the truth value of (x1 <= x2) element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array, element-wise comparison of `x1` and `x2`. +// Typically of type bool, unless ``dtype=object`` is passed. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// greater, less, greater_equal, equal, not_equal +// +// Examples +// -------- +// >>> np.less_equal([4, 2, 1], [2, 2, 2]) +// array([False, True, True]) +// +// The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on +// ndarrays. +// +// >>> a = np.array([4, 2, 1]) +// >>> b = np.array([2, 2, 2]) +// >>> a <= b +// array([False, True, True]) +// +//go:linkname LessEqual py.less_equal +func LessEqual(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// log(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Natural logarithm, element-wise. +// +// The natural logarithm `log` is the inverse of the exponential function, +// so that `log(exp(x)) = x`. The natural logarithm is logarithm in base +// `e`. +// +// Parameters +// ---------- +// x : array_like +// Input value. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The natural logarithm of `x`, element-wise. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// log10, log2, log1p, emath.log +// +// Notes +// ----- +// Logarithm is a multivalued function: for each `x` there is an infinite +// number of `z` such that `exp(z) = x`. The convention is to return the +// `z` whose imaginary part lies in `(-pi, pi]`. +// +// For real-valued input data types, `log` always returns real output. For +// each value that cannot be expressed as a real number or infinity, it +// yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `log` is a complex analytical function that +// has a branch cut `[-inf, 0]` and is continuous from above on it. `log` +// handles the floating-point negative zero as an infinitesimal negative +// number, conforming to the C99 standard. +// +// In the cases where the input has a negative real part and a very small +// negative complex part (approaching 0), the result is so close to `-pi` +// that it evaluates to exactly `-pi`. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 67. +// https://personal.math.ubc.ca/~cbm/aands/page_67.htm +// .. [2] Wikipedia, "Logarithm". 
https://en.wikipedia.org/wiki/Logarithm +// +// Examples +// -------- +// >>> np.log([1, np.e, np.e**2, 0]) +// array([ 0., 1., 2., -Inf]) +// +//go:linkname Log py.log +func Log(x *py.Object, out *py.Object) *py.Object +// log10(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the base 10 logarithm of the input array, element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The logarithm to the base 10 of `x`, element-wise. NaNs are +// returned where x is negative. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// emath.log10 +// +// Notes +// ----- +// Logarithm is a multivalued function: for each `x` there is an infinite +// number of `z` such that `10**z = x`. The convention is to return the +// `z` whose imaginary part lies in `(-pi, pi]`. +// +// For real-valued input data types, `log10` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. 
+// +// For complex-valued input, `log10` is a complex analytical function that +// has a branch cut `[-inf, 0]` and is continuous from above on it. +// `log10` handles the floating-point negative zero as an infinitesimal +// negative number, conforming to the C99 standard. +// +// In the cases where the input has a negative real part and a very small +// negative complex part (approaching 0), the result is so close to `-pi` +// that it evaluates to exactly `-pi`. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 67. +// https://personal.math.ubc.ca/~cbm/aands/page_67.htm +// .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm +// +// Examples +// -------- +// >>> np.log10([1e-15, -3.]) +// array([-15., nan]) +// +//go:linkname Log10 py.log10 +func Log10(x *py.Object, out *py.Object) *py.Object +// log1p(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the natural logarithm of one plus the input array, element-wise. +// +// Calculates ``log(1 + x)``. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. 
+// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// Natural logarithm of `1 + x`, element-wise. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// expm1 : ``exp(x) - 1``, the inverse of `log1p`. +// +// Notes +// ----- +// For real-valued input, `log1p` is accurate also for `x` so small +// that `1 + x == 1` in floating-point accuracy. +// +// Logarithm is a multivalued function: for each `x` there is an infinite +// number of `z` such that `exp(z) = 1 + x`. The convention is to return +// the `z` whose imaginary part lies in `[-pi, pi]`. +// +// For real-valued input data types, `log1p` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `log1p` is a complex analytical function that +// has a branch cut `[-inf, -1]` and is continuous from above on it. +// `log1p` handles the floating-point negative zero as an infinitesimal +// negative number, conforming to the C99 standard. +// +// References +// ---------- +// .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", +// 10th printing, 1964, pp. 67. +// https://personal.math.ubc.ca/~cbm/aands/page_67.htm +// .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm +// +// Examples +// -------- +// >>> np.log1p(1e-99) +// 1e-99 +// >>> np.log(1 + 1e-99) +// 0.0 +// +//go:linkname Log1p py.log1p +func Log1p(x *py.Object, out *py.Object) *py.Object +// log2(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Base-2 logarithm of `x`. +// +// Parameters +// ---------- +// x : array_like +// Input values. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. 
If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// Base-2 logarithm of `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// log, log10, log1p, emath.log2 +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// Logarithm is a multivalued function: for each `x` there is an infinite +// number of `z` such that `2**z = x`. The convention is to return the `z` +// whose imaginary part lies in `(-pi, pi]`. +// +// For real-valued input data types, `log2` always returns real output. +// For each value that cannot be expressed as a real number or infinity, +// it yields ``nan`` and sets the `invalid` floating point error flag. +// +// For complex-valued input, `log2` is a complex analytical function that +// has a branch cut `[-inf, 0]` and is continuous from above on it. `log2` +// handles the floating-point negative zero as an infinitesimal negative +// number, conforming to the C99 standard. +// +// In the cases where the input has a negative real part and a very small +// negative complex part (approaching 0), the result is so close to `-pi` +// that it evaluates to exactly `-pi`. 
+// +// Examples +// -------- +// >>> x = np.array([0, 1, 2, 2**4]) +// >>> np.log2(x) +// array([-Inf, 0., 1., 4.]) +// +// >>> xi = np.array([0+1.j, 1, 2+0.j, 4.j]) +// >>> np.log2(xi) +// array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j]) +// +//go:linkname Log2 py.log2 +func Log2(x *py.Object, out *py.Object) *py.Object +// logaddexp(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Logarithm of the sum of exponentiations of the inputs. +// +// Calculates ``log(exp(x1) + exp(x2))``. This function is useful in +// statistics where the calculated probabilities of events may be so small +// as to exceed the range of normal floating point numbers. In such cases +// the logarithm of the calculated probability is stored. This function +// allows adding probabilities stored in such a fashion. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input values. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// result : ndarray +// Logarithm of ``exp(x1) + exp(x2)``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2. +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// Examples +// -------- +// >>> prob1 = np.log(1e-50) +// >>> prob2 = np.log(2.5e-50) +// >>> prob12 = np.logaddexp(prob1, prob2) +// >>> prob12 +// -113.87649168120691 +// >>> np.exp(prob12) +// 3.5000000000000057e-50 +// +//go:linkname Logaddexp py.logaddexp +func Logaddexp(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// logaddexp2(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Logarithm of the sum of exponentiations of the inputs in base-2. +// +// Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine +// learning when the calculated probabilities of events may be so small as +// to exceed the range of normal floating point numbers. In such cases +// the base-2 logarithm of the calculated probability can be used instead. +// This function allows adding probabilities stored in such a fashion. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input values. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. 
+// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// result : ndarray +// Base-2 logarithm of ``2**x1 + 2**x2``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logaddexp: Logarithm of the sum of exponentiations of the inputs. +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// Examples +// -------- +// >>> prob1 = np.log2(1e-50) +// >>> prob2 = np.log2(2.5e-50) +// >>> prob12 = np.logaddexp2(prob1, prob2) +// >>> prob1, prob2, prob12 +// (-166.09640474436813, -164.77447664948076, -164.28904982231052) +// >>> 2**prob12 +// 3.4999999999999914e-50 +// +//go:linkname Logaddexp2 py.logaddexp2 +func Logaddexp2(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// logical_and(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the truth value of x1 AND x2 element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or bool +// Boolean result of the logical AND operation applied to the elements +// of `x1` and `x2`; the shape is determined by broadcasting. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_or, logical_not, logical_xor +// bitwise_and +// +// Examples +// -------- +// >>> np.logical_and(True, False) +// False +// >>> np.logical_and([True, False], [False, False]) +// array([False, False]) +// +// >>> x = np.arange(5) +// >>> np.logical_and(x>1, x<4) +// array([False, False, True, True, False]) +// +// +// The ``&`` operator can be used as a shorthand for ``np.logical_and`` on +// boolean ndarrays. +// +// >>> a = np.array([True, False]) +// >>> b = np.array([False, False]) +// >>> a & b +// array([False, False]) +// +//go:linkname LogicalAnd py.logical_and +func LogicalAnd(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// logical_not(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the truth value of NOT x element-wise. +// +// Parameters +// ---------- +// x : array_like +// Logical NOT is applied to the elements of `x`. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. 
+// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : bool or ndarray of bool +// Boolean result with the same shape as `x` of the NOT operation +// on elements of `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// logical_and, logical_or, logical_xor +// +// Examples +// -------- +// >>> np.logical_not(3) +// False +// >>> np.logical_not([True, False, 0, 1]) +// array([False, True, True, False]) +// +// >>> x = np.arange(5) +// >>> np.logical_not(x<3) +// array([False, False, False, True, True]) +// +//go:linkname LogicalNot py.logical_not +func LogicalNot(x *py.Object, out *py.Object) *py.Object +// logical_or(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the truth value of x1 OR x2 element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Logical OR is applied to the elements of `x1` and `x2`. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. 
+// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or bool +// Boolean result of the logical OR operation applied to the elements +// of `x1` and `x2`; the shape is determined by broadcasting. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_and, logical_not, logical_xor +// bitwise_or +// +// Examples +// -------- +// >>> np.logical_or(True, False) +// True +// >>> np.logical_or([True, False], [False, False]) +// array([ True, False]) +// +// >>> x = np.arange(5) +// >>> np.logical_or(x < 1, x > 3) +// array([ True, False, False, False, True]) +// +// The ``|`` operator can be used as a shorthand for ``np.logical_or`` on +// boolean ndarrays. +// +// >>> a = np.array([True, False]) +// >>> b = np.array([False, False]) +// >>> a | b +// array([ True, False]) +// +//go:linkname LogicalOr py.logical_or +func LogicalOr(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// logical_xor(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute the truth value of x1 XOR x2, element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Logical XOR is applied to the elements of `x1` and `x2`. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. 
+// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : bool or ndarray of bool +// Boolean result of the logical XOR operation applied to the elements +// of `x1` and `x2`; the shape is determined by broadcasting. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// logical_and, logical_or, logical_not, bitwise_xor +// +// Examples +// -------- +// >>> np.logical_xor(True, False) +// True +// >>> np.logical_xor([True, True, False, False], [True, False, True, False]) +// array([False, True, True, False]) +// +// >>> x = np.arange(5) +// >>> np.logical_xor(x < 1, x > 3) +// array([ True, False, False, False, True]) +// +// Simple example showing support of broadcasting +// +// >>> np.logical_xor(0, np.eye(2)) +// array([[ True, False], +// [False, True]]) +// +//go:linkname LogicalXor py.logical_xor +func LogicalXor(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// maximum(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Element-wise maximum of array elements. +// +// Compare two arrays and return a new array containing the element-wise +// maxima. If one of the elements being compared is a NaN, then that +// element is returned. If both elements are NaNs then the first is +// returned. The latter distinction is important for complex NaNs, which +// are defined as at least one of the real or imaginary parts being a NaN. +// The net effect is that NaNs are propagated. 
+// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays holding the elements to be compared. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The maximum of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// minimum : +// Element-wise minimum of two arrays, propagates NaNs. +// fmax : +// Element-wise maximum of two arrays, ignores NaNs. +// amax : +// The maximum value of an array along a given axis, propagates NaNs. +// nanmax : +// The maximum value of an array along a given axis, ignores NaNs. +// +// fmin, amin, nanmin +// +// Notes +// ----- +// The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when +// neither x1 nor x2 are nans, but it is faster and does proper +// broadcasting. +// +// Examples +// -------- +// >>> np.maximum([2, 3, 4], [1, 5, 2]) +// array([2, 5, 4]) +// +// >>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting +// array([[ 1. , 2. ], +// [ 0.5, 2. 
]]) +// +// >>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan]) +// array([nan, nan, nan]) +// >>> np.maximum(np.Inf, 1) +// inf +// +//go:linkname Maximum py.maximum +func Maximum(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// minimum(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Element-wise minimum of array elements. +// +// Compare two arrays and return a new array containing the element-wise +// minima. If one of the elements being compared is a NaN, then that +// element is returned. If both elements are NaNs then the first is +// returned. The latter distinction is important for complex NaNs, which +// are defined as at least one of the real or imaginary parts being a NaN. +// The net effect is that NaNs are propagated. +// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays holding the elements to be compared. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// y : ndarray or scalar +// The minimum of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// maximum : +// Element-wise maximum of two arrays, propagates NaNs. +// fmin : +// Element-wise minimum of two arrays, ignores NaNs. +// amin : +// The minimum value of an array along a given axis, propagates NaNs. +// nanmin : +// The minimum value of an array along a given axis, ignores NaNs. +// +// fmax, amax, nanmax +// +// Notes +// ----- +// The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when +// neither x1 nor x2 are NaNs, but it is faster and does proper +// broadcasting. +// +// Examples +// -------- +// >>> np.minimum([2, 3, 4], [1, 5, 2]) +// array([1, 3, 2]) +// +// >>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting +// array([[ 0.5, 0. ], +// [ 0. , 1. ]]) +// +// >>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan]) +// array([nan, nan, nan]) +// >>> np.minimum(-np.Inf, 1) +// -inf +// +//go:linkname Minimum py.minimum +func Minimum(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// remainder(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns the element-wise remainder of division. +// +// Computes the remainder complementary to the `floor_divide` function. It is +// equivalent to the Python modulus operator``x1 % x2`` and has the same sign +// as the divisor `x2`. The MATLAB function equivalent to ``np.remainder`` +// is ``mod``. +// +// .. warning:: +// +// This should not be confused with: +// +// * Python 3.7's `math.remainder` and C's ``remainder``, which +// computes the IEEE remainder, which are the complement to +// ``round(x1 / x2)``. +// * The MATLAB ``rem`` function and or the C ``%`` operator which is the +// complement to ``int(x1 / x2)``. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend array. 
+// x2 : array_like +// Divisor array. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The element-wise remainder of the quotient ``floor_divide(x1, x2)``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// floor_divide : Equivalent of Python ``//`` operator. +// divmod : Simultaneous floor division and remainder. +// fmod : Equivalent of the MATLAB ``rem`` function. +// divide, floor +// +// Notes +// ----- +// Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) +// integers. +// ``mod`` is an alias of ``remainder``. +// +// Examples +// -------- +// >>> np.remainder([4, 7], [2, 3]) +// array([0, 1]) +// >>> np.remainder(np.arange(7), 5) +// array([0, 1, 2, 3, 4, 0, 1]) +// +// The ``%`` operator can be used as a shorthand for ``np.remainder`` on +// ndarrays. 
+// +// >>> x1 = np.arange(7) +// >>> x1 % 5 +// array([0, 1, 2, 3, 4, 0, 1]) +// +//go:linkname Mod py.mod +func Mod(__llgo_va_list ...interface{}) *py.Object +// modf(x[, out1, out2], / [, out=(None, None)], *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the fractional and integral parts of an array, element-wise. +// +// The fractional and integral parts are negative if the given number is +// negative. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y1 : ndarray +// Fractional part of `x`. +// This is a scalar if `x` is a scalar. +// y2 : ndarray +// Integral part of `x`. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// For integer input the return values are floats. +// +// See Also +// -------- +// divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values +// switched, except it always has a positive remainder. +// +// Examples +// -------- +// >>> np.modf([0, 3.5]) +// (array([ 0. 
, 0.5]), array([ 0., 3.])) +// >>> np.modf(-0.5) +// (-0.5, -0) +// +//go:linkname Modf py.modf +func Modf(x *py.Object, out1 *py.Object, out2 *py.Object, out *py.Object) *py.Object +// multiply(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Multiply arguments element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays to be multiplied. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The product of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// Notes +// ----- +// Equivalent to `x1` * `x2` in terms of array broadcasting. +// +// Examples +// -------- +// >>> np.multiply(2.0, 4.0) +// 8.0 +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> np.multiply(x1, x2) +// array([[ 0., 1., 4.], +// [ 0., 4., 10.], +// [ 0., 7., 16.]]) +// +// The ``*`` operator can be used as a shorthand for ``np.multiply`` on +// ndarrays. 
+// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> x1 * x2 +// array([[ 0., 1., 4.], +// [ 0., 4., 10.], +// [ 0., 7., 16.]]) +// +//go:linkname Multiply py.multiply +func Multiply(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// negative(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Numerical negative, element-wise. +// +// Parameters +// ---------- +// x : array_like or scalar +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// Returned array or scalar: `y = -x`. +// This is a scalar if `x` is a scalar. +// +// Examples +// -------- +// >>> np.negative([1.,-1.]) +// array([-1., 1.]) +// +// The unary ``-`` operator can be used as a shorthand for ``np.negative`` on +// ndarrays. 
+// +// >>> x1 = np.array(([1., -1.])) +// >>> -x1 +// array([-1., 1.]) +// +//go:linkname Negative py.negative +func Negative(x *py.Object, out *py.Object) *py.Object +// nextafter(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the next floating-point value after x1 towards x2, element-wise. +// +// Parameters +// ---------- +// x1 : array_like +// Values to find the next representable value of. +// x2 : array_like +// The direction where to look for the next representable value of `x1`. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// The next representable values of `x1` in the direction of `x2`. +// This is a scalar if both `x1` and `x2` are scalars. 
+// +// Examples +// -------- +// >>> eps = np.finfo(np.float64).eps +// >>> np.nextafter(1, 2) == eps + 1 +// True +// >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] +// array([ True, True]) +// +//go:linkname Nextafter py.nextafter +func Nextafter(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// not_equal(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return (x1 != x2) element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// Input arrays. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array, element-wise comparison of `x1` and `x2`. +// Typically of type bool, unless ``dtype=object`` is passed. +// This is a scalar if both `x1` and `x2` are scalars. 
+// +// See Also +// -------- +// equal, greater, greater_equal, less, less_equal +// +// Examples +// -------- +// >>> np.not_equal([1.,2.], [1., 3.]) +// array([False, True]) +// >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) +// array([[False, True], +// [False, True]]) +// +// The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on +// ndarrays. +// +// >>> a = np.array([1., 2.]) +// >>> b = np.array([1., 3.]) +// >>> a != b +// array([False, True]) +// +//go:linkname NotEqual py.not_equal +func NotEqual(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// positive(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Numerical positive, element-wise. +// +// .. versionadded:: 1.13.0 +// +// Parameters +// ---------- +// x : array_like or scalar +// Input array. +// +// Returns +// ------- +// y : ndarray or scalar +// Returned array or scalar: `y = +x`. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// Equivalent to `x.copy()`, but only defined for types that support +// arithmetic. +// +// Examples +// -------- +// +// >>> x1 = np.array(([1., -1.])) +// >>> np.positive(x1) +// array([ 1., -1.]) +// +// The unary ``+`` operator can be used as a shorthand for ``np.positive`` on +// ndarrays. +// +// >>> x1 = np.array(([1., -1.])) +// >>> +x1 +// array([ 1., -1.]) +// +//go:linkname Positive py.positive +func Positive(x *py.Object, out *py.Object) *py.Object +// power(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// First array elements raised to powers from second array, element-wise. +// +// Raise each base in `x1` to the positionally-corresponding power in +// `x2`. `x1` and `x2` must be broadcastable to the same shape. +// +// An integer type raised to a negative integer power will raise a +// ``ValueError``. +// +// Negative values raised to a non-integral value will return ``nan``. 
+// To get complex results, cast the input to complex, or specify the +// ``dtype`` to be ``complex`` (see the example below). +// +// Parameters +// ---------- +// x1 : array_like +// The bases. +// x2 : array_like +// The exponents. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The bases in `x1` raised to the exponents in `x2`. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// float_power : power function that promotes integers to float +// +// Examples +// -------- +// Cube each element in an array. +// +// >>> x1 = np.arange(6) +// >>> x1 +// [0, 1, 2, 3, 4, 5] +// >>> np.power(x1, 3) +// array([ 0, 1, 8, 27, 64, 125]) +// +// Raise the bases to different exponents. +// +// >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] +// >>> np.power(x1, x2) +// array([ 0., 1., 8., 27., 16., 5.]) +// +// The effect of broadcasting. 
+// +// >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) +// >>> x2 +// array([[1, 2, 3, 3, 2, 1], +// [1, 2, 3, 3, 2, 1]]) +// >>> np.power(x1, x2) +// array([[ 0, 1, 8, 27, 16, 5], +// [ 0, 1, 8, 27, 16, 5]]) +// +// The ``**`` operator can be used as a shorthand for ``np.power`` on +// ndarrays. +// +// >>> x2 = np.array([1, 2, 3, 3, 2, 1]) +// >>> x1 = np.arange(6) +// >>> x1 ** x2 +// array([ 0, 1, 8, 27, 16, 5]) +// +// Negative values raised to a non-integral value will result in ``nan`` +// (and a warning will be generated). +// +// >>> x3 = np.array([-1.0, -4.0]) +// >>> with np.errstate(invalid='ignore'): +// ... p = np.power(x3, 1.5) +// ... +// >>> p +// array([nan, nan]) +// +// To get complex results, give the argument ``dtype=complex``. +// +// >>> np.power(x3, 1.5, dtype=complex) +// array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) +// +//go:linkname Power py.power +func Power(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// rad2deg(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Convert angles from radians to degrees. +// +// Parameters +// ---------- +// x : array_like +// Angle in radians. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. 
+// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding angle in degrees. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// deg2rad : Convert angles from degrees to radians. +// unwrap : Remove large jumps in angle by wrapping. +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// rad2deg(x) is ``180 * x / pi``. +// +// Examples +// -------- +// >>> np.rad2deg(np.pi/2) +// 90.0 +// +//go:linkname Rad2deg py.rad2deg +func Rad2deg(x *py.Object, out *py.Object) *py.Object +// radians(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Convert angles from degrees to radians. +// +// Parameters +// ---------- +// x : array_like +// Input array in degrees. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding radian values. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// deg2rad : equivalent function +// +// Examples +// -------- +// Convert a degree array to radians +// +// >>> deg = np.arange(12.) * 30. 
+// >>> np.radians(deg) +// array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 , +// 2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898, +// 5.23598776, 5.75958653]) +// +// >>> out = np.zeros((deg.shape)) +// >>> ret = np.radians(deg, out) +// >>> ret is out +// True +// +//go:linkname Radians py.radians +func Radians(x *py.Object, out *py.Object) *py.Object +// reciprocal(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the reciprocal of the argument, element-wise. +// +// Calculates ``1/x``. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// Return array. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// .. note:: +// This function is not designed to work with integers. +// +// For integer arguments with absolute value larger than 1 the result is +// always zero because of the way Python handles integer division. For +// integer zero the result is an overflow. +// +// Examples +// -------- +// >>> np.reciprocal(2.) 
+// 0.5 +// >>> np.reciprocal([1, 2., 3.33]) +// array([ 1. , 0.5 , 0.3003003]) +// +//go:linkname Reciprocal py.reciprocal +func Reciprocal(x *py.Object, out *py.Object) *py.Object +// remainder(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns the element-wise remainder of division. +// +// Computes the remainder complementary to the `floor_divide` function. It is +// equivalent to the Python modulus operator``x1 % x2`` and has the same sign +// as the divisor `x2`. The MATLAB function equivalent to ``np.remainder`` +// is ``mod``. +// +// .. warning:: +// +// This should not be confused with: +// +// * Python 3.7's `math.remainder` and C's ``remainder``, which +// computes the IEEE remainder, which are the complement to +// ``round(x1 / x2)``. +// * The MATLAB ``rem`` function and or the C ``%`` operator which is the +// complement to ``int(x1 / x2)``. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend array. +// x2 : array_like +// Divisor array. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. 
+// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The element-wise remainder of the quotient ``floor_divide(x1, x2)``. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// floor_divide : Equivalent of Python ``//`` operator. +// divmod : Simultaneous floor division and remainder. +// fmod : Equivalent of the MATLAB ``rem`` function. +// divide, floor +// +// Notes +// ----- +// Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) +// integers. +// ``mod`` is an alias of ``remainder``. +// +// Examples +// -------- +// >>> np.remainder([4, 7], [2, 3]) +// array([0, 1]) +// >>> np.remainder(np.arange(7), 5) +// array([0, 1, 2, 3, 4, 0, 1]) +// +// The ``%`` operator can be used as a shorthand for ``np.remainder`` on +// ndarrays. +// +// >>> x1 = np.arange(7) +// >>> x1 % 5 +// array([0, 1, 2, 3, 4, 0, 1]) +// +//go:linkname Remainder py.remainder +func Remainder(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// right_shift(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Shift the bits of an integer to the right. +// +// Bits are shifted to the right `x2`. Because the internal +// representation of numbers is in binary format, this operation is +// equivalent to dividing `x1` by ``2**x2``. +// +// Parameters +// ---------- +// x1 : array_like, int +// Input values. +// x2 : array_like, int +// Number of bits to remove at the right of `x1`. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray, int +// Return `x1` with bits shifted `x2` times to the right. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// left_shift : Shift the bits of an integer to the left. +// binary_repr : Return the binary representation of the input number +// as a string. +// +// Examples +// -------- +// >>> np.binary_repr(10) +// '1010' +// >>> np.right_shift(10, 1) +// 5 +// >>> np.binary_repr(5) +// '101' +// +// >>> np.right_shift(10, [1,2,3]) +// array([5, 2, 1]) +// +// The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on +// ndarrays. +// +// >>> x1 = 10 +// >>> x2 = np.array([1,2,3]) +// >>> x1 >> x2 +// array([5, 2, 1]) +// +//go:linkname RightShift py.right_shift +func RightShift(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// rint(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Round elements of the array to the nearest integer. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. 
A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// Output array is same shape and type as `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// fix, ceil, floor, trunc +// +// Notes +// ----- +// For values exactly halfway between rounded decimal values, NumPy +// rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, +// -0.5 and 0.5 round to 0.0, etc. +// +// Examples +// -------- +// >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) +// >>> np.rint(a) +// array([-2., -2., -0., 0., 2., 2., 2.]) +// +//go:linkname Rint py.rint +func Rint(x *py.Object, out *py.Object) *py.Object +// seterrobj(errobj, /) +// +// Set the object that defines floating-point error handling. +// +// The error object contains all information that defines the error handling +// behavior in NumPy. `seterrobj` is used internally by the other +// functions that set error handling behavior (`seterr`, `seterrcall`). +// +// Parameters +// ---------- +// errobj : list +// The error object, a list containing three elements: +// [internal numpy buffer size, error mask, error callback function]. +// +// The error mask is a single integer that holds the treatment information +// on all four floating point errors. The information for each error type +// is contained in three bits of the integer. 
If we print it in base 8, we +// can see what treatment is set for "invalid", "under", "over", and +// "divide" (in that order). The printed string can be interpreted with +// +// * 0 : 'ignore' +// * 1 : 'warn' +// * 2 : 'raise' +// * 3 : 'call' +// * 4 : 'print' +// * 5 : 'log' +// +// See Also +// -------- +// geterrobj, seterr, geterr, seterrcall, geterrcall +// getbufsize, setbufsize +// +// Notes +// ----- +// For complete documentation of the types of floating-point exceptions and +// treatment options, see `seterr`. +// +// Examples +// -------- +// >>> old_errobj = np.geterrobj() # first get the defaults +// >>> old_errobj +// [8192, 521, None] +// +// >>> def err_handler(type, flag): +// ... print("Floating point error (%s), with flag %s" % (type, flag)) +// ... +// >>> new_errobj = [20000, 12, err_handler] +// >>> np.seterrobj(new_errobj) +// >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') +// '14' +// >>> np.geterr() +// {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} +// >>> np.geterrcall() is err_handler +// True +// +//go:linkname Seterrobj py.seterrobj +func Seterrobj(errobj *py.Object) *py.Object +//go:linkname Sign py.sign +func Sign(__llgo_va_list ...interface{}) *py.Object +// signbit(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Returns element-wise True where signbit is set (less than zero). +// +// Parameters +// ---------- +// x : array_like +// The input value(s). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. 
At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// result : ndarray of bool +// Output array, or reference to `out` if that was supplied. +// This is a scalar if `x` is a scalar. +// +// Examples +// -------- +// >>> np.signbit(-1.2) +// True +// >>> np.signbit(np.array([1, -2.3, 2.1])) +// array([False, True, False]) +// +//go:linkname Signbit py.signbit +func Signbit(x *py.Object, out *py.Object) *py.Object +// sin(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Trigonometric sine, element-wise. +// +// Parameters +// ---------- +// x : array_like +// Angle, in radians (:math:`2 \pi` rad equals 360 degrees). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// y : array_like +// The sine of each element of x. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// arcsin, sinh, cos +// +// Notes +// ----- +// The sine is one of the fundamental functions of trigonometry (the +// mathematical study of triangles). Consider a circle of radius 1 +// centered on the origin. A ray comes in from the :math:`+x` axis, makes +// an angle at the origin (measured counter-clockwise from that axis), and +// departs from the origin. The :math:`y` coordinate of the outgoing +// ray's intersection with the unit circle is the sine of that angle. It +// ranges from -1 for :math:`x=3\pi / 2` to +1 for :math:`\pi / 2.` The +// function has zeroes where the angle is a multiple of :math:`\pi`. +// Sines of angles between :math:`\pi` and :math:`2\pi` are negative. +// The numerous properties of the sine and related functions are included +// in any standard trigonometry text. +// +// Examples +// -------- +// Print sine of one angle: +// +// >>> np.sin(np.pi/2.) +// 1.0 +// +// Print sines of an array of angles given in degrees: +// +// >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. ) +// array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ]) +// +// Plot the sine function: +// +// >>> import matplotlib.pylab as plt +// >>> x = np.linspace(-np.pi, np.pi, 201) +// >>> plt.plot(x, np.sin(x)) +// >>> plt.xlabel('Angle [rad]') +// >>> plt.ylabel('sin(x)') +// >>> plt.axis('tight') +// >>> plt.show() +// +//go:linkname Sin py.sin +func Sin(x *py.Object, out *py.Object) *py.Object +// sinh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Hyperbolic sine, element-wise. +// +// Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or +// ``-1j * np.sin(1j*x)``. +// +// Parameters +// ---------- +// x : array_like +// Input array. 
+// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding hyperbolic sine values. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// If `out` is provided, the function writes the result into it, +// and returns a reference to `out`. (See Examples) +// +// References +// ---------- +// M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. +// New York, NY: Dover, 1972, pg. 83. +// +// Examples +// -------- +// >>> np.sinh(0) +// 0.0 +// >>> np.sinh(np.pi*1j/2) +// 1j +// >>> np.sinh(np.pi*1j) # (exact value is 0) +// 1.2246063538223773e-016j +// >>> # Discrepancy due to vagaries of floating point arithmetic. 
+// +// >>> # Example of providing the optional output parameter +// >>> out1 = np.array([0], dtype='d') +// >>> out2 = np.sinh([0.1], out1) +// >>> out2 is out1 +// True +// +// >>> # Example of ValueError due to provision of shape mis-matched `out` +// >>> np.sinh(np.zeros((3,3)),np.zeros((2,2))) +// Traceback (most recent call last): +// File "", line 1, in +// ValueError: operands could not be broadcast together with shapes (3,3) (2,2) +// +//go:linkname Sinh py.sinh +func Sinh(x *py.Object, out *py.Object) *py.Object +// spacing(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the distance between x and the nearest adjacent number. +// +// Parameters +// ---------- +// x : array_like +// Values to find the spacing of. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// out : ndarray or scalar +// The spacing of values of `x`. +// This is a scalar if `x` is a scalar. 
+// +// Notes +// ----- +// It can be considered as a generalization of EPS: +// ``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there +// should not be any representable number between ``x + spacing(x)`` and +// x for any finite x. +// +// Spacing of +- inf and NaN is NaN. +// +// Examples +// -------- +// >>> np.spacing(1) == np.finfo(np.float64).eps +// True +// +//go:linkname Spacing py.spacing +func Spacing(x *py.Object, out *py.Object) *py.Object +// sqrt(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the non-negative square-root of an array, element-wise. +// +// Parameters +// ---------- +// x : array_like +// The values whose square-roots are required. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// An array of the same shape as `x`, containing the positive +// square-root of each element in `x`. If any element in `x` is +// complex, a complex array is returned (and the square-roots of +// negative reals are calculated). If all of the elements in `x` +// are real, so is `y`, with negative elements returning ``nan``. 
+// If `out` was provided, `y` is a reference to it. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// emath.sqrt +// A version which returns complex numbers when given negative reals. +// Note that 0.0 and -0.0 are handled differently for complex inputs. +// +// Notes +// ----- +// *sqrt* has--consistent with common convention--as its branch cut the +// real "interval" [`-inf`, 0), and is continuous from above on it. +// A branch cut is a curve in the complex plane across which a given +// complex function fails to be continuous. +// +// Examples +// -------- +// >>> np.sqrt([1,4,9]) +// array([ 1., 2., 3.]) +// +// >>> np.sqrt([4, -1, -3+4J]) +// array([ 2.+0.j, 0.+1.j, 1.+2.j]) +// +// >>> np.sqrt([4, -1, np.inf]) +// array([ 2., nan, inf]) +// +//go:linkname Sqrt py.sqrt +func Sqrt(x *py.Object, out *py.Object) *py.Object +// square(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the element-wise square of the input. +// +// Parameters +// ---------- +// x : array_like +// Input data. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. 
+// +// Returns +// ------- +// out : ndarray or scalar +// Element-wise `x*x`, of the same shape and dtype as `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// numpy.linalg.matrix_power +// sqrt +// power +// +// Examples +// -------- +// >>> np.square([-1j, 1]) +// array([-1.-0.j, 1.+0.j]) +// +//go:linkname Square py.square +func Square(x *py.Object, out *py.Object) *py.Object +// subtract(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Subtract arguments, element-wise. +// +// Parameters +// ---------- +// x1, x2 : array_like +// The arrays to be subtracted from each other. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The difference of `x1` and `x2`, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// Notes +// ----- +// Equivalent to ``x1 - x2`` in terms of array broadcasting. 
+// +// Examples +// -------- +// >>> np.subtract(1.0, 4.0) +// -3.0 +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> np.subtract(x1, x2) +// array([[ 0., 0., 0.], +// [ 3., 3., 3.], +// [ 6., 6., 6.]]) +// +// The ``-`` operator can be used as a shorthand for ``np.subtract`` on +// ndarrays. +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> x1 - x2 +// array([[0., 0., 0.], +// [3., 3., 3.], +// [6., 6., 6.]]) +// +//go:linkname Subtract py.subtract +func Subtract(x1 *py.Object, x2 *py.Object, out *py.Object) *py.Object +// tan(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute tangent element-wise. +// +// Equivalent to ``np.sin(x)/np.cos(x)`` element-wise. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding tangent values. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// If `out` is provided, the function writes the result into it, +// and returns a reference to `out`. 
(See Examples) +// +// References +// ---------- +// M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. +// New York, NY: Dover, 1972. +// +// Examples +// -------- +// >>> from math import pi +// >>> np.tan(np.array([-pi,pi/2,pi])) +// array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) +// >>> +// >>> # Example of providing the optional output parameter illustrating +// >>> # that what is returned is a reference to said parameter +// >>> out1 = np.array([0], dtype='d') +// >>> out2 = np.cos([0.1], out1) +// >>> out2 is out1 +// True +// >>> +// >>> # Example of ValueError due to provision of shape mis-matched `out` +// >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) +// Traceback (most recent call last): +// File "", line 1, in +// ValueError: operands could not be broadcast together with shapes (3,3) (2,2) +// +//go:linkname Tan py.tan +func Tan(x *py.Object, out *py.Object) *py.Object +// tanh(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Compute hyperbolic tangent element-wise. +// +// Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. 
+// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray +// The corresponding hyperbolic tangent values. +// This is a scalar if `x` is a scalar. +// +// Notes +// ----- +// If `out` is provided, the function writes the result into it, +// and returns a reference to `out`. (See Examples) +// +// References +// ---------- +// .. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions. +// New York, NY: Dover, 1972, pg. 83. +// https://personal.math.ubc.ca/~cbm/aands/page_83.htm +// +// .. [2] Wikipedia, "Hyperbolic function", +// https://en.wikipedia.org/wiki/Hyperbolic_function +// +// Examples +// -------- +// >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) +// array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j]) +// +// >>> # Example of providing the optional output parameter illustrating +// >>> # that what is returned is a reference to said parameter +// >>> out1 = np.array([0], dtype='d') +// >>> out2 = np.tanh([0.1], out1) +// >>> out2 is out1 +// True +// +// >>> # Example of ValueError due to provision of shape mis-matched `out` +// >>> np.tanh(np.zeros((3,3)),np.zeros((2,2))) +// Traceback (most recent call last): +// File "", line 1, in +// ValueError: operands could not be broadcast together with shapes (3,3) (2,2) +// +//go:linkname Tanh py.tanh +func Tanh(x *py.Object, out *py.Object) *py.Object +// divide(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Divide arguments element-wise. +// +// Parameters +// ---------- +// x1 : array_like +// Dividend array. +// x2 : array_like +// Divisor array. +// If ``x1.shape != x2.shape``, they must be broadcastable to a common +// shape (which becomes the shape of the output). +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. 
If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The quotient ``x1/x2``, element-wise. +// This is a scalar if both `x1` and `x2` are scalars. +// +// See Also +// -------- +// seterr : Set whether to raise or warn on overflow, underflow and +// division by zero. +// +// Notes +// ----- +// Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting. +// +// The ``true_divide(x1, x2)`` function is an alias for +// ``divide(x1, x2)``. +// +// Examples +// -------- +// >>> np.divide(2.0, 4.0) +// 0.5 +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = np.arange(3.0) +// >>> np.divide(x1, x2) +// array([[nan, 1. , 1. ], +// [inf, 4. , 2.5], +// [inf, 7. , 4. ]]) +// +// The ``/`` operator can be used as a shorthand for ``np.divide`` on +// ndarrays. +// +// >>> x1 = np.arange(9.0).reshape((3, 3)) +// >>> x2 = 2 * np.ones(3) +// >>> x1 / x2 +// array([[0. , 0.5, 1. ], +// [1.5, 2. , 2.5], +// [3. , 3.5, 4. ]]) +// +//go:linkname TrueDivide py.true_divide +func TrueDivide(__llgo_va_list ...interface{}) *py.Object +// trunc(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Return the truncated value of the input, element-wise. 
+// +// The truncated value of the scalar `x` is the nearest integer `i` which +// is closer to zero than `x` is. In short, the fractional part of the +// signed number `x` is discarded. +// +// Parameters +// ---------- +// x : array_like +// Input data. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// y : ndarray or scalar +// The truncated value of each element in `x`. +// This is a scalar if `x` is a scalar. +// +// See Also +// -------- +// ceil, floor, rint, fix +// +// Notes +// ----- +// .. versionadded:: 1.3.0 +// +// Examples +// -------- +// >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) +// >>> np.trunc(a) +// array([-1., -1., -0., 0., 1., 1., 2.]) +// +//go:linkname Trunc py.trunc +func Trunc(x *py.Object, out *py.Object) *py.Object +// +// Return the scalar dtype or NumPy equivalent of Python type of an object. +// +// Parameters +// ---------- +// rep : any +// The object of which the type is returned. +// default : any, optional +// If given, this is returned for objects whose types can not be +// determined. If not given, None is returned for those objects. 
+// +// Returns +// ------- +// dtype : dtype or Python type +// The data type of `rep`. +// +// See Also +// -------- +// sctype2char, issctype, issubsctype, issubdtype, maximum_sctype +// +// Examples +// -------- +// >>> np.obj2sctype(np.int32) +// +// >>> np.obj2sctype(np.array([1., 2.])) +// +// >>> np.obj2sctype(np.array([1.j])) +// +// +// >>> np.obj2sctype(dict) +// +// >>> np.obj2sctype('string') +// +// >>> np.obj2sctype(1, default=list) +// +// +// +// +//go:linkname Obj2sctype py.obj2sctype +func Obj2sctype(rep *py.Object, default_ *py.Object) *py.Object +// +// Return the string representation of a scalar dtype. +// +// Parameters +// ---------- +// sctype : scalar dtype or object +// If a scalar dtype, the corresponding string character is +// returned. If an object, `sctype2char` tries to infer its scalar type +// and then return the corresponding string character. +// +// Returns +// ------- +// typechar : str +// The string character corresponding to the scalar type. +// +// Raises +// ------ +// ValueError +// If `sctype` is an object for which the type can not be inferred. +// +// See Also +// -------- +// obj2sctype, issctype, issubsctype, mintypecode +// +// Examples +// -------- +// >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]: +// ... print(np.sctype2char(sctype)) +// l # may vary +// d +// D +// S +// O +// +// >>> x = np.array([1., 2-1.j]) +// >>> np.sctype2char(x) +// 'D' +// >>> np.sctype2char(list) +// 'O' +// +// +// +//go:linkname Sctype2char py.sctype2char +func Sctype2char(sctype *py.Object) *py.Object +// +// Return the scalar type of highest precision of the same kind as the input. +// +// Parameters +// ---------- +// t : dtype or dtype specifier +// The input data type. This can be a `dtype` object or an object that +// is convertible to a `dtype`. +// +// Returns +// ------- +// out : dtype +// The highest precision data type of the same kind (`dtype.kind`) as `t`. 
+// +// See Also +// -------- +// obj2sctype, mintypecode, sctype2char +// dtype +// +// Examples +// -------- +// >>> np.maximum_sctype(int) +// +// >>> np.maximum_sctype(np.uint8) +// +// >>> np.maximum_sctype(complex) +// # may vary +// +// >>> np.maximum_sctype(str) +// +// +// >>> np.maximum_sctype('i2') +// +// >>> np.maximum_sctype('f4') +// # may vary +// +// +// +//go:linkname MaximumSctype py.maximum_sctype +func MaximumSctype(t *py.Object) *py.Object +// +// Determines whether the given object represents a scalar data-type. +// +// Parameters +// ---------- +// rep : any +// If `rep` is an instance of a scalar dtype, True is returned. If not, +// False is returned. +// +// Returns +// ------- +// out : bool +// Boolean result of check whether `rep` is a scalar dtype. +// +// See Also +// -------- +// issubsctype, issubdtype, obj2sctype, sctype2char +// +// Examples +// -------- +// >>> np.issctype(np.int32) +// True +// >>> np.issctype(list) +// False +// >>> np.issctype(1.1) +// False +// +// Strings are also a scalar type: +// +// >>> np.issctype(np.dtype('str')) +// True +// +// +// +//go:linkname Issctype py.issctype +func Issctype(rep *py.Object) *py.Object +// +// Determine common type following standard coercion rules. +// +// .. deprecated:: NumPy 1.25 +// +// This function is deprecated, use `numpy.promote_types` or +// `numpy.result_type` instead. To achieve semantics for the +// `scalar_types` argument, use `numpy.result_type` and pass the Python +// values `0`, `0.0`, or `0j`. +// This will give the same results in almost all cases. +// More information and rare exception can be found in the +// `NumPy 1.25 release notes `_. +// +// Parameters +// ---------- +// array_types : sequence +// A list of dtypes or dtype convertible objects representing arrays. +// scalar_types : sequence +// A list of dtypes or dtype convertible objects representing scalars. 
+// +// Returns +// ------- +// datatype : dtype +// The common data type, which is the maximum of `array_types` ignoring +// `scalar_types`, unless the maximum of `scalar_types` is of a +// different kind (`dtype.kind`). If the kind is not understood, then +// None is returned. +// +// See Also +// -------- +// dtype, common_type, can_cast, mintypecode +// +// Examples +// -------- +// >>> np.find_common_type([], [np.int64, np.float32, complex]) +// dtype('complex128') +// >>> np.find_common_type([np.int64, np.float32], []) +// dtype('float64') +// +// The standard casting rules ensure that a scalar cannot up-cast an +// array unless the scalar is of a fundamentally different kind of data +// (i.e. under a different hierarchy in the data type hierarchy) then +// the array: +// +// >>> np.find_common_type([np.float32], [np.int64, np.float64]) +// dtype('float32') +// +// Complex is of a different type, so it up-casts the float in the +// `array_types` argument: +// +// >>> np.find_common_type([np.float32], [complex]) +// dtype('complex128') +// +// Type specifier strings are convertible to dtypes and can therefore +// be used instead of dtypes: +// +// >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) +// dtype('complex128') +// +// +// +//go:linkname FindCommonType py.find_common_type +func FindCommonType(arrayTypes *py.Object, scalarTypes *py.Object) *py.Object +// +// Returns True if first argument is a typecode lower/equal in type hierarchy. +// +// This is like the builtin :func:`issubclass`, but for `dtype`\ s. +// +// Parameters +// ---------- +// arg1, arg2 : dtype_like +// `dtype` or object coercible to one +// +// Returns +// ------- +// out : bool +// +// See Also +// -------- +// :ref:`arrays.scalars` : Overview of the numpy type hierarchy. 
+// issubsctype, issubclass_ +// +// Examples +// -------- +// `issubdtype` can be used to check the type of arrays: +// +// >>> ints = np.array([1, 2, 3], dtype=np.int32) +// >>> np.issubdtype(ints.dtype, np.integer) +// True +// >>> np.issubdtype(ints.dtype, np.floating) +// False +// +// >>> floats = np.array([1, 2, 3], dtype=np.float32) +// >>> np.issubdtype(floats.dtype, np.integer) +// False +// >>> np.issubdtype(floats.dtype, np.floating) +// True +// +// Similar types of different sizes are not subdtypes of each other: +// +// >>> np.issubdtype(np.float64, np.float32) +// False +// >>> np.issubdtype(np.float32, np.float64) +// False +// +// but both are subtypes of `floating`: +// +// >>> np.issubdtype(np.float64, np.floating) +// True +// >>> np.issubdtype(np.float32, np.floating) +// True +// +// For convenience, dtype-like objects are allowed too: +// +// >>> np.issubdtype('S1', np.string_) +// True +// >>> np.issubdtype('i4', np.signedinteger) +// True +// +// +// +//go:linkname Issubdtype py.issubdtype +func Issubdtype(arg1 *py.Object, arg2 *py.Object) *py.Object +// datetime_data(dtype, /) +// +// Get information about the step size of a date or time type. +// +// The returned tuple can be passed as the second argument of `numpy.datetime64` and +// `numpy.timedelta64`. +// +// Parameters +// ---------- +// dtype : dtype +// The dtype object, which must be a `datetime64` or `timedelta64` type. +// +// Returns +// ------- +// unit : str +// The :ref:`datetime unit ` on which this dtype +// is based. +// count : int +// The number of base units in a step. 
+// +// Examples +// -------- +// >>> dt_25s = np.dtype('timedelta64[25s]') +// >>> np.datetime_data(dt_25s) +// ('s', 25) +// >>> np.array(10, dt_25s).astype('timedelta64[s]') +// array(250, dtype='timedelta64[s]') +// +// The result can be used to construct a datetime that uses the same units +// as a timedelta +// +// >>> np.datetime64('2010', np.datetime_data(dt_25s)) +// numpy.datetime64('2010-01-01T00:00:00','25s') +// +//go:linkname DatetimeData py.datetime_data +func DatetimeData(dtype *py.Object) *py.Object +// +// datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') +// +// Convert an array of datetimes into an array of strings. +// +// Parameters +// ---------- +// arr : array_like of datetime64 +// The array of UTC timestamps to format. +// unit : str +// One of None, 'auto', or a :ref:`datetime unit `. +// timezone : {'naive', 'UTC', 'local'} or tzinfo +// Timezone information to use when displaying the datetime. If 'UTC', end +// with a Z to indicate UTC time. If 'local', convert to the local timezone +// first, and suffix with a +-#### timezone offset. If a tzinfo object, +// then do as with 'local', but use the specified timezone. +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} +// Casting to allow when changing between datetime units. +// +// Returns +// ------- +// str_arr : ndarray +// An array of strings the same shape as `arr`. 
+// +// Examples +// -------- +// >>> import pytz +// >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') +// >>> d +// array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', +// '2002-10-27T07:30'], dtype='datetime64[m]') +// +// Setting the timezone to UTC shows the same information, but with a Z suffix +// +// >>> np.datetime_as_string(d, timezone='UTC') +// array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', +// '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) +// array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', +// '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='>> np.datetime_as_string(d, unit='h') +// array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], +// dtype='>> np.datetime_as_string(d, unit='s') +// array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', +// '2002-10-27T07:30:00'], dtype='>> np.datetime_as_string(d, unit='h', casting='safe') +// Traceback (most recent call last): +// ... +// TypeError: Cannot create a datetime string as units 'h' from a NumPy +// datetime with units 'm' according to the rule 'safe' +// +// +//go:linkname DatetimeAsString py.datetime_as_string +func DatetimeAsString(arr *py.Object, unit *py.Object, timezone *py.Object, casting *py.Object) *py.Object +// +// busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) +// +// First adjusts the date to fall on a valid day according to +// the ``roll`` rule, then applies offsets to the given dates +// counted in valid days. +// +// .. versionadded:: 1.7.0 +// +// Parameters +// ---------- +// dates : array_like of datetime64[D] +// The array of dates to process. +// offsets : array_like of int +// The array of offsets, which is broadcast with ``dates``. 
+// roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional +// How to treat dates that do not fall on a valid day. The default +// is 'raise'. +// +// * 'raise' means to raise an exception for an invalid day. +// * 'nat' means to return a NaT (not-a-time) for an invalid day. +// * 'forward' and 'following' mean to take the first valid day +// later in time. +// * 'backward' and 'preceding' mean to take the first valid day +// earlier in time. +// * 'modifiedfollowing' means to take the first valid day +// later in time unless it is across a Month boundary, in which +// case to take the first valid day earlier in time. +// * 'modifiedpreceding' means to take the first valid day +// earlier in time unless it is across a Month boundary, in which +// case to take the first valid day later in time. +// weekmask : str or array_like of bool, optional +// A seven-element array indicating which of Monday through Sunday are +// valid days. May be specified as a length-seven list or array, like +// [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string +// like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for +// weekdays, optionally separated by white space. Valid abbreviations +// are: Mon Tue Wed Thu Fri Sat Sun +// holidays : array_like of datetime64[D], optional +// An array of dates to consider as invalid dates. They may be +// specified in any order, and NaT (not-a-time) dates are ignored. +// This list is saved in a normalized form that is suited for +// fast calculations of valid days. +// busdaycal : busdaycalendar, optional +// A `busdaycalendar` object which specifies the valid days. If this +// parameter is provided, neither weekmask nor holidays may be +// provided. +// out : array of datetime64[D], optional +// If provided, this array is filled with the result. 
+// +// Returns +// ------- +// out : array of datetime64[D] +// An array with a shape from broadcasting ``dates`` and ``offsets`` +// together, containing the dates with offsets applied. +// +// See Also +// -------- +// busdaycalendar : An object that specifies a custom set of valid days. +// is_busday : Returns a boolean array indicating valid days. +// busday_count : Counts how many valid days are in a half-open date range. +// +// Examples +// -------- +// >>> # First business day in October 2011 (not accounting for holidays) +// ... np.busday_offset('2011-10', 0, roll='forward') +// numpy.datetime64('2011-10-03') +// >>> # Last business day in February 2012 (not accounting for holidays) +// ... np.busday_offset('2012-03', -1, roll='forward') +// numpy.datetime64('2012-02-29') +// >>> # Third Wednesday in January 2011 +// ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') +// numpy.datetime64('2011-01-19') +// >>> # 2012 Mother's Day in Canada and the U.S. +// ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') +// numpy.datetime64('2012-05-13') +// +// >>> # First business day on or after a date +// ... np.busday_offset('2011-03-20', 0, roll='forward') +// numpy.datetime64('2011-03-21') +// >>> np.busday_offset('2011-03-22', 0, roll='forward') +// numpy.datetime64('2011-03-22') +// >>> # First business day after a date +// ... 
np.busday_offset('2011-03-20', 1, roll='backward') +// numpy.datetime64('2011-03-21') +// >>> np.busday_offset('2011-03-22', 1, roll='backward') +// numpy.datetime64('2011-03-23') +// +// +//go:linkname BusdayOffset py.busday_offset +func BusdayOffset(dates *py.Object, offsets *py.Object, roll *py.Object, weekmask *py.Object, holidays *py.Object, busdaycal *py.Object, out *py.Object) *py.Object +// +// busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) +// +// Counts the number of valid days between `begindates` and +// `enddates`, not including the day of `enddates`. +// +// If ``enddates`` specifies a date value that is earlier than the +// corresponding ``begindates`` date value, the count will be negative. +// +// .. versionadded:: 1.7.0 +// +// Parameters +// ---------- +// begindates : array_like of datetime64[D] +// The array of the first dates for counting. +// enddates : array_like of datetime64[D] +// The array of the end dates for counting, which are excluded +// from the count themselves. +// weekmask : str or array_like of bool, optional +// A seven-element array indicating which of Monday through Sunday are +// valid days. May be specified as a length-seven list or array, like +// [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string +// like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for +// weekdays, optionally separated by white space. Valid abbreviations +// are: Mon Tue Wed Thu Fri Sat Sun +// holidays : array_like of datetime64[D], optional +// An array of dates to consider as invalid dates. They may be +// specified in any order, and NaT (not-a-time) dates are ignored. +// This list is saved in a normalized form that is suited for +// fast calculations of valid days. +// busdaycal : busdaycalendar, optional +// A `busdaycalendar` object which specifies the valid days. If this +// parameter is provided, neither weekmask nor holidays may be +// provided. 
+// out : array of int, optional +// If provided, this array is filled with the result. +// +// Returns +// ------- +// out : array of int +// An array with a shape from broadcasting ``begindates`` and ``enddates`` +// together, containing the number of valid days between +// the begin and end dates. +// +// See Also +// -------- +// busdaycalendar : An object that specifies a custom set of valid days. +// is_busday : Returns a boolean array indicating valid days. +// busday_offset : Applies an offset counted in valid days. +// +// Examples +// -------- +// >>> # Number of weekdays in January 2011 +// ... np.busday_count('2011-01', '2011-02') +// 21 +// >>> # Number of weekdays in 2011 +// >>> np.busday_count('2011', '2012') +// 260 +// >>> # Number of Saturdays in 2011 +// ... np.busday_count('2011', '2012', weekmask='Sat') +// 53 +// +// +//go:linkname BusdayCount py.busday_count +func BusdayCount(begindates *py.Object, enddates *py.Object, weekmask *py.Object, holidays *py.Object, busdaycal *py.Object, out *py.Object) *py.Object +// +// is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) +// +// Calculates which of the given dates are valid days, and which are not. +// +// .. versionadded:: 1.7.0 +// +// Parameters +// ---------- +// dates : array_like of datetime64[D] +// The array of dates to process. +// weekmask : str or array_like of bool, optional +// A seven-element array indicating which of Monday through Sunday are +// valid days. May be specified as a length-seven list or array, like +// [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string +// like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for +// weekdays, optionally separated by white space. Valid abbreviations +// are: Mon Tue Wed Thu Fri Sat Sun +// holidays : array_like of datetime64[D], optional +// An array of dates to consider as invalid dates. They may be +// specified in any order, and NaT (not-a-time) dates are ignored. 
+// This list is saved in a normalized form that is suited for +// fast calculations of valid days. +// busdaycal : busdaycalendar, optional +// A `busdaycalendar` object which specifies the valid days. If this +// parameter is provided, neither weekmask nor holidays may be +// provided. +// out : array of bool, optional +// If provided, this array is filled with the result. +// +// Returns +// ------- +// out : array of bool +// An array with the same shape as ``dates``, containing True for +// each valid day, and False for each invalid day. +// +// See Also +// -------- +// busdaycalendar : An object that specifies a custom set of valid days. +// busday_offset : Applies an offset counted in valid days. +// busday_count : Counts how many valid days are in a half-open date range. +// +// Examples +// -------- +// >>> # The weekdays are Friday, Saturday, and Monday +// ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], +// ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) +// array([False, False, True]) +// +// +//go:linkname IsBusday py.is_busday +func IsBusday(dates *py.Object, weekmask *py.Object, holidays *py.Object, busdaycal *py.Object, out *py.Object) *py.Object +// +// Return a string representation of an array. +// +// Parameters +// ---------- +// a : ndarray +// Input array. +// max_line_width : int, optional +// Inserts newlines if text is longer than `max_line_width`. +// Defaults to ``numpy.get_printoptions()['linewidth']``. +// precision : int or None, optional +// Floating point precision. +// Defaults to ``numpy.get_printoptions()['precision']``. +// suppress_small : bool, optional +// Represent numbers "very close" to zero as zero; default is False. +// Very close is defined by precision: if the precision is 8, e.g., +// numbers smaller (in absolute value) than 5e-9 are represented as +// zero. +// Defaults to ``numpy.get_printoptions()['suppress']``. +// separator : str, optional +// Inserted between elements. 
+// prefix : str, optional +// suffix : str, optional +// The length of the prefix and suffix strings are used to respectively +// align and wrap the output. An array is typically printed as:: +// +// prefix + array2string(a) + suffix +// +// The output is left-padded by the length of the prefix string, and +// wrapping is forced at the column ``max_line_width - len(suffix)``. +// It should be noted that the content of prefix and suffix strings are +// not included in the output. +// style : _NoValue, optional +// Has no effect, do not use. +// +// .. deprecated:: 1.14.0 +// formatter : dict of callables, optional +// If not None, the keys should indicate the type(s) that the respective +// formatting function applies to. Callables should return a string. +// Types that are not specified (by their corresponding keys) are handled +// by the default formatters. Individual types for which a formatter +// can be set are: +// +// - 'bool' +// - 'int' +// - 'timedelta' : a `numpy.timedelta64` +// - 'datetime' : a `numpy.datetime64` +// - 'float' +// - 'longfloat' : 128-bit floats +// - 'complexfloat' +// - 'longcomplexfloat' : composed of two 128-bit floats +// - 'void' : type `numpy.void` +// - 'numpystr' : types `numpy.bytes_` and `numpy.str_` +// +// Other keys that can be used to set a group of types at once are: +// +// - 'all' : sets all types +// - 'int_kind' : sets 'int' +// - 'float_kind' : sets 'float' and 'longfloat' +// - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' +// - 'str_kind' : sets 'numpystr' +// threshold : int, optional +// Total number of array elements which trigger summarization +// rather than full repr. +// Defaults to ``numpy.get_printoptions()['threshold']``. +// edgeitems : int, optional +// Number of array items in summary at beginning and end of +// each dimension. +// Defaults to ``numpy.get_printoptions()['edgeitems']``. 
+// sign : string, either '-', '+', or ' ', optional +// Controls printing of the sign of floating-point types. If '+', always +// print the sign of positive values. If ' ', always prints a space +// (whitespace character) in the sign position of positive values. If +// '-', omit the sign character of positive values. +// Defaults to ``numpy.get_printoptions()['sign']``. +// floatmode : str, optional +// Controls the interpretation of the `precision` option for +// floating-point types. +// Defaults to ``numpy.get_printoptions()['floatmode']``. +// Can take the following values: +// +// - 'fixed': Always print exactly `precision` fractional digits, +// even if this would print more or fewer digits than +// necessary to specify the value uniquely. +// - 'unique': Print the minimum number of fractional digits necessary +// to represent each value uniquely. Different elements may +// have a different number of digits. The value of the +// `precision` option is ignored. +// - 'maxprec': Print at most `precision` fractional digits, but if +// an element can be uniquely represented with fewer digits +// only print it with that many. +// - 'maxprec_equal': Print at most `precision` fractional digits, +// but if every element in the array can be uniquely +// represented with an equal number of fewer digits, use that +// many digits for all elements. +// legacy : string or `False`, optional +// If set to the string `'1.13'` enables 1.13 legacy printing mode. This +// approximates numpy 1.13 print output by including a space in the sign +// position of floats and different behavior for 0d arrays. If set to +// `False`, disables legacy mode. Unrecognized strings will be ignored +// with a warning for forward compatibility. +// +// .. versionadded:: 1.14.0 +// +// Returns +// ------- +// array_str : str +// String representation of the array. +// +// Raises +// ------ +// TypeError +// if a callable in `formatter` does not return a string. 
+// +// See Also +// -------- +// array_str, array_repr, set_printoptions, get_printoptions +// +// Notes +// ----- +// If a formatter is specified for a certain type, the `precision` keyword is +// ignored for that type. +// +// This is a very flexible function; `array_repr` and `array_str` are using +// `array2string` internally so keywords with the same name should work +// identically in all three functions. +// +// Examples +// -------- +// >>> x = np.array([1e-16,1,2,3]) +// >>> np.array2string(x, precision=2, separator=',', +// ... suppress_small=True) +// '[0.,1.,2.,3.]' +// +// >>> x = np.arange(3.) +// >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) +// '[0.00 1.00 2.00]' +// +// >>> x = np.arange(3) +// >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) +// '[0x0 0x1 0x2]' +// +// +// +//go:linkname Array2string py.array2string +func Array2string(a *py.Object, maxLineWidth *py.Object, precision *py.Object, suppressSmall *py.Object, separator *py.Object, prefix *py.Object, style *py.Object, formatter *py.Object, threshold *py.Object, edgeitems *py.Object, sign *py.Object, floatmode *py.Object, suffix *py.Object) *py.Object +// +// Return a string representation of the data in an array. +// +// The data in the array is returned as a single string. This function is +// similar to `array_repr`, the difference being that `array_repr` also +// returns information on the kind of array and its data type. +// +// Parameters +// ---------- +// a : ndarray +// Input array. +// max_line_width : int, optional +// Inserts newlines if text is longer than `max_line_width`. +// Defaults to ``numpy.get_printoptions()['linewidth']``. +// precision : int, optional +// Floating point precision. +// Defaults to ``numpy.get_printoptions()['precision']``. +// suppress_small : bool, optional +// Represent numbers "very close" to zero as zero; default is False. 
+// Very close is defined by precision: if the precision is 8, e.g., +// numbers smaller (in absolute value) than 5e-9 are represented as +// zero. +// Defaults to ``numpy.get_printoptions()['suppress']``. +// +// See Also +// -------- +// array2string, array_repr, set_printoptions +// +// Examples +// -------- +// >>> np.array_str(np.arange(3)) +// '[0 1 2]' +// +// +// +//go:linkname ArrayStr py.array_str +func ArrayStr(a *py.Object, maxLineWidth *py.Object, precision *py.Object, suppressSmall *py.Object) *py.Object +// +// Return the string representation of an array. +// +// Parameters +// ---------- +// arr : ndarray +// Input array. +// max_line_width : int, optional +// Inserts newlines if text is longer than `max_line_width`. +// Defaults to ``numpy.get_printoptions()['linewidth']``. +// precision : int, optional +// Floating point precision. +// Defaults to ``numpy.get_printoptions()['precision']``. +// suppress_small : bool, optional +// Represent numbers "very close" to zero as zero; default is False. +// Very close is defined by precision: if the precision is 8, e.g., +// numbers smaller (in absolute value) than 5e-9 are represented as +// zero. +// Defaults to ``numpy.get_printoptions()['suppress']``. +// +// Returns +// ------- +// string : str +// The string representation of an array. +// +// See Also +// -------- +// array_str, array2string, set_printoptions +// +// Examples +// -------- +// >>> np.array_repr(np.array([1,2])) +// 'array([1, 2])' +// >>> np.array_repr(np.ma.array([0.])) +// 'MaskedArray([0.])' +// >>> np.array_repr(np.array([], np.int32)) +// 'array([], dtype=int32)' +// +// >>> x = np.array([1e-6, 4e-7, 2, 3]) +// >>> np.array_repr(x, precision=6, suppress_small=True) +// 'array([0.000001, 0. , 2. , 3. 
])' +// +// +// +//go:linkname ArrayRepr py.array_repr +func ArrayRepr(arr *py.Object, maxLineWidth *py.Object, precision *py.Object, suppressSmall *py.Object) *py.Object +// +// Set a Python function to be used when pretty printing arrays. +// +// Parameters +// ---------- +// f : function or None +// Function to be used to pretty print arrays. The function should expect +// a single array argument and return a string of the representation of +// the array. If None, the function is reset to the default NumPy function +// to print arrays. +// repr : bool, optional +// If True (default), the function for pretty printing (``__repr__``) +// is set, if False the function that returns the default string +// representation (``__str__``) is set. +// +// See Also +// -------- +// set_printoptions, get_printoptions +// +// Examples +// -------- +// >>> def pprint(arr): +// ... return 'HA! - What are you going to do now?' +// ... +// >>> np.set_string_function(pprint) +// >>> a = np.arange(10) +// >>> a +// HA! - What are you going to do now? +// >>> _ = a +// >>> # [0 1 2 3 4 5 6 7 8 9] +// +// We can reset the function to the default: +// +// >>> np.set_string_function(None) +// >>> a +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// +// `repr` affects either pretty printing or normal string representation. +// Note that ``__repr__`` is still affected by setting ``__str__`` +// because the width of each array element in the returned string becomes +// equal to the length of the result of ``__str__()``. +// +// >>> x = np.arange(4) +// >>> np.set_string_function(lambda x:'random', repr=False) +// >>> x.__str__() +// 'random' +// >>> x.__repr__() +// 'array([0, 1, 2, 3])' +// +// +// +//go:linkname SetStringFunction py.set_string_function +func SetStringFunction(f *py.Object, repr *py.Object) *py.Object +// +// Set printing options. +// +// These options determine the way floating point numbers, arrays and +// other NumPy objects are displayed. 
+// +// Parameters +// ---------- +// precision : int or None, optional +// Number of digits of precision for floating point output (default 8). +// May be None if `floatmode` is not `fixed`, to print as many digits as +// necessary to uniquely specify the value. +// threshold : int, optional +// Total number of array elements which trigger summarization +// rather than full repr (default 1000). +// To always use the full repr without summarization, pass `sys.maxsize`. +// edgeitems : int, optional +// Number of array items in summary at beginning and end of +// each dimension (default 3). +// linewidth : int, optional +// The number of characters per line for the purpose of inserting +// line breaks (default 75). +// suppress : bool, optional +// If True, always print floating point numbers using fixed point +// notation, in which case numbers equal to zero in the current precision +// will print as zero. If False, then scientific notation is used when +// absolute value of the smallest number is < 1e-4 or the ratio of the +// maximum absolute value to the minimum is > 1e3. The default is False. +// nanstr : str, optional +// String representation of floating point not-a-number (default nan). +// infstr : str, optional +// String representation of floating point infinity (default inf). +// sign : string, either '-', '+', or ' ', optional +// Controls printing of the sign of floating-point types. If '+', always +// print the sign of positive values. If ' ', always prints a space +// (whitespace character) in the sign position of positive values. If +// '-', omit the sign character of positive values. (default '-') +// formatter : dict of callables, optional +// If not None, the keys should indicate the type(s) that the respective +// formatting function applies to. Callables should return a string. +// Types that are not specified (by their corresponding keys) are handled +// by the default formatters. 
Individual types for which a formatter +// can be set are: +// +// - 'bool' +// - 'int' +// - 'timedelta' : a `numpy.timedelta64` +// - 'datetime' : a `numpy.datetime64` +// - 'float' +// - 'longfloat' : 128-bit floats +// - 'complexfloat' +// - 'longcomplexfloat' : composed of two 128-bit floats +// - 'numpystr' : types `numpy.bytes_` and `numpy.str_` +// - 'object' : `np.object_` arrays +// +// Other keys that can be used to set a group of types at once are: +// +// - 'all' : sets all types +// - 'int_kind' : sets 'int' +// - 'float_kind' : sets 'float' and 'longfloat' +// - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' +// - 'str_kind' : sets 'numpystr' +// floatmode : str, optional +// Controls the interpretation of the `precision` option for +// floating-point types. Can take the following values +// (default maxprec_equal): +// +// * 'fixed': Always print exactly `precision` fractional digits, +// even if this would print more or fewer digits than +// necessary to specify the value uniquely. +// * 'unique': Print the minimum number of fractional digits necessary +// to represent each value uniquely. Different elements may +// have a different number of digits. The value of the +// `precision` option is ignored. +// * 'maxprec': Print at most `precision` fractional digits, but if +// an element can be uniquely represented with fewer digits +// only print it with that many. +// * 'maxprec_equal': Print at most `precision` fractional digits, +// but if every element in the array can be uniquely +// represented with an equal number of fewer digits, use that +// many digits for all elements. +// legacy : string or `False`, optional +// If set to the string `'1.13'` enables 1.13 legacy printing mode. This +// approximates numpy 1.13 print output by including a space in the sign +// position of floats and different behavior for 0d arrays. This also +// enables 1.21 legacy printing mode (described below). 
+// +// If set to the string `'1.21'` enables 1.21 legacy printing mode. This +// approximates numpy 1.21 print output of complex structured dtypes +// by not inserting spaces after commas that separate fields and after +// colons. +// +// If set to `False`, disables legacy mode. +// +// Unrecognized strings will be ignored with a warning for forward +// compatibility. +// +// .. versionadded:: 1.14.0 +// .. versionchanged:: 1.22.0 +// +// See Also +// -------- +// get_printoptions, printoptions, set_string_function, array2string +// +// Notes +// ----- +// `formatter` is always reset with a call to `set_printoptions`. +// +// Use `printoptions` as a context manager to set the values temporarily. +// +// Examples +// -------- +// Floating point precision can be set: +// +// >>> np.set_printoptions(precision=4) +// >>> np.array([1.123456789]) +// [1.1235] +// +// Long arrays can be summarised: +// +// >>> np.set_printoptions(threshold=5) +// >>> np.arange(10) +// array([0, 1, 2, ..., 7, 8, 9]) +// +// Small results can be suppressed: +// +// >>> eps = np.finfo(float).eps +// >>> x = np.arange(4.) +// >>> x**2 - (x + eps)**2 +// array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) +// >>> np.set_printoptions(suppress=True) +// >>> x**2 - (x + eps)**2 +// array([-0., -0., 0., 0.]) +// +// A custom formatter can be used to display array elements as desired: +// +// >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) +// >>> x = np.arange(3) +// >>> x +// array([int: 0, int: -1, int: -2]) +// >>> np.set_printoptions() # formatter gets reset +// >>> x +// array([0, 1, 2]) +// +// To put back the default options, you can use: +// +// >>> np.set_printoptions(edgeitems=3, infstr='inf', +// ... linewidth=75, nanstr='nan', precision=8, +// ... 
suppress=False, threshold=1000, formatter=None) +// +// Also to temporarily override options, use `printoptions` as a context manager: +// +// >>> with np.printoptions(precision=2, suppress=True, threshold=5): +// ... np.linspace(0, 10, 10) +// array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) +// +// +// +//go:linkname SetPrintoptions py.set_printoptions +func SetPrintoptions(precision *py.Object, threshold *py.Object, edgeitems *py.Object, linewidth *py.Object, suppress *py.Object, nanstr *py.Object, infstr *py.Object, formatter *py.Object, sign *py.Object, floatmode *py.Object) *py.Object +// +// Return the current print options. +// +// Returns +// ------- +// print_opts : dict +// Dictionary of current print options with keys +// +// - precision : int +// - threshold : int +// - edgeitems : int +// - linewidth : int +// - suppress : bool +// - nanstr : str +// - infstr : str +// - formatter : dict of callables +// - sign : str +// +// For a full description of these options, see `set_printoptions`. +// +// See Also +// -------- +// set_printoptions, printoptions, set_string_function +// +// +// +//go:linkname GetPrintoptions py.get_printoptions +func GetPrintoptions() *py.Object +// Context manager for setting print options. +// +// Set print options for the scope of the `with` block, and restore the old +// options at the end. See `set_printoptions` for the full description of +// available options. +// +// Examples +// -------- +// +// >>> from numpy.testing import assert_equal +// >>> with np.printoptions(precision=2): +// ... np.array([2.0]) / 3 +// array([0.67]) +// +// The `as`-clause of the `with`-statement gives the current print options: +// +// >>> with np.printoptions(precision=2) as opts: +// ... 
assert_equal(opts, np.get_printoptions()) +// +// See Also +// -------- +// set_printoptions, get_printoptions +// +// +// +//go:linkname Printoptions py.printoptions +func Printoptions(__llgo_va_list ...interface{}) *py.Object +// +// Format a floating-point scalar as a decimal string in positional notation. +// +// Provides control over rounding, trimming and padding. Uses and assumes +// IEEE unbiased rounding. Uses the "Dragon4" algorithm. +// +// Parameters +// ---------- +// x : python float or numpy floating scalar +// Value to format. +// precision : non-negative integer or None, optional +// Maximum number of digits to print. May be None if `unique` is +// `True`, but must be an integer if unique is `False`. +// unique : boolean, optional +// If `True`, use a digit-generation strategy which gives the shortest +// representation which uniquely identifies the floating-point number from +// other values of the same type, by judicious rounding. If `precision` +// is given fewer digits than necessary can be printed, or if `min_digits` +// is given more can be printed, in which cases the last digit is rounded +// with unbiased rounding. +// If `False`, digits are generated as if printing an infinite-precision +// value and stopping after `precision` digits, rounding the remaining +// value with unbiased rounding +// fractional : boolean, optional +// If `True`, the cutoffs of `precision` and `min_digits` refer to the +// total number of digits after the decimal point, including leading +// zeros. +// If `False`, `precision` and `min_digits` refer to the total number of +// significant digits, before or after the decimal point, ignoring leading +// zeros. +// trim : one of 'k', '.', '0', '-', optional +// Controls post-processing trimming of trailing digits, as follows: +// +// * 'k' : keep trailing zeros, keep decimal point (no trimming) +// * '.' : trim all trailing zeros, leave decimal point +// * '0' : trim all but the zero before the decimal point. 
Insert the +// zero if it is missing. +// * '-' : trim trailing zeros and any trailing decimal point +// sign : boolean, optional +// Whether to show the sign for positive values. +// pad_left : non-negative integer, optional +// Pad the left side of the string with whitespace until at least that +// many characters are to the left of the decimal point. +// pad_right : non-negative integer, optional +// Pad the right side of the string with whitespace until at least that +// many characters are to the right of the decimal point. +// min_digits : non-negative integer or None, optional +// Minimum number of digits to print. Only has an effect if `unique=True` +// in which case additional digits past those necessary to uniquely +// identify the value may be printed, rounding the last additional digit. +// +// -- versionadded:: 1.21.0 +// +// Returns +// ------- +// rep : string +// The string representation of the floating point value +// +// See Also +// -------- +// format_float_scientific +// +// Examples +// -------- +// >>> np.format_float_positional(np.float32(np.pi)) +// '3.1415927' +// >>> np.format_float_positional(np.float16(np.pi)) +// '3.14' +// >>> np.format_float_positional(np.float16(0.3)) +// '0.3' +// >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) +// '0.3000488281' +// +// +//go:linkname FormatFloatPositional py.format_float_positional +func FormatFloatPositional(x *py.Object, precision *py.Object, unique *py.Object, fractional *py.Object, trim *py.Object, sign *py.Object, padLeft *py.Object, padRight *py.Object, minDigits *py.Object) *py.Object +// +// Format a floating-point scalar as a decimal string in scientific notation. +// +// Provides control over rounding, trimming and padding. Uses and assumes +// IEEE unbiased rounding. Uses the "Dragon4" algorithm. +// +// Parameters +// ---------- +// x : python float or numpy floating scalar +// Value to format. 
+// precision : non-negative integer or None, optional +// Maximum number of digits to print. May be None if `unique` is +// `True`, but must be an integer if unique is `False`. +// unique : boolean, optional +// If `True`, use a digit-generation strategy which gives the shortest +// representation which uniquely identifies the floating-point number from +// other values of the same type, by judicious rounding. If `precision` +// is given fewer digits than necessary can be printed. If `min_digits` +// is given more can be printed, in which cases the last digit is rounded +// with unbiased rounding. +// If `False`, digits are generated as if printing an infinite-precision +// value and stopping after `precision` digits, rounding the remaining +// value with unbiased rounding +// trim : one of 'k', '.', '0', '-', optional +// Controls post-processing trimming of trailing digits, as follows: +// +// * 'k' : keep trailing zeros, keep decimal point (no trimming) +// * '.' : trim all trailing zeros, leave decimal point +// * '0' : trim all but the zero before the decimal point. Insert the +// zero if it is missing. +// * '-' : trim trailing zeros and any trailing decimal point +// sign : boolean, optional +// Whether to show the sign for positive values. +// pad_left : non-negative integer, optional +// Pad the left side of the string with whitespace until at least that +// many characters are to the left of the decimal point. +// exp_digits : non-negative integer, optional +// Pad the exponent with zeros until it contains at least this many digits. +// If omitted, the exponent will be at least 2 digits. +// min_digits : non-negative integer or None, optional +// Minimum number of digits to print. This only has an effect for +// `unique=True`. In that case more digits than necessary to uniquely +// identify the value may be printed and rounded unbiased. 
+// +// -- versionadded:: 1.21.0 +// +// Returns +// ------- +// rep : string +// The string representation of the floating point value +// +// See Also +// -------- +// format_float_positional +// +// Examples +// -------- +// >>> np.format_float_scientific(np.float32(np.pi)) +// '3.1415927e+00' +// >>> s = np.float32(1.23e24) +// >>> np.format_float_scientific(s, unique=False, precision=15) +// '1.230000071797338e+24' +// >>> np.format_float_scientific(s, exp_digits=4) +// '1.23e+0024' +// +// +//go:linkname FormatFloatScientific py.format_float_scientific +func FormatFloatScientific(x *py.Object, precision *py.Object, unique *py.Object, trim *py.Object, sign *py.Object, padLeft *py.Object, expDigits *py.Object, minDigits *py.Object) *py.Object +// +// Return an ndarray of the provided type that satisfies requirements. +// +// This function is useful to be sure that an array with the correct flags +// is returned for passing to compiled code (perhaps through ctypes). +// +// Parameters +// ---------- +// a : array_like +// The object to be converted to a type-and-requirement-satisfying array. +// dtype : data-type +// The required data-type. If None preserve the current dtype. If your +// application requires the data to be in native byteorder, include +// a byteorder specification as a part of the dtype specification. +// requirements : str or sequence of str +// The requirements list can be any of the following +// +// * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array +// * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array +// * 'ALIGNED' ('A') - ensure a data-type aligned array +// * 'WRITEABLE' ('W') - ensure a writable array +// * 'OWNDATA' ('O') - ensure an array that owns its own data +// * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. 
If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Array with specified requirements and type if given. +// +// See Also +// -------- +// asarray : Convert input to an ndarray. +// asanyarray : Convert to an ndarray, but pass through ndarray subclasses. +// ascontiguousarray : Convert input to a contiguous array. +// asfortranarray : Convert input to an ndarray with column-major +// memory order. +// ndarray.flags : Information about the memory layout of the array. +// +// Notes +// ----- +// The returned array will be guaranteed to have the listed requirements +// by making a copy if needed. +// +// Examples +// -------- +// >>> x = np.arange(6).reshape(2,3) +// >>> x.flags +// C_CONTIGUOUS : True +// F_CONTIGUOUS : False +// OWNDATA : False +// WRITEABLE : True +// ALIGNED : True +// WRITEBACKIFCOPY : False +// +// >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) +// >>> y.flags +// C_CONTIGUOUS : False +// F_CONTIGUOUS : True +// OWNDATA : True +// WRITEABLE : True +// ALIGNED : True +// WRITEBACKIFCOPY : False +// +// +// +//go:linkname Require py.require +func Require(a *py.Object, dtype *py.Object, requirements *py.Object) *py.Object +// +// Set how floating-point errors are handled. +// +// Note that operations on integer scalar types (such as `int16`) are +// handled like floating point, and are affected by these settings. +// +// Parameters +// ---------- +// all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional +// Set treatment for all types of floating-point errors at once: +// +// - ignore: Take no action when the exception occurs. +// - warn: Print a `RuntimeWarning` (via the Python `warnings` module). +// - raise: Raise a `FloatingPointError`. 
+// - call: Call a function specified using the `seterrcall` function. +// - print: Print a warning directly to ``stdout``. +// - log: Record error in a Log object specified by `seterrcall`. +// +// The default is not to change the current behavior. +// divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional +// Treatment for division by zero. +// over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional +// Treatment for floating-point overflow. +// under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional +// Treatment for floating-point underflow. +// invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional +// Treatment for invalid floating-point operation. +// +// Returns +// ------- +// old_settings : dict +// Dictionary containing the old settings. +// +// See also +// -------- +// seterrcall : Set a callback function for the 'call' mode. +// geterr, geterrcall, errstate +// +// Notes +// ----- +// The floating-point exceptions are defined in the IEEE 754 standard [1]_: +// +// - Division by zero: infinite result obtained from finite numbers. +// - Overflow: result too large to be expressed. +// - Underflow: result so close to zero that some precision +// was lost. +// - Invalid operation: result is not an expressible number, typically +// indicates that a NaN was produced. +// +// .. 
[1] https://en.wikipedia.org/wiki/IEEE_754 +// +// Examples +// -------- +// >>> old_settings = np.seterr(all='ignore') #seterr to known value +// >>> np.seterr(over='raise') +// {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} +// >>> np.seterr(**old_settings) # reset to default +// {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} +// +// >>> np.int16(32000) * np.int16(3) +// 30464 +// >>> old_settings = np.seterr(all='warn', over='raise') +// >>> np.int16(32000) * np.int16(3) +// Traceback (most recent call last): +// File "", line 1, in +// FloatingPointError: overflow encountered in scalar multiply +// +// >>> old_settings = np.seterr(all='print') +// >>> np.geterr() +// {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} +// >>> np.int16(32000) * np.int16(3) +// 30464 +// +// +// +//go:linkname Seterr py.seterr +func Seterr(all *py.Object, divide *py.Object, over *py.Object, under *py.Object, invalid *py.Object) *py.Object +// +// Get the current way of handling floating-point errors. +// +// Returns +// ------- +// res : dict +// A dictionary with keys "divide", "over", "under", and "invalid", +// whose values are from the strings "ignore", "print", "log", "warn", +// "raise", and "call". The keys represent possible floating-point +// exceptions, and the values define how these exceptions are handled. +// +// See Also +// -------- +// geterrcall, seterr, seterrcall +// +// Notes +// ----- +// For complete documentation of the types of floating-point exceptions and +// treatment options, see `seterr`. +// +// Examples +// -------- +// >>> np.geterr() +// {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} +// >>> np.arange(3.) / np.arange(3.) +// array([nan, 1., 1.]) +// +// >>> oldsettings = np.seterr(all='warn', over='raise') +// >>> np.geterr() +// {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'} +// >>> np.arange(3.) / np.arange(3.) 
+// array([nan, 1., 1.]) +// +// +// +//go:linkname Geterr py.geterr +func Geterr() *py.Object +// +// Set the size of the buffer used in ufuncs. +// +// Parameters +// ---------- +// size : int +// Size of buffer. +// +// +// +//go:linkname Setbufsize py.setbufsize +func Setbufsize(size *py.Object) *py.Object +// +// Return the size of the buffer used in ufuncs. +// +// Returns +// ------- +// getbufsize : int +// Size of ufunc buffer in bytes. +// +// +// +//go:linkname Getbufsize py.getbufsize +func Getbufsize() *py.Object +// +// Set the floating-point error callback function or log object. +// +// There are two ways to capture floating-point error messages. The first +// is to set the error-handler to 'call', using `seterr`. Then, set +// the function to call using this function. +// +// The second is to set the error-handler to 'log', using `seterr`. +// Floating-point errors then trigger a call to the 'write' method of +// the provided object. +// +// Parameters +// ---------- +// func : callable f(err, flag) or object with write method +// Function to call upon floating-point errors ('call'-mode) or +// object whose 'write' method is used to log such message ('log'-mode). +// +// The call function takes two arguments. The first is a string describing +// the type of error (such as "divide by zero", "overflow", "underflow", +// or "invalid value"), and the second is the status flag. The flag is a +// byte, whose four least-significant bits indicate the type of error, one +// of "divide", "over", "under", "invalid":: +// +// [0 0 0 0 divide over under invalid] +// +// In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. +// +// If an object is provided, its write method should take one argument, +// a string. +// +// Returns +// ------- +// h : callable, log instance or None +// The old error handler. 
+// +// See Also +// -------- +// seterr, geterr, geterrcall +// +// Examples +// -------- +// Callback upon error: +// +// >>> def err_handler(type, flag): +// ... print("Floating point error (%s), with flag %s" % (type, flag)) +// ... +// +// >>> saved_handler = np.seterrcall(err_handler) +// >>> save_err = np.seterr(all='call') +// +// >>> np.array([1, 2, 3]) / 0.0 +// Floating point error (divide by zero), with flag 1 +// array([inf, inf, inf]) +// +// >>> np.seterrcall(saved_handler) +// +// >>> np.seterr(**save_err) +// {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} +// +// Log error message: +// +// >>> class Log: +// ... def write(self, msg): +// ... print("LOG: %s" % msg) +// ... +// +// >>> log = Log() +// >>> saved_handler = np.seterrcall(log) +// >>> save_err = np.seterr(all='log') +// +// >>> np.array([1, 2, 3]) / 0.0 +// LOG: Warning: divide by zero encountered in divide +// array([inf, inf, inf]) +// +// >>> np.seterrcall(saved_handler) +// +// >>> np.seterr(**save_err) +// {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} +// +// +// +//go:linkname Seterrcall py.seterrcall +func Seterrcall(func_ *py.Object) *py.Object +// +// Return the current callback function used on floating-point errors. +// +// When the error handling for a floating-point error (one of "divide", +// "over", "under", or "invalid") is set to 'call' or 'log', the function +// that is called or the log instance that is written to is returned by +// `geterrcall`. This function or log instance has been set with +// `seterrcall`. +// +// Returns +// ------- +// errobj : callable, log instance or None +// The current error handler. If no handler was set through `seterrcall`, +// ``None`` is returned. +// +// See Also +// -------- +// seterrcall, seterr, geterr +// +// Notes +// ----- +// For complete documentation of the types of floating-point exceptions and +// treatment options, see `seterr`. 
+// +// Examples +// -------- +// >>> np.geterrcall() # we did not yet set a handler, returns None +// +// >>> oldsettings = np.seterr(all='call') +// >>> def err_handler(type, flag): +// ... print("Floating point error (%s), with flag %s" % (type, flag)) +// >>> oldhandler = np.seterrcall(err_handler) +// >>> np.array([1, 2, 3]) / 0.0 +// Floating point error (divide by zero), with flag 1 +// array([inf, inf, inf]) +// +// >>> cur_handler = np.geterrcall() +// >>> cur_handler is err_handler +// True +// +// +// +//go:linkname Geterrcall py.geterrcall +func Geterrcall() *py.Object +// +// Return numbers spaced evenly on a log scale. +// +// In linear space, the sequence starts at ``base ** start`` +// (`base` to the power of `start`) and ends with ``base ** stop`` +// (see `endpoint` below). +// +// .. versionchanged:: 1.16.0 +// Non-scalar `start` and `stop` are now supported. +// +// .. versionchanged:: 1.25.0 +// Non-scalar 'base` is now supported +// +// Parameters +// ---------- +// start : array_like +// ``base ** start`` is the starting value of the sequence. +// stop : array_like +// ``base ** stop`` is the final value of the sequence, unless `endpoint` +// is False. In that case, ``num + 1`` values are spaced over the +// interval in log-space, of which all but the last (a sequence of +// length `num`) are returned. +// num : integer, optional +// Number of samples to generate. Default is 50. +// endpoint : boolean, optional +// If true, `stop` is the last sample. Otherwise, it is not included. +// Default is True. +// base : array_like, optional +// The base of the log space. The step size between the elements in +// ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. +// Default is 10.0. +// dtype : dtype +// The type of the output array. If `dtype` is not given, the data type +// is inferred from `start` and `stop`. 
The inferred type will never be +// an integer; `float` is chosen even if the arguments would produce an +// array of integers. +// axis : int, optional +// The axis in the result to store the samples. Relevant only if start, +// stop, or base are array-like. By default (0), the samples will be +// along a new axis inserted at the beginning. Use -1 to get an axis at +// the end. +// +// .. versionadded:: 1.16.0 +// +// +// Returns +// ------- +// samples : ndarray +// `num` samples, equally spaced on a log scale. +// +// See Also +// -------- +// arange : Similar to linspace, with the step size specified instead of the +// number of samples. Note that, when used with a float endpoint, the +// endpoint may or may not be included. +// linspace : Similar to logspace, but with the samples uniformly distributed +// in linear space, instead of log space. +// geomspace : Similar to logspace, but with endpoints specified directly. +// :ref:`how-to-partition` +// +// Notes +// ----- +// If base is a scalar, logspace is equivalent to the code +// +// >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) +// ... # doctest: +SKIP +// >>> power(base, y).astype(dtype) +// ... # doctest: +SKIP +// +// Examples +// -------- +// >>> np.logspace(2.0, 3.0, num=4) +// array([ 100. , 215.443469 , 464.15888336, 1000. ]) +// >>> np.logspace(2.0, 3.0, num=4, endpoint=False) +// array([100. , 177.827941 , 316.22776602, 562.34132519]) +// >>> np.logspace(2.0, 3.0, num=4, base=2.0) +// array([4. , 5.0396842 , 6.34960421, 8. ]) +// >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) +// array([[ 4. , 5.0396842 , 6.34960421, 8. ], +// [ 9. , 12.98024613, 18.72075441, 27. 
]]) +// +// Graphical illustration: +// +// >>> import matplotlib.pyplot as plt +// >>> N = 10 +// >>> x1 = np.logspace(0.1, 1, N, endpoint=True) +// >>> x2 = np.logspace(0.1, 1, N, endpoint=False) +// >>> y = np.zeros(N) +// >>> plt.plot(x1, y, 'o') +// [] +// >>> plt.plot(x2, y + 0.5, 'o') +// [] +// >>> plt.ylim([-0.5, 1]) +// (-0.5, 1) +// >>> plt.show() +// +// +// +//go:linkname Logspace py.logspace +func Logspace(start *py.Object, stop *py.Object, num *py.Object, endpoint *py.Object, base *py.Object, dtype *py.Object, axis *py.Object) *py.Object +// +// Return evenly spaced numbers over a specified interval. +// +// Returns `num` evenly spaced samples, calculated over the +// interval [`start`, `stop`]. +// +// The endpoint of the interval can optionally be excluded. +// +// .. versionchanged:: 1.16.0 +// Non-scalar `start` and `stop` are now supported. +// +// .. versionchanged:: 1.20.0 +// Values are rounded towards ``-inf`` instead of ``0`` when an +// integer ``dtype`` is specified. The old behavior can +// still be obtained with ``np.linspace(start, stop, num).astype(int)`` +// +// Parameters +// ---------- +// start : array_like +// The starting value of the sequence. +// stop : array_like +// The end value of the sequence, unless `endpoint` is set to False. +// In that case, the sequence consists of all but the last of ``num + 1`` +// evenly spaced samples, so that `stop` is excluded. Note that the step +// size changes when `endpoint` is False. +// num : int, optional +// Number of samples to generate. Default is 50. Must be non-negative. +// endpoint : bool, optional +// If True, `stop` is the last sample. Otherwise, it is not included. +// Default is True. +// retstep : bool, optional +// If True, return (`samples`, `step`), where `step` is the spacing +// between samples. +// dtype : dtype, optional +// The type of the output array. If `dtype` is not given, the data type +// is inferred from `start` and `stop`. 
The inferred dtype will never be +// an integer; `float` is chosen even if the arguments would produce an +// array of integers. +// +// .. versionadded:: 1.9.0 +// +// axis : int, optional +// The axis in the result to store the samples. Relevant only if start +// or stop are array-like. By default (0), the samples will be along a +// new axis inserted at the beginning. Use -1 to get an axis at the end. +// +// .. versionadded:: 1.16.0 +// +// Returns +// ------- +// samples : ndarray +// There are `num` equally spaced samples in the closed interval +// ``[start, stop]`` or the half-open interval ``[start, stop)`` +// (depending on whether `endpoint` is True or False). +// step : float, optional +// Only returned if `retstep` is True +// +// Size of spacing between samples. +// +// +// See Also +// -------- +// arange : Similar to `linspace`, but uses a step size (instead of the +// number of samples). +// geomspace : Similar to `linspace`, but with numbers spaced evenly on a log +// scale (a geometric progression). +// logspace : Similar to `geomspace`, but with the end points specified as +// logarithms. +// :ref:`how-to-partition` +// +// Examples +// -------- +// >>> np.linspace(2.0, 3.0, num=5) +// array([2. , 2.25, 2.5 , 2.75, 3. ]) +// >>> np.linspace(2.0, 3.0, num=5, endpoint=False) +// array([2. , 2.2, 2.4, 2.6, 2.8]) +// >>> np.linspace(2.0, 3.0, num=5, retstep=True) +// (array([2. , 2.25, 2.5 , 2.75, 3. 
]), 0.25) +// +// Graphical illustration: +// +// >>> import matplotlib.pyplot as plt +// >>> N = 8 +// >>> y = np.zeros(N) +// >>> x1 = np.linspace(0, 10, N, endpoint=True) +// >>> x2 = np.linspace(0, 10, N, endpoint=False) +// >>> plt.plot(x1, y, 'o') +// [] +// >>> plt.plot(x2, y + 0.5, 'o') +// [] +// >>> plt.ylim([-0.5, 1]) +// (-0.5, 1) +// >>> plt.show() +// +// +// +//go:linkname Linspace py.linspace +func Linspace(start *py.Object, stop *py.Object, num *py.Object, endpoint *py.Object, retstep *py.Object, dtype *py.Object, axis *py.Object) *py.Object +// +// Return numbers spaced evenly on a log scale (a geometric progression). +// +// This is similar to `logspace`, but with endpoints specified directly. +// Each output sample is a constant multiple of the previous. +// +// .. versionchanged:: 1.16.0 +// Non-scalar `start` and `stop` are now supported. +// +// Parameters +// ---------- +// start : array_like +// The starting value of the sequence. +// stop : array_like +// The final value of the sequence, unless `endpoint` is False. +// In that case, ``num + 1`` values are spaced over the +// interval in log-space, of which all but the last (a sequence of +// length `num`) are returned. +// num : integer, optional +// Number of samples to generate. Default is 50. +// endpoint : boolean, optional +// If true, `stop` is the last sample. Otherwise, it is not included. +// Default is True. +// dtype : dtype +// The type of the output array. If `dtype` is not given, the data type +// is inferred from `start` and `stop`. The inferred dtype will never be +// an integer; `float` is chosen even if the arguments would produce an +// array of integers. +// axis : int, optional +// The axis in the result to store the samples. Relevant only if start +// or stop are array-like. By default (0), the samples will be along a +// new axis inserted at the beginning. Use -1 to get an axis at the end. +// +// .. 
versionadded:: 1.16.0 +// +// Returns +// ------- +// samples : ndarray +// `num` samples, equally spaced on a log scale. +// +// See Also +// -------- +// logspace : Similar to geomspace, but with endpoints specified using log +// and base. +// linspace : Similar to geomspace, but with arithmetic instead of geometric +// progression. +// arange : Similar to linspace, with the step size specified instead of the +// number of samples. +// :ref:`how-to-partition` +// +// Notes +// ----- +// If the inputs or dtype are complex, the output will follow a logarithmic +// spiral in the complex plane. (There are an infinite number of spirals +// passing through two points; the output will follow the shortest such path.) +// +// Examples +// -------- +// >>> np.geomspace(1, 1000, num=4) +// array([ 1., 10., 100., 1000.]) +// >>> np.geomspace(1, 1000, num=3, endpoint=False) +// array([ 1., 10., 100.]) +// >>> np.geomspace(1, 1000, num=4, endpoint=False) +// array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) +// >>> np.geomspace(1, 256, num=9) +// array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) +// +// Note that the above may not produce exact integers: +// +// >>> np.geomspace(1, 256, num=9, dtype=int) +// array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) +// >>> np.around(np.geomspace(1, 256, num=9)).astype(int) +// array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) +// +// Negative, decreasing, and complex inputs are allowed: +// +// >>> np.geomspace(1000, 1, num=4) +// array([1000., 100., 10., 1.]) +// >>> np.geomspace(-1000, -1, num=4) +// array([-1000., -100., -10., -1.]) +// >>> np.geomspace(1j, 1000j, num=4) # Straight line +// array([0. +1.j, 0. +10.j, 0. 
+100.j, 0.+1000.j]) +// >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle +// array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, +// 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, +// 1.00000000e+00+0.00000000e+00j]) +// +// Graphical illustration of `endpoint` parameter: +// +// >>> import matplotlib.pyplot as plt +// >>> N = 10 +// >>> y = np.zeros(N) +// >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') +// [] +// >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') +// [] +// >>> plt.axis([0.5, 2000, 0, 3]) +// [0.5, 2000, 0, 3] +// >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') +// >>> plt.show() +// +// +// +//go:linkname Geomspace py.geomspace +func Geomspace(start *py.Object, stop *py.Object, num *py.Object, endpoint *py.Object, dtype *py.Object, axis *py.Object) *py.Object +// +// Convert inputs to arrays with at least one dimension. +// +// Scalar inputs are converted to 1-dimensional arrays, whilst +// higher-dimensional inputs are preserved. +// +// Parameters +// ---------- +// arys1, arys2, ... : array_like +// One or more input arrays. +// +// Returns +// ------- +// ret : ndarray +// An array, or list of arrays, each with ``a.ndim >= 1``. +// Copies are made only if necessary. +// +// See Also +// -------- +// atleast_2d, atleast_3d +// +// Examples +// -------- +// >>> np.atleast_1d(1.0) +// array([1.]) +// +// >>> x = np.arange(9.0).reshape(3,3) +// >>> np.atleast_1d(x) +// array([[0., 1., 2.], +// [3., 4., 5.], +// [6., 7., 8.]]) +// >>> np.atleast_1d(x) is x +// True +// +// >>> np.atleast_1d(1, [3, 4]) +// [array([1]), array([3, 4])] +// +// +// +//go:linkname Atleast1d py.atleast_1d +func Atleast1d(__llgo_va_list ...interface{}) *py.Object +// +// View inputs as arrays with at least two dimensions. +// +// Parameters +// ---------- +// arys1, arys2, ... : array_like +// One or more array-like sequences. 
Non-array inputs are converted +// to arrays. Arrays that already have two or more dimensions are +// preserved. +// +// Returns +// ------- +// res, res2, ... : ndarray +// An array, or list of arrays, each with ``a.ndim >= 2``. +// Copies are avoided where possible, and views with two or more +// dimensions are returned. +// +// See Also +// -------- +// atleast_1d, atleast_3d +// +// Examples +// -------- +// >>> np.atleast_2d(3.0) +// array([[3.]]) +// +// >>> x = np.arange(3.0) +// >>> np.atleast_2d(x) +// array([[0., 1., 2.]]) +// >>> np.atleast_2d(x).base is x +// True +// +// >>> np.atleast_2d(1, [1, 2], [[1, 2]]) +// [array([[1]]), array([[1, 2]]), array([[1, 2]])] +// +// +// +//go:linkname Atleast2d py.atleast_2d +func Atleast2d(__llgo_va_list ...interface{}) *py.Object +// +// View inputs as arrays with at least three dimensions. +// +// Parameters +// ---------- +// arys1, arys2, ... : array_like +// One or more array-like sequences. Non-array inputs are converted to +// arrays. Arrays that already have three or more dimensions are +// preserved. +// +// Returns +// ------- +// res1, res2, ... : ndarray +// An array, or list of arrays, each with ``a.ndim >= 3``. Copies are +// avoided where possible, and views with three or more dimensions are +// returned. For example, a 1-D array of shape ``(N,)`` becomes a view +// of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a +// view of shape ``(M, N, 1)``. +// +// See Also +// -------- +// atleast_1d, atleast_2d +// +// Examples +// -------- +// >>> np.atleast_3d(3.0) +// array([[[3.]]]) +// +// >>> x = np.arange(3.0) +// >>> np.atleast_3d(x).shape +// (1, 3, 1) +// +// >>> x = np.arange(12.0).reshape(4,3) +// >>> np.atleast_3d(x).shape +// (4, 3, 1) +// >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself +// True +// +// >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): +// ... print(arr, arr.shape) # doctest: +SKIP +// ... 
+// [[[1] +// [2]]] (1, 2, 1) +// [[[1] +// [2]]] (1, 2, 1) +// [[[1 2]]] (1, 1, 2) +// +// +// +//go:linkname Atleast3d py.atleast_3d +func Atleast3d(__llgo_va_list ...interface{}) *py.Object +// +// Assemble an nd-array from nested lists of blocks. +// +// Blocks in the innermost lists are concatenated (see `concatenate`) along +// the last dimension (-1), then these are concatenated along the +// second-last dimension (-2), and so on until the outermost list is reached. +// +// Blocks can be of any dimension, but will not be broadcasted using the normal +// rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` +// the same for all blocks. This is primarily useful for working with scalars, +// and means that code like ``np.block([v, 1])`` is valid, where +// ``v.ndim == 1``. +// +// When the nested list is two levels deep, this allows block matrices to be +// constructed from their components. +// +// .. versionadded:: 1.13.0 +// +// Parameters +// ---------- +// arrays : nested list of array_like or scalars (but not tuples) +// If passed a single ndarray or scalar (a nested list of depth 0), this +// is returned unmodified (and not copied). +// +// Elements shapes must match along the appropriate axes (without +// broadcasting), but leading 1s will be prepended to the shape as +// necessary to make the dimensions match. +// +// Returns +// ------- +// block_array : ndarray +// The array assembled from the given blocks. +// +// The dimensionality of the output is equal to the greatest of: +// * the dimensionality of all the inputs +// * the depth to which the input list is nested +// +// Raises +// ------ +// ValueError +// * If list depths are mismatched - for instance, ``[[a, b], c]`` is +// illegal, and should be spelt ``[[a, b], [c]]`` +// * If lists are empty - for instance, ``[[a, b], []]`` +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. 
+// stack : Join a sequence of arrays along a new axis. +// vstack : Stack arrays in sequence vertically (row wise). +// hstack : Stack arrays in sequence horizontally (column wise). +// dstack : Stack arrays in sequence depth wise (along third axis). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// vsplit : Split an array into multiple sub-arrays vertically (row-wise). +// +// Notes +// ----- +// +// When called with only scalars, ``np.block`` is equivalent to an ndarray +// call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to +// ``np.array([[1, 2], [3, 4]])``. +// +// This function does not enforce that the blocks lie on a fixed grid. +// ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: +// +// AAAbb +// AAAbb +// cccDD +// +// But is also allowed to produce, for some ``a, b, c, d``:: +// +// AAAbb +// AAAbb +// cDDDD +// +// Since concatenation happens along the last axis first, `block` is _not_ +// capable of producing the following directly:: +// +// AAAbb +// cccbb +// cccDD +// +// Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is +// equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. +// +// Examples +// -------- +// The most common use of this function is to build a block matrix +// +// >>> A = np.eye(2) * 2 +// >>> B = np.eye(3) * 3 +// >>> np.block([ +// ... [A, np.zeros((2, 3))], +// ... [np.ones((3, 2)), B ] +// ... 
]) +// array([[2., 0., 0., 0., 0.], +// [0., 2., 0., 0., 0.], +// [1., 1., 3., 0., 0.], +// [1., 1., 0., 3., 0.], +// [1., 1., 0., 0., 3.]]) +// +// With a list of depth 1, `block` can be used as `hstack` +// +// >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) +// array([1, 2, 3]) +// +// >>> a = np.array([1, 2, 3]) +// >>> b = np.array([4, 5, 6]) +// >>> np.block([a, b, 10]) # hstack([a, b, 10]) +// array([ 1, 2, 3, 4, 5, 6, 10]) +// +// >>> A = np.ones((2, 2), int) +// >>> B = 2 * A +// >>> np.block([A, B]) # hstack([A, B]) +// array([[1, 1, 2, 2], +// [1, 1, 2, 2]]) +// +// With a list of depth 2, `block` can be used in place of `vstack`: +// +// >>> a = np.array([1, 2, 3]) +// >>> b = np.array([4, 5, 6]) +// >>> np.block([[a], [b]]) # vstack([a, b]) +// array([[1, 2, 3], +// [4, 5, 6]]) +// +// >>> A = np.ones((2, 2), int) +// >>> B = 2 * A +// >>> np.block([[A], [B]]) # vstack([A, B]) +// array([[1, 1], +// [1, 1], +// [2, 2], +// [2, 2]]) +// +// It can also be used in places of `atleast_1d` and `atleast_2d` +// +// >>> a = np.array(0) +// >>> b = np.array([1]) +// >>> np.block([a]) # atleast_1d(a) +// array([0]) +// >>> np.block([b]) # atleast_1d(b) +// array([1]) +// +// >>> np.block([[a]]) # atleast_2d(a) +// array([[0]]) +// >>> np.block([[b]]) # atleast_2d(b) +// array([[1]]) +// +// +// +// +//go:linkname Block py.block +func Block(arrays *py.Object) *py.Object +// +// Stack arrays in sequence horizontally (column wise). +// +// This is equivalent to concatenation along the second axis, except for 1-D +// arrays where it concatenates along the first axis. Rebuilds arrays divided +// by `hsplit`. +// +// This function makes most sense for arrays with up to 3 dimensions. For +// instance, for pixel-data with a height (first axis), width (second axis), +// and r/g/b channels (third axis). The functions `concatenate`, `stack` and +// `block` provide more general stacking and concatenation operations. 
+// +// Parameters +// ---------- +// tup : sequence of ndarrays +// The arrays must have the same shape along all but the second axis, +// except 1-D arrays which can be any length. +// +// dtype : str or dtype +// If provided, the destination array will have this dtype. Cannot be +// provided together with `out`. +// +// .. versionadded:: 1.24 +// +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Defaults to 'same_kind'. +// +// .. versionadded:: 1.24 +// +// Returns +// ------- +// stacked : ndarray +// The array formed by stacking the given arrays. +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. +// stack : Join a sequence of arrays along a new axis. +// block : Assemble an nd-array from nested lists of blocks. +// vstack : Stack arrays in sequence vertically (row wise). +// dstack : Stack arrays in sequence depth wise (along third axis). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// hsplit : Split an array into multiple sub-arrays horizontally (column-wise). +// +// Examples +// -------- +// >>> a = np.array((1,2,3)) +// >>> b = np.array((4,5,6)) +// >>> np.hstack((a,b)) +// array([1, 2, 3, 4, 5, 6]) +// >>> a = np.array([[1],[2],[3]]) +// >>> b = np.array([[4],[5],[6]]) +// >>> np.hstack((a,b)) +// array([[1, 4], +// [2, 5], +// [3, 6]]) +// +// +// +//go:linkname Hstack py.hstack +func Hstack(tup *py.Object) *py.Object +// +// Join a sequence of arrays along a new axis. +// +// The ``axis`` parameter specifies the index of the new axis in the +// dimensions of the result. For example, if ``axis=0`` it will be the first +// dimension and if ``axis=-1`` it will be the last dimension. +// +// .. versionadded:: 1.10.0 +// +// Parameters +// ---------- +// arrays : sequence of array_like +// Each array must have the same shape. 
+// +// axis : int, optional +// The axis in the result array along which the input arrays are stacked. +// +// out : ndarray, optional +// If provided, the destination to place the result. The shape must be +// correct, matching that of what stack would have returned if no +// out argument were specified. +// +// dtype : str or dtype +// If provided, the destination array will have this dtype. Cannot be +// provided together with `out`. +// +// .. versionadded:: 1.24 +// +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Defaults to 'same_kind'. +// +// .. versionadded:: 1.24 +// +// +// Returns +// ------- +// stacked : ndarray +// The stacked array has one more dimension than the input arrays. +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. +// block : Assemble an nd-array from nested lists of blocks. +// split : Split array into a list of multiple sub-arrays of equal size. +// +// Examples +// -------- +// >>> arrays = [np.random.randn(3, 4) for _ in range(10)] +// >>> np.stack(arrays, axis=0).shape +// (10, 3, 4) +// +// >>> np.stack(arrays, axis=1).shape +// (3, 10, 4) +// +// >>> np.stack(arrays, axis=2).shape +// (3, 4, 10) +// +// >>> a = np.array([1, 2, 3]) +// >>> b = np.array([4, 5, 6]) +// >>> np.stack((a, b)) +// array([[1, 2, 3], +// [4, 5, 6]]) +// +// >>> np.stack((a, b), axis=-1) +// array([[1, 4], +// [2, 5], +// [3, 6]]) +// +// +// +//go:linkname Stack py.stack +func Stack(arrays *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Stack arrays in sequence vertically (row wise). +// +// This is equivalent to concatenation along the first axis after 1-D arrays +// of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by +// `vsplit`. +// +// This function makes most sense for arrays with up to 3 dimensions. 
For +// instance, for pixel-data with a height (first axis), width (second axis), +// and r/g/b channels (third axis). The functions `concatenate`, `stack` and +// `block` provide more general stacking and concatenation operations. +// +// ``np.row_stack`` is an alias for `vstack`. They are the same function. +// +// Parameters +// ---------- +// tup : sequence of ndarrays +// The arrays must have the same shape along all but the first axis. +// 1-D arrays must have the same length. +// +// dtype : str or dtype +// If provided, the destination array will have this dtype. Cannot be +// provided together with `out`. +// +// .. versionadded:: 1.24 +// +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Defaults to 'same_kind'. +// +// .. versionadded:: 1.24 +// +// Returns +// ------- +// stacked : ndarray +// The array formed by stacking the given arrays, will be at least 2-D. +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. +// stack : Join a sequence of arrays along a new axis. +// block : Assemble an nd-array from nested lists of blocks. +// hstack : Stack arrays in sequence horizontally (column wise). +// dstack : Stack arrays in sequence depth wise (along third axis). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// vsplit : Split an array into multiple sub-arrays vertically (row-wise). 
+// +// Examples +// -------- +// >>> a = np.array([1, 2, 3]) +// >>> b = np.array([4, 5, 6]) +// >>> np.vstack((a,b)) +// array([[1, 2, 3], +// [4, 5, 6]]) +// +// >>> a = np.array([[1], [2], [3]]) +// >>> b = np.array([[4], [5], [6]]) +// >>> np.vstack((a,b)) +// array([[1], +// [2], +// [3], +// [4], +// [5], +// [6]]) +// +// +// +//go:linkname Vstack py.vstack +func Vstack(tup *py.Object) *py.Object +// +// einsum(subscripts, *operands, out=None, dtype=None, order='K', +// casting='safe', optimize=False) +// +// Evaluates the Einstein summation convention on the operands. +// +// Using the Einstein summation convention, many common multi-dimensional, +// linear algebraic array operations can be represented in a simple fashion. +// In *implicit* mode `einsum` computes these values. +// +// In *explicit* mode, `einsum` provides further flexibility to compute +// other array operations that might not be considered classical Einstein +// summation operations, by disabling, or forcing summation over specified +// subscript labels. +// +// See the notes and examples for clarification. +// +// Parameters +// ---------- +// subscripts : str +// Specifies the subscripts for summation as comma separated list of +// subscript labels. An implicit (classical Einstein summation) +// calculation is performed unless the explicit indicator '->' is +// included as well as subscript labels of the precise output form. +// operands : list of array_like +// These are the arrays for the operation. +// out : ndarray, optional +// If provided, the calculation is done into this array. +// dtype : {data-type, None}, optional +// If provided, forces the calculation to use the data type specified. +// Note that you may have to also give a more liberal `casting` +// parameter to allow the conversions. Default is None. +// order : {'C', 'F', 'A', 'K'}, optional +// Controls the memory layout of the output. 'C' means it should +// be C contiguous. 
'F' means it should be Fortran contiguous, +// 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. +// 'K' means it should be as close to the layout as the inputs as +// is possible, including arbitrarily permuted axes. +// Default is 'K'. +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Setting this to +// 'unsafe' is not recommended, as it can adversely affect accumulations. +// +// * 'no' means the data types should not be cast at all. +// * 'equiv' means only byte-order changes are allowed. +// * 'safe' means only casts which can preserve values are allowed. +// * 'same_kind' means only safe casts or casts within a kind, +// like float64 to float32, are allowed. +// * 'unsafe' means any data conversions may be done. +// +// Default is 'safe'. +// optimize : {False, True, 'greedy', 'optimal'}, optional +// Controls if intermediate optimization should occur. No optimization +// will occur if False and True will default to the 'greedy' algorithm. +// Also accepts an explicit contraction list from the ``np.einsum_path`` +// function. See ``np.einsum_path`` for more details. Defaults to False. +// +// Returns +// ------- +// output : ndarray +// The calculation based on the Einstein summation convention. +// +// See Also +// -------- +// einsum_path, dot, inner, outer, tensordot, linalg.multi_dot +// einops : +// similar verbose interface is provided by +// `einops `_ package to cover +// additional operations: transpose, reshape/flatten, repeat/tile, +// squeeze/unsqueeze and reductions. +// opt_einsum : +// `opt_einsum `_ +// optimizes contraction order for einsum-like expressions +// in backend-agnostic manner. +// +// Notes +// ----- +// .. versionadded:: 1.6.0 +// +// The Einstein summation convention can be used to compute +// many multi-dimensional, linear algebraic array operations. `einsum` +// provides a succinct way of representing these. 
+// +// A non-exhaustive list of these operations, +// which can be computed by `einsum`, is shown below along with examples: +// +// * Trace of an array, :py:func:`numpy.trace`. +// * Return a diagonal, :py:func:`numpy.diag`. +// * Array axis summations, :py:func:`numpy.sum`. +// * Transpositions and permutations, :py:func:`numpy.transpose`. +// * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. +// * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. +// * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. +// * Tensor contractions, :py:func:`numpy.tensordot`. +// * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. +// +// The subscripts string is a comma-separated list of subscript labels, +// where each label refers to a dimension of the corresponding operand. +// Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` +// is equivalent to :py:func:`np.inner(a,b) `. If a label +// appears only once, it is not summed, so ``np.einsum('i', a)`` produces a +// view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` +// describes traditional matrix multiplication and is equivalent to +// :py:func:`np.matmul(a,b) `. Repeated subscript labels in one +// operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent +// to :py:func:`np.trace(a) `. +// +// In *implicit mode*, the chosen subscripts are important +// since the axes of the output are reordered alphabetically. This +// means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while +// ``np.einsum('ji', a)`` takes its transpose. Additionally, +// ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, +// ``np.einsum('ij,jh', a, b)`` returns the transpose of the +// multiplication since subscript 'h' precedes subscript 'i'. 
+// +// In *explicit mode* the output can be directly controlled by +// specifying output subscript labels. This requires the +// identifier '->' as well as the list of output subscript labels. +// This feature increases the flexibility of the function since +// summing can be disabled or forced when required. The call +// ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, +// and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. +// The difference is that `einsum` does not allow broadcasting by default. +// Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the +// order of the output subscript labels and therefore returns matrix +// multiplication, unlike the example above in implicit mode. +// +// To enable and control broadcasting, use an ellipsis. Default +// NumPy-style broadcasting is done by adding an ellipsis +// to the left of each term, like ``np.einsum('...ii->...i', a)``. +// To take the trace along the first and last axes, +// you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix +// product with the left-most indices instead of rightmost, one can do +// ``np.einsum('ij...,jk...->ik...', a, b)``. +// +// When there is only one operand, no axes are summed, and no output +// parameter is provided, a view into the operand is returned instead +// of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` +// produces a view (changed in version 1.10.0). +// +// `einsum` also provides an alternative way to provide the subscripts +// and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. +// If the output shape is not provided in this format `einsum` will be +// calculated in implicit mode, otherwise it will be performed explicitly. +// The examples below have corresponding `einsum` calls with the two +// parameter methods. +// +// .. versionadded:: 1.10.0 +// +// Views returned from einsum are now writeable whenever the input array +// is writeable. 
For example, ``np.einsum('ijk...->kji...', a)`` will now +// have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` +// and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal +// of a 2D array. +// +// .. versionadded:: 1.12.0 +// +// Added the ``optimize`` argument which will optimize the contraction order +// of an einsum expression. For a contraction with three or more operands this +// can greatly increase the computational efficiency at the cost of a larger +// memory footprint during computation. +// +// Typically a 'greedy' algorithm is applied which empirical tests have shown +// returns the optimal path in the majority of cases. In some cases 'optimal' +// will return the superlative path through a more expensive, exhaustive search. +// For iterative calculations it may be advisable to calculate the optimal path +// once and reuse that path by supplying it as an argument. An example is given +// below. +// +// See :py:func:`numpy.einsum_path` for more details. 
+// +// Examples +// -------- +// >>> a = np.arange(25).reshape(5,5) +// >>> b = np.arange(5) +// >>> c = np.arange(6).reshape(2,3) +// +// Trace of a matrix: +// +// >>> np.einsum('ii', a) +// 60 +// >>> np.einsum(a, [0,0]) +// 60 +// >>> np.trace(a) +// 60 +// +// Extract the diagonal (requires explicit form): +// +// >>> np.einsum('ii->i', a) +// array([ 0, 6, 12, 18, 24]) +// >>> np.einsum(a, [0,0], [0]) +// array([ 0, 6, 12, 18, 24]) +// >>> np.diag(a) +// array([ 0, 6, 12, 18, 24]) +// +// Sum over an axis (requires explicit form): +// +// >>> np.einsum('ij->i', a) +// array([ 10, 35, 60, 85, 110]) +// >>> np.einsum(a, [0,1], [0]) +// array([ 10, 35, 60, 85, 110]) +// >>> np.sum(a, axis=1) +// array([ 10, 35, 60, 85, 110]) +// +// For higher dimensional arrays summing a single axis can be done with ellipsis: +// +// >>> np.einsum('...j->...', a) +// array([ 10, 35, 60, 85, 110]) +// >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) +// array([ 10, 35, 60, 85, 110]) +// +// Compute a matrix transpose, or reorder any number of axes: +// +// >>> np.einsum('ji', c) +// array([[0, 3], +// [1, 4], +// [2, 5]]) +// >>> np.einsum('ij->ji', c) +// array([[0, 3], +// [1, 4], +// [2, 5]]) +// >>> np.einsum(c, [1,0]) +// array([[0, 3], +// [1, 4], +// [2, 5]]) +// >>> np.transpose(c) +// array([[0, 3], +// [1, 4], +// [2, 5]]) +// +// Vector inner products: +// +// >>> np.einsum('i,i', b, b) +// 30 +// >>> np.einsum(b, [0], b, [0]) +// 30 +// >>> np.inner(b,b) +// 30 +// +// Matrix vector multiplication: +// +// >>> np.einsum('ij,j', a, b) +// array([ 30, 80, 130, 180, 230]) +// >>> np.einsum(a, [0,1], b, [1]) +// array([ 30, 80, 130, 180, 230]) +// >>> np.dot(a, b) +// array([ 30, 80, 130, 180, 230]) +// >>> np.einsum('...j,j', a, b) +// array([ 30, 80, 130, 180, 230]) +// +// Broadcasting and scalar multiplication: +// +// >>> np.einsum('..., ...', 3, c) +// array([[ 0, 3, 6], +// [ 9, 12, 15]]) +// >>> np.einsum(',ij', 3, c) +// array([[ 0, 3, 6], +// [ 9, 12, 15]]) +// >>> 
np.einsum(3, [Ellipsis], c, [Ellipsis]) +// array([[ 0, 3, 6], +// [ 9, 12, 15]]) +// >>> np.multiply(3, c) +// array([[ 0, 3, 6], +// [ 9, 12, 15]]) +// +// Vector outer product: +// +// >>> np.einsum('i,j', np.arange(2)+1, b) +// array([[0, 1, 2, 3, 4], +// [0, 2, 4, 6, 8]]) +// >>> np.einsum(np.arange(2)+1, [0], b, [1]) +// array([[0, 1, 2, 3, 4], +// [0, 2, 4, 6, 8]]) +// >>> np.outer(np.arange(2)+1, b) +// array([[0, 1, 2, 3, 4], +// [0, 2, 4, 6, 8]]) +// +// Tensor contraction: +// +// >>> a = np.arange(60.).reshape(3,4,5) +// >>> b = np.arange(24.).reshape(4,3,2) +// >>> np.einsum('ijk,jil->kl', a, b) +// array([[4400., 4730.], +// [4532., 4874.], +// [4664., 5018.], +// [4796., 5162.], +// [4928., 5306.]]) +// >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) +// array([[4400., 4730.], +// [4532., 4874.], +// [4664., 5018.], +// [4796., 5162.], +// [4928., 5306.]]) +// >>> np.tensordot(a,b, axes=([1,0],[0,1])) +// array([[4400., 4730.], +// [4532., 4874.], +// [4664., 5018.], +// [4796., 5162.], +// [4928., 5306.]]) +// +// Writeable returned arrays (since version 1.10.0): +// +// >>> a = np.zeros((3, 3)) +// >>> np.einsum('ii->i', a)[:] = 1 +// >>> a +// array([[1., 0., 0.], +// [0., 1., 0.], +// [0., 0., 1.]]) +// +// Example of ellipsis use: +// +// >>> a = np.arange(6).reshape((3,2)) +// >>> b = np.arange(12).reshape((4,3)) +// >>> np.einsum('ki,jk->ij', a, b) +// array([[10, 28, 46, 64], +// [13, 40, 67, 94]]) +// >>> np.einsum('ki,...k->i...', a, b) +// array([[10, 28, 46, 64], +// [13, 40, 67, 94]]) +// >>> np.einsum('k...,jk', a, b) +// array([[10, 28, 46, 64], +// [13, 40, 67, 94]]) +// +// Chained array operations. For more complicated contractions, speed ups +// might be achieved by repeatedly computing a 'greedy' path or pre-computing the +// 'optimal' path and repeatedly applying it, using an +// `einsum_path` insertion (since version 1.12.0). 
Performance improvements can be +// particularly significant with larger arrays: +// +// >>> a = np.ones(64).reshape(2,4,8) +// +// Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) +// +// >>> for iteration in range(500): +// ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) +// +// Sub-optimal `einsum` (due to repeated path calculation time): ~330ms +// +// >>> for iteration in range(500): +// ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') +// +// Greedy `einsum` (faster optimal path approximation): ~160ms +// +// >>> for iteration in range(500): +// ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') +// +// Optimal `einsum` (best usage pattern in some use cases): ~110ms +// +// >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] +// >>> for iteration in range(500): +// ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) +// +// +// +//go:linkname Einsum py.einsum +func Einsum(__llgo_va_list ...interface{}) *py.Object +// +// einsum_path(subscripts, *operands, optimize='greedy') +// +// Evaluates the lowest cost contraction order for an einsum expression by +// considering the creation of intermediate arrays. +// +// Parameters +// ---------- +// subscripts : str +// Specifies the subscripts for summation. +// *operands : list of array_like +// These are the arrays for the operation. +// optimize : {bool, list, tuple, 'greedy', 'optimal'} +// Choose the type of path. If a tuple is provided, the second argument is +// assumed to be the maximum intermediate size created. If only a single +// argument is provided the largest input or output array size is used +// as a maximum intermediate size. 
+// +// * if a list is given that starts with ``einsum_path``, uses this as the +// contraction path +// * if False no optimization is taken +// * if True defaults to the 'greedy' algorithm +// * 'optimal' An algorithm that combinatorially explores all possible +// ways of contracting the listed tensors and chooses the least costly +// path. Scales exponentially with the number of terms in the +// contraction. +// * 'greedy' An algorithm that chooses the best pair contraction +// at each step. Effectively, this algorithm searches the largest inner, +// Hadamard, and then outer products at each step. Scales cubically with +// the number of terms in the contraction. Equivalent to the 'optimal' +// path for most contractions. +// +// Default is 'greedy'. +// +// Returns +// ------- +// path : list of tuples +// A list representation of the einsum path. +// string_repr : str +// A printable representation of the einsum path. +// +// Notes +// ----- +// The resulting path indicates which terms of the input contraction should be +// contracted first, the result of this contraction is then appended to the +// end of the contraction list. This list can then be iterated over until all +// intermediate contractions are complete. +// +// See Also +// -------- +// einsum, linalg.multi_dot +// +// Examples +// -------- +// +// We can begin with a chain dot example. In this case, it is optimal to +// contract the ``b`` and ``c`` tensors first as represented by the first +// element of the path ``(1, 2)``. The resulting tensor is added to the end +// of the contraction and the remaining contraction ``(0, 1)`` is then +// completed. 
+// +// >>> np.random.seed(123) +// >>> a = np.random.rand(2, 2) +// >>> b = np.random.rand(2, 5) +// >>> c = np.random.rand(5, 2) +// >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') +// >>> print(path_info[0]) +// ['einsum_path', (1, 2), (0, 1)] +// >>> print(path_info[1]) +// Complete contraction: ij,jk,kl->il # may vary +// Naive scaling: 4 +// Optimized scaling: 3 +// Naive FLOP count: 1.600e+02 +// Optimized FLOP count: 5.600e+01 +// Theoretical speedup: 2.857 +// Largest intermediate: 4.000e+00 elements +// ------------------------------------------------------------------------- +// scaling current remaining +// ------------------------------------------------------------------------- +// 3 kl,jk->jl ij,jl->il +// 3 jl,ij->il il->il +// +// +// A more complex index transformation example. +// +// >>> I = np.random.rand(10, 10, 10, 10) +// >>> C = np.random.rand(10, 10) +// >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, +// ... optimize='greedy') +// +// >>> print(path_info[0]) +// ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] +// >>> print(path_info[1]) +// Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary +// Naive scaling: 8 +// Optimized scaling: 5 +// Naive FLOP count: 8.000e+08 +// Optimized FLOP count: 8.000e+05 +// Theoretical speedup: 1000.000 +// Largest intermediate: 1.000e+04 elements +// -------------------------------------------------------------------------- +// scaling current remaining +// -------------------------------------------------------------------------- +// 5 abcd,ea->bcde fb,gc,hd,bcde->efgh +// 5 bcde,fb->cdef gc,hd,cdef->efgh +// 5 cdef,gc->defg hd,defg->efgh +// 5 defg,hd->efgh efgh->efgh +// +// +//go:linkname EinsumPath py.einsum_path +func EinsumPath(__llgo_va_list ...interface{}) *py.Object +// +// Check for a complex type or an array of complex numbers. +// +// The type of the input is checked, not the value. 
Even if the input +// has an imaginary part equal to zero, `iscomplexobj` evaluates to True. +// +// Parameters +// ---------- +// x : any +// The input can be of any type and shape. +// +// Returns +// ------- +// iscomplexobj : bool +// The return value, True if `x` is of a complex type or has at least +// one complex element. +// +// See Also +// -------- +// isrealobj, iscomplex +// +// Examples +// -------- +// >>> np.iscomplexobj(1) +// False +// >>> np.iscomplexobj(1+0j) +// True +// >>> np.iscomplexobj([3, 1+0j, True]) +// True +// +// +// +//go:linkname Iscomplexobj py.iscomplexobj +func Iscomplexobj(x *py.Object) *py.Object +// +// Return True if x is a not complex type or an array of complex numbers. +// +// The type of the input is checked, not the value. So even if the input +// has an imaginary part equal to zero, `isrealobj` evaluates to False +// if the data type is complex. +// +// Parameters +// ---------- +// x : any +// The input can be of any type and shape. +// +// Returns +// ------- +// y : bool +// The return value, False if `x` is of a complex type. +// +// See Also +// -------- +// iscomplexobj, isreal +// +// Notes +// ----- +// The function is only meant for arrays with numerical values but it +// accepts all other objects. Since it assumes array input, the return +// value of other objects may be True. +// +// >>> np.isrealobj('A string') +// True +// >>> np.isrealobj(False) +// True +// >>> np.isrealobj(None) +// True +// +// Examples +// -------- +// >>> np.isrealobj(1) +// True +// >>> np.isrealobj(1+0j) +// False +// >>> np.isrealobj([3, 1+0j, True]) +// False +// +// +// +//go:linkname Isrealobj py.isrealobj +func Isrealobj(x *py.Object) *py.Object +// +// Return the imaginary part of the complex argument. +// +// Parameters +// ---------- +// val : array_like +// Input array. +// +// Returns +// ------- +// out : ndarray or scalar +// The imaginary component of the complex argument. 
If `val` is real, +// the type of `val` is used for the output. If `val` has complex +// elements, the returned type is float. +// +// See Also +// -------- +// real, angle, real_if_close +// +// Examples +// -------- +// >>> a = np.array([1+2j, 3+4j, 5+6j]) +// >>> a.imag +// array([2., 4., 6.]) +// >>> a.imag = np.array([8, 10, 12]) +// >>> a +// array([1. +8.j, 3.+10.j, 5.+12.j]) +// >>> np.imag(1 + 1j) +// 1.0 +// +// +// +//go:linkname Imag py.imag +func Imag(val *py.Object) *py.Object +// +// Returns a bool array, where True if input element is complex. +// +// What is tested is whether the input has a non-zero imaginary part, not if +// the input type is complex. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// +// Returns +// ------- +// out : ndarray of bools +// Output array. +// +// See Also +// -------- +// isreal +// iscomplexobj : Return True if x is a complex type or an array of complex +// numbers. +// +// Examples +// -------- +// >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) +// array([ True, False, False, False, False, True]) +// +// +// +//go:linkname Iscomplex py.iscomplex +func Iscomplex(x *py.Object) *py.Object +// +// Returns a bool array, where True if input element is real. +// +// If element has complex type with zero complex part, the return value +// for that element is True. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// +// Returns +// ------- +// out : ndarray, bool +// Boolean array of same shape as `x`. +// +// Notes +// ----- +// `isreal` may behave unexpectedly for string or object arrays (see examples) +// +// See Also +// -------- +// iscomplex +// isrealobj : Return True if x is not a complex type. +// +// Examples +// -------- +// >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) +// >>> np.isreal(a) +// array([False, True, True, True, True, False]) +// +// The function does not work on string arrays. 
+// +// >>> a = np.array([2j, "a"], dtype="U") +// >>> np.isreal(a) # Warns about non-elementwise comparison +// False +// +// Returns True for all elements in input array of ``dtype=object`` even if +// any of the elements is complex. +// +// >>> a = np.array([1, "2", 3+4j], dtype=object) +// >>> np.isreal(a) +// array([ True, True, True]) +// +// isreal should not be used with object arrays +// +// >>> a = np.array([1+2j, 2+1j], dtype=object) +// >>> np.isreal(a) +// array([ True, True]) +// +// +// +//go:linkname Isreal py.isreal +func Isreal(x *py.Object) *py.Object +// +// Replace NaN with zero and infinity with large finite numbers (default +// behaviour) or with the numbers defined by the user using the `nan`, +// `posinf` and/or `neginf` keywords. +// +// If `x` is inexact, NaN is replaced by zero or by the user defined value in +// `nan` keyword, infinity is replaced by the largest finite floating point +// values representable by ``x.dtype`` or by the user defined value in +// `posinf` keyword and -infinity is replaced by the most negative finite +// floating point values representable by ``x.dtype`` or by the user defined +// value in `neginf` keyword. +// +// For complex dtypes, the above is applied to each of the real and +// imaginary components of `x` separately. +// +// If `x` is not inexact, then no replacements are made. +// +// Parameters +// ---------- +// x : scalar or array_like +// Input data. +// copy : bool, optional +// Whether to create a copy of `x` (True) or to replace values +// in-place (False). The in-place operation only occurs if +// casting to an array does not require a copy. +// Default is True. +// +// .. versionadded:: 1.13 +// nan : int, float, optional +// Value to be used to fill NaN values. If no value is passed +// then NaN values will be replaced with 0.0. +// +// .. versionadded:: 1.17 +// posinf : int, float, optional +// Value to be used to fill positive infinity values. 
If no value is +// passed then positive infinity values will be replaced with a very +// large number. +// +// .. versionadded:: 1.17 +// neginf : int, float, optional +// Value to be used to fill negative infinity values. If no value is +// passed then negative infinity values will be replaced with a very +// small (or negative) number. +// +// .. versionadded:: 1.17 +// +// +// +// Returns +// ------- +// out : ndarray +// `x`, with the non-finite values replaced. If `copy` is False, this may +// be `x` itself. +// +// See Also +// -------- +// isinf : Shows which elements are positive or negative infinity. +// isneginf : Shows which elements are negative infinity. +// isposinf : Shows which elements are positive infinity. +// isnan : Shows which elements are Not a Number (NaN). +// isfinite : Shows which elements are finite (not NaN, not infinity) +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). This means that Not a Number is not equivalent to infinity. 
+// +// Examples +// -------- +// >>> np.nan_to_num(np.inf) +// 1.7976931348623157e+308 +// >>> np.nan_to_num(-np.inf) +// -1.7976931348623157e+308 +// >>> np.nan_to_num(np.nan) +// 0.0 +// >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) +// >>> np.nan_to_num(x) +// array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary +// -1.28000000e+002, 1.28000000e+002]) +// >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) +// array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, +// -1.2800000e+02, 1.2800000e+02]) +// >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) +// array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary +// -1.28000000e+002, 1.28000000e+002]) +// >>> np.nan_to_num(y) +// array([ 1.79769313e+308 +0.00000000e+000j, # may vary +// 0.00000000e+000 +0.00000000e+000j, +// 0.00000000e+000 +1.79769313e+308j]) +// >>> np.nan_to_num(y, nan=111111, posinf=222222) +// array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) +// +// +//go:linkname NanToNum py.nan_to_num +func NanToNum(x *py.Object, copy *py.Object, nan *py.Object, posinf *py.Object, neginf *py.Object) *py.Object +// +// Return the real part of the complex argument. +// +// Parameters +// ---------- +// val : array_like +// Input array. +// +// Returns +// ------- +// out : ndarray or scalar +// The real component of the complex argument. If `val` is real, the type +// of `val` is used for the output. If `val` has complex elements, the +// returned type is float. 
+// +// See Also +// -------- +// real_if_close, imag, angle +// +// Examples +// -------- +// >>> a = np.array([1+2j, 3+4j, 5+6j]) +// >>> a.real +// array([1., 3., 5.]) +// >>> a.real = 9 +// >>> a +// array([9.+2.j, 9.+4.j, 9.+6.j]) +// >>> a.real = np.array([9, 8, 7]) +// >>> a +// array([9.+2.j, 8.+4.j, 7.+6.j]) +// >>> np.real(1 + 1j) +// 1.0 +// +// +// +//go:linkname Real py.real +func Real(val *py.Object) *py.Object +// +// If input is complex with all imaginary parts close to zero, return +// real parts. +// +// "Close to zero" is defined as `tol` * (machine epsilon of the type for +// `a`). +// +// Parameters +// ---------- +// a : array_like +// Input array. +// tol : float +// Tolerance in machine epsilons for the complex part of the elements +// in the array. If the tolerance is <=1, then the absolute tolerance +// is used. +// +// Returns +// ------- +// out : ndarray +// If `a` is real, the type of `a` is used for the output. If `a` +// has complex elements, the returned type is float. +// +// See Also +// -------- +// real, imag, angle +// +// Notes +// ----- +// Machine epsilon varies from machine to machine and between data types +// but Python floats on most platforms have a machine epsilon equal to +// 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print +// out the machine epsilon for floats. +// +// Examples +// -------- +// >>> np.finfo(float).eps +// 2.2204460492503131e-16 # may vary +// +// >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) +// array([2.1, 5.2]) +// >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) +// array([2.1+4.e-13j, 5.2 + 3e-15j]) +// +// +// +//go:linkname RealIfClose py.real_if_close +func RealIfClose(a *py.Object, tol *py.Object) *py.Object +// +// Return a description for the given data type code. +// +// Parameters +// ---------- +// char : str +// Data type code. +// +// Returns +// ------- +// out : str +// Description of the input data type code. 
+// +// See Also +// -------- +// dtype, typecodes +// +// Examples +// -------- +// >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', +// ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] +// >>> for typechar in typechars: +// ... print(typechar, ' : ', np.typename(typechar)) +// ... +// S1 : character +// ? : bool +// B : unsigned char +// D : complex double precision +// G : complex long double precision +// F : complex single precision +// I : unsigned integer +// H : unsigned short +// L : unsigned long integer +// O : object +// Q : unsigned long long integer +// S : string +// U : unicode +// V : void +// b : signed char +// d : double precision +// g : long precision +// f : single precision +// i : integer +// h : short +// l : long integer +// q : long long integer +// +// +// +//go:linkname Typename py.typename +func Typename(char *py.Object) *py.Object +// +// Return an array converted to a float type. +// +// Parameters +// ---------- +// a : array_like +// The input array. +// dtype : str or dtype object, optional +// Float type code to coerce input array `a`. If `dtype` is one of the +// 'int' dtypes, it is replaced with float64. +// +// Returns +// ------- +// out : ndarray +// The input `a` as a float ndarray. +// +// Examples +// -------- +// >>> np.asfarray([2, 3]) +// array([2., 3.]) +// >>> np.asfarray([2, 3], dtype='float') +// array([2., 3.]) +// >>> np.asfarray([2, 3], dtype='int8') +// array([2., 3.]) +// +// +// +//go:linkname Asfarray py.asfarray +func Asfarray(a *py.Object, dtype *py.Object) *py.Object +// +// Return the character for the minimum-size type to which given types can +// be safely cast. +// +// The returned type character must represent the smallest size dtype such +// that an array of the returned type can handle the data from an array of +// all types in `typechars` (or if `typechars` is an array, then its +// dtype.char). 
+// +// Parameters +// ---------- +// typechars : list of str or array_like +// If a list of strings, each string should represent a dtype. +// If array_like, the character representation of the array dtype is used. +// typeset : str or list of str, optional +// The set of characters that the returned character is chosen from. +// The default set is 'GDFgdf'. +// default : str, optional +// The default character, this is returned if none of the characters in +// `typechars` matches a character in `typeset`. +// +// Returns +// ------- +// typechar : str +// The character representing the minimum-size type that was found. +// +// See Also +// -------- +// dtype, sctype2char, maximum_sctype +// +// Examples +// -------- +// >>> np.mintypecode(['d', 'f', 'S']) +// 'd' +// >>> x = np.array([1.1, 2-3.j]) +// >>> np.mintypecode(x) +// 'D' +// +// >>> np.mintypecode('abceh', default='G') +// 'G' +// +// +// +//go:linkname Mintypecode py.mintypecode +func Mintypecode(typechars *py.Object, typeset *py.Object, default_ *py.Object) *py.Object +// +// Return a scalar type which is common to the input arrays. +// +// The return type will always be an inexact (i.e. floating point) scalar +// type, even if all the arrays are integer arrays. If one of the inputs is +// an integer array, the minimum precision type that is returned is a +// 64-bit floating point dtype. +// +// All input arrays except int64 and uint64 can be safely cast to the +// returned dtype without loss of information. +// +// Parameters +// ---------- +// array1, array2, ... : ndarrays +// Input arrays. +// +// Returns +// ------- +// out : data type code +// Data type code. 
+// +// See Also +// -------- +// dtype, mintypecode +// +// Examples +// -------- +// >>> np.common_type(np.arange(2, dtype=np.float32)) +// +// >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) +// +// >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) +// +// +// +// +//go:linkname CommonType py.common_type +func CommonType(__llgo_va_list ...interface{}) *py.Object +// +// ravel_multi_index(multi_index, dims, mode='raise', order='C') +// +// Converts a tuple of index arrays into an array of flat +// indices, applying boundary modes to the multi-index. +// +// Parameters +// ---------- +// multi_index : tuple of array_like +// A tuple of integer arrays, one array for each dimension. +// dims : tuple of ints +// The shape of array into which the indices from ``multi_index`` apply. +// mode : {'raise', 'wrap', 'clip'}, optional +// Specifies how out-of-bounds indices are handled. Can specify +// either one mode or a tuple of modes, one mode per index. +// +// * 'raise' -- raise an error (default) +// * 'wrap' -- wrap around +// * 'clip' -- clip to the range +// +// In 'clip' mode, a negative index which would normally +// wrap will clip to 0 instead. +// order : {'C', 'F'}, optional +// Determines whether the multi-index should be viewed as +// indexing in row-major (C-style) or column-major +// (Fortran-style) order. +// +// Returns +// ------- +// raveled_indices : ndarray +// An array of indices into the flattened version of an array +// of dimensions ``dims``. +// +// See Also +// -------- +// unravel_index +// +// Notes +// ----- +// .. 
versionadded:: 1.6.0 +// +// Examples +// -------- +// >>> arr = np.array([[3,6,6],[4,5,1]]) +// >>> np.ravel_multi_index(arr, (7,6)) +// array([22, 41, 37]) +// >>> np.ravel_multi_index(arr, (7,6), order='F') +// array([31, 41, 13]) +// >>> np.ravel_multi_index(arr, (4,6), mode='clip') +// array([22, 23, 19]) +// >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) +// array([12, 13, 13]) +// +// >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) +// 1621 +// +// +//go:linkname RavelMultiIndex py.ravel_multi_index +func RavelMultiIndex(multiIndex *py.Object, dims *py.Object, mode *py.Object, order *py.Object) *py.Object +// +// unravel_index(indices, shape, order='C') +// +// Converts a flat index or array of flat indices into a tuple +// of coordinate arrays. +// +// Parameters +// ---------- +// indices : array_like +// An integer array whose elements are indices into the flattened +// version of an array of dimensions ``shape``. Before version 1.6.0, +// this function accepted just one index value. +// shape : tuple of ints +// The shape of the array to use for unraveling ``indices``. +// +// .. versionchanged:: 1.16.0 +// Renamed from ``dims`` to ``shape``. +// +// order : {'C', 'F'}, optional +// Determines whether the indices should be viewed as indexing in +// row-major (C-style) or column-major (Fortran-style) order. +// +// .. versionadded:: 1.6.0 +// +// Returns +// ------- +// unraveled_coords : tuple of ndarray +// Each array in the tuple has the same shape as the ``indices`` +// array. 
+// +// See Also +// -------- +// ravel_multi_index +// +// Examples +// -------- +// >>> np.unravel_index([22, 41, 37], (7,6)) +// (array([3, 6, 6]), array([4, 5, 1])) +// >>> np.unravel_index([31, 41, 13], (7,6), order='F') +// (array([3, 6, 6]), array([4, 5, 1])) +// +// >>> np.unravel_index(1621, (6,7,8,9)) +// (3, 1, 4, 1) +// +// +// +//go:linkname UnravelIndex py.unravel_index +func UnravelIndex(indices *py.Object, shape *py.Object, order *py.Object) *py.Object +// +// Construct an open mesh from multiple sequences. +// +// This function takes N 1-D sequences and returns N outputs with N +// dimensions each, such that the shape is 1 in all but one dimension +// and the dimension with the non-unit shape value cycles through all +// N dimensions. +// +// Using `ix_` one can quickly construct index arrays that will index +// the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array +// ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. +// +// Parameters +// ---------- +// args : 1-D sequences +// Each sequence should be of integer or boolean type. +// Boolean sequences will be interpreted as boolean masks for the +// corresponding dimension (equivalent to passing in +// ``np.nonzero(boolean_sequence)``). +// +// Returns +// ------- +// out : tuple of ndarrays +// N arrays with N dimensions each, with N the number of input +// sequences. Together these arrays form an open mesh. 
+// +// See Also +// -------- +// ogrid, mgrid, meshgrid +// +// Examples +// -------- +// >>> a = np.arange(10).reshape(2, 5) +// >>> a +// array([[0, 1, 2, 3, 4], +// [5, 6, 7, 8, 9]]) +// >>> ixgrid = np.ix_([0, 1], [2, 4]) +// >>> ixgrid +// (array([[0], +// [1]]), array([[2, 4]])) +// >>> ixgrid[0].shape, ixgrid[1].shape +// ((2, 1), (1, 2)) +// >>> a[ixgrid] +// array([[2, 4], +// [7, 9]]) +// +// >>> ixgrid = np.ix_([True, True], [2, 4]) +// >>> a[ixgrid] +// array([[2, 4], +// [7, 9]]) +// >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) +// >>> a[ixgrid] +// array([[2, 4], +// [7, 9]]) +// +// +// +//go:linkname Ix_ py.ix_ +func Ix_(__llgo_va_list ...interface{}) *py.Object +// Fill the main diagonal of the given array of any dimensionality. +// +// For an array `a` with ``a.ndim >= 2``, the diagonal is the list of +// locations with indices ``a[i, ..., i]`` all identical. This function +// modifies the input array in-place, it does not return a value. +// +// Parameters +// ---------- +// a : array, at least 2-D. +// Array whose diagonal is to be filled, it gets modified in-place. +// +// val : scalar or array_like +// Value(s) to write on the diagonal. If `val` is scalar, the value is +// written along the diagonal. If array-like, the flattened `val` is +// written along the diagonal, repeating if necessary to fill all +// diagonal entries. +// +// wrap : bool +// For tall matrices in NumPy version up to 1.6.2, the +// diagonal "wrapped" after N columns. You can have this behavior +// with this option. This affects only tall matrices. +// +// See also +// -------- +// diag_indices, diag_indices_from +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// This functionality can be obtained via `diag_indices`, but internally +// this version uses a much faster implementation that never constructs the +// indices and uses simple slicing. 
+// +// Examples +// -------- +// >>> a = np.zeros((3, 3), int) +// >>> np.fill_diagonal(a, 5) +// >>> a +// array([[5, 0, 0], +// [0, 5, 0], +// [0, 0, 5]]) +// +// The same function can operate on a 4-D array: +// +// >>> a = np.zeros((3, 3, 3, 3), int) +// >>> np.fill_diagonal(a, 4) +// +// We only show a few blocks for clarity: +// +// >>> a[0, 0] +// array([[4, 0, 0], +// [0, 0, 0], +// [0, 0, 0]]) +// >>> a[1, 1] +// array([[0, 0, 0], +// [0, 4, 0], +// [0, 0, 0]]) +// >>> a[2, 2] +// array([[0, 0, 0], +// [0, 0, 0], +// [0, 0, 4]]) +// +// The wrap option affects only tall matrices: +// +// >>> # tall matrices no wrap +// >>> a = np.zeros((5, 3), int) +// >>> np.fill_diagonal(a, 4) +// >>> a +// array([[4, 0, 0], +// [0, 4, 0], +// [0, 0, 4], +// [0, 0, 0], +// [0, 0, 0]]) +// +// >>> # tall matrices wrap +// >>> a = np.zeros((5, 3), int) +// >>> np.fill_diagonal(a, 4, wrap=True) +// >>> a +// array([[4, 0, 0], +// [0, 4, 0], +// [0, 0, 4], +// [0, 0, 0], +// [4, 0, 0]]) +// +// >>> # wide matrices +// >>> a = np.zeros((3, 5), int) +// >>> np.fill_diagonal(a, 4, wrap=True) +// >>> a +// array([[4, 0, 0, 0, 0], +// [0, 4, 0, 0, 0], +// [0, 0, 4, 0, 0]]) +// +// The anti-diagonal can be filled by reversing the order of elements +// using either `numpy.flipud` or `numpy.fliplr`. +// +// >>> a = np.zeros((3, 3), int); +// >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip +// >>> a +// array([[0, 0, 1], +// [0, 2, 0], +// [3, 0, 0]]) +// >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip +// >>> a +// array([[0, 0, 3], +// [0, 2, 0], +// [1, 0, 0]]) +// +// Note that the order in which the diagonal is filled varies depending +// on the flip function. +// +// +//go:linkname FillDiagonal py.fill_diagonal +func FillDiagonal(a *py.Object, val *py.Object, wrap *py.Object) *py.Object +// +// Return the indices to access the main diagonal of an array. 
+// +// This returns a tuple of indices that can be used to access the main +// diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape +// (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for +// ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` +// for ``i = [0..n-1]``. +// +// Parameters +// ---------- +// n : int +// The size, along each dimension, of the arrays for which the returned +// indices can be used. +// +// ndim : int, optional +// The number of dimensions. +// +// See Also +// -------- +// diag_indices_from +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// Examples +// -------- +// Create a set of indices to access the diagonal of a (4, 4) array: +// +// >>> di = np.diag_indices(4) +// >>> di +// (array([0, 1, 2, 3]), array([0, 1, 2, 3])) +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// >>> a[di] = 100 +// >>> a +// array([[100, 1, 2, 3], +// [ 4, 100, 6, 7], +// [ 8, 9, 100, 11], +// [ 12, 13, 14, 100]]) +// +// Now, we create indices to manipulate a 3-D array: +// +// >>> d3 = np.diag_indices(2, 3) +// >>> d3 +// (array([0, 1]), array([0, 1]), array([0, 1])) +// +// And use it to set the diagonal of an array of zeros to 1: +// +// >>> a = np.zeros((2, 2, 2), dtype=int) +// >>> a[d3] = 1 +// >>> a +// array([[[1, 0], +// [0, 0]], +// [[0, 0], +// [0, 1]]]) +// +// +// +//go:linkname DiagIndices py.diag_indices +func DiagIndices(n *py.Object, ndim *py.Object) *py.Object +// +// Return the indices to access the main diagonal of an n-dimensional array. +// +// See `diag_indices` for full details. +// +// Parameters +// ---------- +// arr : array, at least 2-D +// +// See Also +// -------- +// diag_indices +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// Examples +// -------- +// +// Create a 4 by 4 array. 
+// +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// +// Get the indices of the diagonal elements. +// +// >>> di = np.diag_indices_from(a) +// >>> di +// (array([0, 1, 2, 3]), array([0, 1, 2, 3])) +// +// >>> a[di] +// array([ 0, 5, 10, 15]) +// +// This is simply syntactic sugar for diag_indices. +// +// >>> np.diag_indices(a.shape[0]) +// (array([0, 1, 2, 3]), array([0, 1, 2, 3])) +// +// +// +//go:linkname DiagIndicesFrom py.diag_indices_from +func DiagIndicesFrom(arr *py.Object) *py.Object +// +// Return an array drawn from elements in choicelist, depending on conditions. +// +// Parameters +// ---------- +// condlist : list of bool ndarrays +// The list of conditions which determine from which array in `choicelist` +// the output elements are taken. When multiple conditions are satisfied, +// the first one encountered in `condlist` is used. +// choicelist : list of ndarrays +// The list of arrays from which the output elements are taken. It has +// to be of the same length as `condlist`. +// default : scalar, optional +// The element inserted in `output` when all conditions evaluate to False. +// +// Returns +// ------- +// output : ndarray +// The output at position m is the m-th element of the array in +// `choicelist` where the m-th element of the corresponding array in +// `condlist` is True. +// +// See Also +// -------- +// where : Return elements from one of two arrays depending on condition. 
+// take, choose, compress, diag, diagonal +// +// Examples +// -------- +// >>> x = np.arange(6) +// >>> condlist = [x<3, x>3] +// >>> choicelist = [x, x**2] +// >>> np.select(condlist, choicelist, 42) +// array([ 0, 1, 2, 42, 16, 25]) +// +// >>> condlist = [x<=4, x>3] +// >>> choicelist = [x, x**2] +// >>> np.select(condlist, choicelist, 55) +// array([ 0, 1, 2, 3, 4, 25]) +// +// +// +//go:linkname Select py.select +func Select(condlist *py.Object, choicelist *py.Object, default_ *py.Object) *py.Object +// +// Evaluate a piecewise-defined function. +// +// Given a set of conditions and corresponding functions, evaluate each +// function on the input data wherever its condition is true. +// +// Parameters +// ---------- +// x : ndarray or scalar +// The input domain. +// condlist : list of bool arrays or bool scalars +// Each boolean array corresponds to a function in `funclist`. Wherever +// `condlist[i]` is True, `funclist[i](x)` is used as the output value. +// +// Each boolean array in `condlist` selects a piece of `x`, +// and should therefore be of the same shape as `x`. +// +// The length of `condlist` must correspond to that of `funclist`. +// If one extra function is given, i.e. if +// ``len(funclist) == len(condlist) + 1``, then that extra function +// is the default value, used wherever all conditions are false. +// funclist : list of callables, f(x,*args,**kw), or scalars +// Each function is evaluated over `x` wherever its corresponding +// condition is True. It should take a 1d array as input and give an 1d +// array or a scalar value as output. If, instead of a callable, +// a scalar is provided then a constant function (``lambda x: scalar``) is +// assumed. +// args : tuple, optional +// Any further arguments given to `piecewise` are passed to the functions +// upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then +// each function is called as ``f(x, 1, 'a')``. 
+// kw : dict, optional +// Keyword arguments used in calling `piecewise` are passed to the +// functions upon execution, i.e., if called +// ``piecewise(..., ..., alpha=1)``, then each function is called as +// ``f(x, alpha=1)``. +// +// Returns +// ------- +// out : ndarray +// The output is the same shape and type as x and is found by +// calling the functions in `funclist` on the appropriate portions of `x`, +// as defined by the boolean arrays in `condlist`. Portions not covered +// by any condition have a default value of 0. +// +// +// See Also +// -------- +// choose, select, where +// +// Notes +// ----- +// This is similar to choose or select, except that functions are +// evaluated on elements of `x` that satisfy the corresponding condition from +// `condlist`. +// +// The result is:: +// +// |-- +// |funclist[0](x[condlist[0]]) +// out = |funclist[1](x[condlist[1]]) +// |... +// |funclist[n2](x[condlist[n2]]) +// |-- +// +// Examples +// -------- +// Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. +// +// >>> x = np.linspace(-2.5, 2.5, 6) +// >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) +// array([-1., -1., -1., 1., 1., 1.]) +// +// Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for +// ``x >= 0``. +// +// >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) +// array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) +// +// Apply the same function to a scalar value. +// +// >>> y = -2 +// >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) +// array(2) +// +// +// +//go:linkname Piecewise py.piecewise +func Piecewise(x *py.Object, condlist *py.Object, funclist *py.Object, __llgo_va_list ...interface{}) *py.Object +// +// Trim the leading and/or trailing zeros from a 1-D array or sequence. +// +// Parameters +// ---------- +// filt : 1-D array or sequence +// Input array. +// trim : str, optional +// A string with 'f' representing trim from front and 'b' to trim from +// back. 
Default is 'fb', trim zeros from both front and back of the +// array. +// +// Returns +// ------- +// trimmed : 1-D array or sequence +// The result of trimming the input. The input data type is preserved. +// +// Examples +// -------- +// >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) +// >>> np.trim_zeros(a) +// array([1, 2, 3, 0, 2, 1]) +// +// >>> np.trim_zeros(a, 'b') +// array([0, 0, 0, ..., 0, 2, 1]) +// +// The input data type is preserved, list/tuple in means list/tuple out. +// +// >>> np.trim_zeros([0, 1, 2, 0]) +// [1, 2] +// +// +// +//go:linkname TrimZeros py.trim_zeros +func TrimZeros(filt *py.Object, trim *py.Object) *py.Object +// +// Return an array copy of the given object. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// order : {'C', 'F', 'A', 'K'}, optional +// Controls the memory layout of the copy. 'C' means C-order, +// 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, +// 'C' otherwise. 'K' means match the layout of `a` as closely +// as possible. (Note that this function and :meth:`ndarray.copy` are very +// similar, but have different default values for their order= +// arguments.) +// subok : bool, optional +// If True, then sub-classes will be passed-through, otherwise the +// returned array will be forced to be a base-class array (defaults to False). +// +// .. versionadded:: 1.19.0 +// +// Returns +// ------- +// arr : ndarray +// Array interpretation of `a`. 
+// +// See Also +// -------- +// ndarray.copy : Preferred method for creating an array copy +// +// Notes +// ----- +// This is equivalent to: +// +// >>> np.array(a, copy=True) #doctest: +SKIP +// +// Examples +// -------- +// Create an array x, with a reference y and a copy z: +// +// >>> x = np.array([1, 2, 3]) +// >>> y = x +// >>> z = np.copy(x) +// +// Note that, when we modify x, y changes, but not z: +// +// >>> x[0] = 10 +// >>> x[0] == y[0] +// True +// >>> x[0] == z[0] +// False +// +// Note that, np.copy clears previously set WRITEABLE=False flag. +// +// >>> a = np.array([1, 2, 3]) +// >>> a.flags["WRITEABLE"] = False +// >>> b = np.copy(a) +// >>> b.flags["WRITEABLE"] +// True +// >>> b[0] = 3 +// >>> b +// array([3, 2, 3]) +// +// Note that np.copy is a shallow copy and will not copy object +// elements within arrays. This is mainly important for arrays +// containing Python objects. The new array will contain the +// same object which may lead to surprises if that object can +// be modified (is mutable): +// +// >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) +// >>> b = np.copy(a) +// >>> b[2][0] = 10 +// >>> a +// array([1, 'm', list([10, 3, 4])], dtype=object) +// +// To ensure all elements within an ``object`` array are copied, +// use `copy.deepcopy`: +// +// >>> import copy +// >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) +// >>> c = copy.deepcopy(a) +// >>> c[2][0] = 10 +// >>> c +// array([1, 'm', list([10, 3, 4])], dtype=object) +// >>> a +// array([1, 'm', list([2, 3, 4])], dtype=object) +// +// +// +//go:linkname Copy py.copy +func Copy(a *py.Object, order *py.Object, subok *py.Object) *py.Object +// +// Check whether or not an object can be iterated over. +// +// Parameters +// ---------- +// y : object +// Input object. +// +// Returns +// ------- +// b : bool +// Return ``True`` if the object has an iterator method or is a +// sequence and ``False`` otherwise. 
+// +// +// Examples +// -------- +// >>> np.iterable([1, 2, 3]) +// True +// >>> np.iterable(2) +// False +// +// Notes +// ----- +// In most cases, the results of ``np.iterable(obj)`` are consistent with +// ``isinstance(obj, collections.abc.Iterable)``. One notable exception is +// the treatment of 0-dimensional arrays:: +// +// >>> from collections.abc import Iterable +// >>> a = np.array(1.0) # 0-dimensional numpy array +// >>> isinstance(a, Iterable) +// True +// >>> np.iterable(a) +// False +// +// +// +//go:linkname Iterable py.iterable +func Iterable(y *py.Object) *py.Object +// +// Compute the q-th percentile of the data along the specified axis. +// +// Returns the q-th percentile(s) of the array elements. +// +// Parameters +// ---------- +// a : array_like of real numbers +// Input array or object that can be converted to an array. +// q : array_like of float +// Percentage or sequence of percentages for the percentiles to compute. +// Values must be between 0 and 100 inclusive. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the percentiles are computed. The +// default is to compute the percentile(s) along a flattened +// version of the array. +// +// .. versionchanged:: 1.9.0 +// A tuple of axes is supported +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output, +// but the type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow the input array `a` to be modified by intermediate +// calculations, to save memory. In this case, the contents of the input +// `a` after this function completes is undefined. +// method : str, optional +// This parameter specifies the method to use for estimating the +// percentile. There are many different methods, some unique to NumPy. +// See the notes for explanation. 
The options sorted by their R type +// as summarized in the H&F paper [1]_ are: +// +// 1. 'inverted_cdf' +// 2. 'averaged_inverted_cdf' +// 3. 'closest_observation' +// 4. 'interpolated_inverted_cdf' +// 5. 'hazen' +// 6. 'weibull' +// 7. 'linear' (default) +// 8. 'median_unbiased' +// 9. 'normal_unbiased' +// +// The first three methods are discontinuous. NumPy further defines the +// following discontinuous variations of the default 'linear' (7.) option: +// +// * 'lower' +// * 'higher', +// * 'midpoint' +// * 'nearest' +// +// .. versionchanged:: 1.22.0 +// This argument was previously called "interpolation" and only +// offered the "linear" default and last four options. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left in +// the result as dimensions with size one. With this option, the +// result will broadcast correctly against the original array `a`. +// +// .. versionadded:: 1.9.0 +// +// interpolation : str, optional +// Deprecated name for the method keyword argument. +// +// .. deprecated:: 1.22.0 +// +// Returns +// ------- +// percentile : scalar or ndarray +// If `q` is a single percentile and `axis=None`, then the result +// is a scalar. If multiple percentiles are given, first axis of +// the result corresponds to the percentiles. The other axes are +// the axes that remain after the reduction of `a`. If the input +// contains integers or floats smaller than ``float64``, the output +// data-type is ``float64``. Otherwise, the output data-type is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// mean +// median : equivalent to ``percentile(..., 50)`` +// nanpercentile +// quantile : equivalent to percentile, except q in the range [0, 1]. 
+// +// Notes +// ----- +// Given a vector ``V`` of length ``n``, the q-th percentile of ``V`` is +// the value ``q/100`` of the way from the minimum to the maximum in a +// sorted copy of ``V``. The values and distances of the two nearest +// neighbors as well as the `method` parameter will determine the +// percentile if the normalized ranking does not match the location of +// ``q`` exactly. This function is the same as the median if ``q=50``, the +// same as the minimum if ``q=0`` and the same as the maximum if +// ``q=100``. +// +// The optional `method` parameter specifies the method to use when the +// desired percentile lies between two indexes ``i`` and ``j = i + 1``. +// In that case, we first determine ``i + g``, a virtual index that lies +// between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the +// fractional part of the index. The final result is, then, an interpolation +// of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, +// ``i`` and ``j`` are modified using correction constants ``alpha`` and +// ``beta`` whose choices depend on the ``method`` used. Finally, note that +// since Python uses 0-based indexing, the code subtracts another 1 from the +// index internally. +// +// The following formula determines the virtual index ``i + g``, the location +// of the percentile in the sorted sample: +// +// .. math:: +// i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha +// +// The different methods then work as follows +// +// inverted_cdf: +// method 1 of H&F [1]_. +// This method gives discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 ; then take i +// +// averaged_inverted_cdf: +// method 2 of H&F [1]_. +// This method give discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 ; then average between bounds +// +// closest_observation: +// method 3 of H&F [1]_. 
+// This method give discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 and index is odd ; then take j +// * if g = 0 and index is even ; then take i +// +// interpolated_inverted_cdf: +// method 4 of H&F [1]_. +// This method give continuous results using: +// +// * alpha = 0 +// * beta = 1 +// +// hazen: +// method 5 of H&F [1]_. +// This method give continuous results using: +// +// * alpha = 1/2 +// * beta = 1/2 +// +// weibull: +// method 6 of H&F [1]_. +// This method give continuous results using: +// +// * alpha = 0 +// * beta = 0 +// +// linear: +// method 7 of H&F [1]_. +// This method give continuous results using: +// +// * alpha = 1 +// * beta = 1 +// +// median_unbiased: +// method 8 of H&F [1]_. +// This method is probably the best method if the sample +// distribution function is unknown (see reference). +// This method give continuous results using: +// +// * alpha = 1/3 +// * beta = 1/3 +// +// normal_unbiased: +// method 9 of H&F [1]_. +// This method is probably the best method if the sample +// distribution function is known to be normal. +// This method give continuous results using: +// +// * alpha = 3/8 +// * beta = 3/8 +// +// lower: +// NumPy method kept for backwards compatibility. +// Takes ``i`` as the interpolation point. +// +// higher: +// NumPy method kept for backwards compatibility. +// Takes ``j`` as the interpolation point. +// +// nearest: +// NumPy method kept for backwards compatibility. +// Takes ``i`` or ``j``, whichever is nearest. +// +// midpoint: +// NumPy method kept for backwards compatibility. +// Uses ``(i + j) / 2``. 
+// +// Examples +// -------- +// >>> a = np.array([[10, 7, 4], [3, 2, 1]]) +// >>> a +// array([[10, 7, 4], +// [ 3, 2, 1]]) +// >>> np.percentile(a, 50) +// 3.5 +// >>> np.percentile(a, 50, axis=0) +// array([6.5, 4.5, 2.5]) +// >>> np.percentile(a, 50, axis=1) +// array([7., 2.]) +// >>> np.percentile(a, 50, axis=1, keepdims=True) +// array([[7.], +// [2.]]) +// +// >>> m = np.percentile(a, 50, axis=0) +// >>> out = np.zeros_like(m) +// >>> np.percentile(a, 50, axis=0, out=out) +// array([6.5, 4.5, 2.5]) +// >>> m +// array([6.5, 4.5, 2.5]) +// +// >>> b = a.copy() +// >>> np.percentile(b, 50, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a == b) +// +// The different methods can be visualized graphically: +// +// .. plot:: +// +// import matplotlib.pyplot as plt +// +// a = np.arange(4) +// p = np.linspace(0, 100, 6001) +// ax = plt.gca() +// lines = [ +// ('linear', '-', 'C0'), +// ('inverted_cdf', ':', 'C1'), +// # Almost the same as `inverted_cdf`: +// ('averaged_inverted_cdf', '-.', 'C1'), +// ('closest_observation', ':', 'C2'), +// ('interpolated_inverted_cdf', '--', 'C1'), +// ('hazen', '--', 'C3'), +// ('weibull', '-.', 'C4'), +// ('median_unbiased', '--', 'C5'), +// ('normal_unbiased', '-.', 'C6'), +// ] +// for method, style, color in lines: +// ax.plot( +// p, np.percentile(a, p, method=method), +// label=method, linestyle=style, color=color) +// ax.set( +// title='Percentiles for different methods and data: ' + str(a), +// xlabel='Percentile', +// ylabel='Estimated percentile value', +// yticks=a) +// ax.legend(bbox_to_anchor=(1.03, 1)) +// plt.tight_layout() +// plt.show() +// +// References +// ---------- +// .. [1] R. J. Hyndman and Y. Fan, +// "Sample quantiles in statistical packages," +// The American Statistician, 50(4), pp. 
361-365, 1996 +// +// +// +//go:linkname Percentile py.percentile +func Percentile(a *py.Object, q *py.Object, axis *py.Object, out *py.Object, overwriteInput *py.Object, method *py.Object, keepdims *py.Object) *py.Object +// +// Calculate the n-th discrete difference along the given axis. +// +// The first difference is given by ``out[i] = a[i+1] - a[i]`` along +// the given axis, higher differences are calculated by using `diff` +// recursively. +// +// Parameters +// ---------- +// a : array_like +// Input array +// n : int, optional +// The number of times values are differenced. If zero, the input +// is returned as-is. +// axis : int, optional +// The axis along which the difference is taken, default is the +// last axis. +// prepend, append : array_like, optional +// Values to prepend or append to `a` along axis prior to +// performing the difference. Scalar values are expanded to +// arrays with length 1 in the direction of axis and the shape +// of the input array in along all other axes. Otherwise the +// dimension and shape must match `a` except along axis. +// +// .. versionadded:: 1.16.0 +// +// Returns +// ------- +// diff : ndarray +// The n-th differences. The shape of the output is the same as `a` +// except along `axis` where the dimension is smaller by `n`. The +// type of the output is the same as the type of the difference +// between any two elements of `a`. This is the same as the type of +// `a` in most cases. A notable exception is `datetime64`, which +// results in a `timedelta64` output array. +// +// See Also +// -------- +// gradient, ediff1d, cumsum +// +// Notes +// ----- +// Type is preserved for boolean arrays, so the result will contain +// `False` when consecutive elements are the same and `True` when they +// differ. +// +// For unsigned integer arrays, the results will also be unsigned. 
This +// should not be surprising, as the result is consistent with +// calculating the difference directly: +// +// >>> u8_arr = np.array([1, 0], dtype=np.uint8) +// >>> np.diff(u8_arr) +// array([255], dtype=uint8) +// >>> u8_arr[1,...] - u8_arr[0,...] +// 255 +// +// If this is not desirable, then the array should be cast to a larger +// integer type first: +// +// >>> i16_arr = u8_arr.astype(np.int16) +// >>> np.diff(i16_arr) +// array([-1], dtype=int16) +// +// Examples +// -------- +// >>> x = np.array([1, 2, 4, 7, 0]) +// >>> np.diff(x) +// array([ 1, 2, 3, -7]) +// >>> np.diff(x, n=2) +// array([ 1, 1, -10]) +// +// >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) +// >>> np.diff(x) +// array([[2, 3, 4], +// [5, 1, 2]]) +// >>> np.diff(x, axis=0) +// array([[-1, 2, 0, -2]]) +// +// >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) +// >>> np.diff(x) +// array([1, 1], dtype='timedelta64[D]') +// +// +// +//go:linkname Diff py.diff +func Diff(a *py.Object, n *py.Object, axis *py.Object, prepend *py.Object, append *py.Object) *py.Object +// +// Return the gradient of an N-dimensional array. +// +// The gradient is computed using second order accurate central differences +// in the interior points and either first or second order accurate one-sides +// (forward or backwards) differences at the boundaries. +// The returned gradient hence has the same shape as the input array. +// +// Parameters +// ---------- +// f : array_like +// An N-dimensional array containing samples of a scalar function. +// varargs : list of scalar or array, optional +// Spacing between f values. Default unitary spacing for all dimensions. +// Spacing can be specified using: +// +// 1. single scalar to specify a sample distance for all dimensions. +// 2. N scalars to specify a constant sample distance for each dimension. +// i.e. `dx`, `dy`, `dz`, ... +// 3. N arrays to specify the coordinates of the values along each +// dimension of F. 
The length of the array must match the size of +// the corresponding dimension +// 4. Any combination of N scalars/arrays with the meaning of 2. and 3. +// +// If `axis` is given, the number of varargs must equal the number of axes. +// Default: 1. +// +// edge_order : {1, 2}, optional +// Gradient is calculated using N-th order accurate differences +// at the boundaries. Default: 1. +// +// .. versionadded:: 1.9.1 +// +// axis : None or int or tuple of ints, optional +// Gradient is calculated only along the given axis or axes +// The default (axis = None) is to calculate the gradient for all the axes +// of the input array. axis may be negative, in which case it counts from +// the last to the first axis. +// +// .. versionadded:: 1.11.0 +// +// Returns +// ------- +// gradient : ndarray or list of ndarray +// A list of ndarrays (or a single ndarray if there is only one dimension) +// corresponding to the derivatives of f with respect to each dimension. +// Each derivative has the same shape as f. +// +// Examples +// -------- +// >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) +// >>> np.gradient(f) +// array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) +// >>> np.gradient(f, 2) +// array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) +// +// Spacing can be also specified with an array that represents the coordinates +// of the values F along the dimensions. +// For instance a uniform spacing: +// +// >>> x = np.arange(f.size) +// >>> np.gradient(f, x) +// array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) +// +// Or a non uniform one: +// +// >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) +// >>> np.gradient(f, x) +// array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) +// +// For two dimensional arrays, the return will be two arrays ordered by +// axis. In this example the first array stands for the gradient in +// rows and the second one in columns direction: +// +// >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) +// [array([[ 2., 2., -1.], +// [ 2., 2., -1.]]), array([[1. 
, 2.5, 4. ], +// [1. , 1. , 1. ]])] +// +// In this example the spacing is also specified: +// uniform for axis=0 and non uniform for axis=1 +// +// >>> dx = 2. +// >>> y = [1., 1.5, 3.5] +// >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) +// [array([[ 1. , 1. , -0.5], +// [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], +// [2. , 1.7, 0.5]])] +// +// It is possible to specify how boundaries are treated using `edge_order` +// +// >>> x = np.array([0, 1, 2, 3, 4]) +// >>> f = x**2 +// >>> np.gradient(f, edge_order=1) +// array([1., 2., 4., 6., 7.]) +// >>> np.gradient(f, edge_order=2) +// array([0., 2., 4., 6., 8.]) +// +// The `axis` keyword can be used to specify a subset of axes of which the +// gradient is calculated +// +// >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) +// array([[ 2., 2., -1.], +// [ 2., 2., -1.]]) +// +// Notes +// ----- +// Assuming that :math:`f\in C^{3}` (i.e., :math:`f` has at least 3 continuous +// derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we +// minimize the "consistency error" :math:`\eta_{i}` between the true gradient +// and its estimate from a linear combination of the neighboring grid-points: +// +// .. math:: +// +// \eta_{i} = f_{i}^{\left(1\right)} - +// \left[ \alpha f\left(x_{i}\right) + +// \beta f\left(x_{i} + h_{d}\right) + +// \gamma f\left(x_{i}-h_{s}\right) +// \right] +// +// By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` +// with their Taylor series expansion, this translates into solving +// the following the linear system: +// +// .. math:: +// +// \left\{ +// \begin{array}{r} +// \alpha+\beta+\gamma=0 \\ +// \beta h_{d}-\gamma h_{s}=1 \\ +// \beta h_{d}^{2}+\gamma h_{s}^{2}=0 +// \end{array} +// \right. +// +// The resulting approximation of :math:`f_{i}^{(1)}` is the following: +// +// .. 
math:: +// +// \hat f_{i}^{(1)} = +// \frac{ +// h_{s}^{2}f\left(x_{i} + h_{d}\right) +// + \left(h_{d}^{2} - h_{s}^{2}\right)f\left(x_{i}\right) +// - h_{d}^{2}f\left(x_{i}-h_{s}\right)} +// { h_{s}h_{d}\left(h_{d} + h_{s}\right)} +// + \mathcal{O}\left(\frac{h_{d}h_{s}^{2} +// + h_{s}h_{d}^{2}}{h_{d} +// + h_{s}}\right) +// +// It is worth noting that if :math:`h_{s}=h_{d}` +// (i.e., data are evenly spaced) +// we find the standard second order approximation: +// +// .. math:: +// +// \hat f_{i}^{(1)}= +// \frac{f\left(x_{i+1}\right) - f\left(x_{i-1}\right)}{2h} +// + \mathcal{O}\left(h^{2}\right) +// +// With a similar procedure the forward/backward approximations used for +// boundaries can be derived. +// +// References +// ---------- +// .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics +// (Texts in Applied Mathematics). New York: Springer. +// .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations +// in Geophysical Fluid Dynamics. New York: Springer. +// .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on +// Arbitrarily Spaced Grids, +// Mathematics of Computation 51, no. 184 : 699-706. +// `PDF `_. +// +// +//go:linkname Gradient py.gradient +func Gradient(f *py.Object, __llgo_va_list ...interface{}) *py.Object +// +// Return the angle of the complex argument. +// +// Parameters +// ---------- +// z : array_like +// A complex number or sequence of complex numbers. +// deg : bool, optional +// Return angle in degrees if True, radians if False (default). +// +// Returns +// ------- +// angle : ndarray or scalar +// The counterclockwise angle from the positive real axis on the complex +// plane in the range ``(-pi, pi]``, with dtype as numpy.float64. +// +// .. versionchanged:: 1.16.0 +// This function works on subclasses of ndarray like `ma.array`. 
+// +// See Also +// -------- +// arctan2 +// absolute +// +// Notes +// ----- +// Although the angle of the complex number 0 is undefined, ``numpy.angle(0)`` +// returns the value 0. +// +// Examples +// -------- +// >>> np.angle([1.0, 1.0j, 1+1j]) # in radians +// array([ 0. , 1.57079633, 0.78539816]) # may vary +// >>> np.angle(1+1j, deg=True) # in degrees +// 45.0 +// +// +// +//go:linkname Angle py.angle +func Angle(z *py.Object, deg *py.Object) *py.Object +// +// Unwrap by taking the complement of large deltas with respect to the period. +// +// This unwraps a signal `p` by changing elements which have an absolute +// difference from their predecessor of more than ``max(discont, period/2)`` +// to their `period`-complementary values. +// +// For the default case where `period` is :math:`2\pi` and `discont` is +// :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences +// are never greater than :math:`\pi` by adding :math:`2k\pi` for some +// integer :math:`k`. +// +// Parameters +// ---------- +// p : array_like +// Input array. +// discont : float, optional +// Maximum discontinuity between values, default is ``period/2``. +// Values below ``period/2`` are treated as if they were ``period/2``. +// To have an effect different from the default, `discont` should be +// larger than ``period/2``. +// axis : int, optional +// Axis along which unwrap will operate, default is the last axis. +// period : float, optional +// Size of the range over which the input wraps. By default, it is +// ``2 pi``. +// +// .. versionadded:: 1.21.0 +// +// Returns +// ------- +// out : ndarray +// Output array. +// +// See Also +// -------- +// rad2deg, deg2rad +// +// Notes +// ----- +// If the discontinuity in `p` is smaller than ``period/2``, +// but larger than `discont`, no unwrapping is done because taking +// the complement would only make the discontinuity larger. 
+// +// Examples +// -------- +// >>> phase = np.linspace(0, np.pi, num=5) +// >>> phase[3:] += np.pi +// >>> phase +// array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary +// >>> np.unwrap(phase) +// array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary +// >>> np.unwrap([0, 1, 2, -1, 0], period=4) +// array([0, 1, 2, 3, 4]) +// >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) +// array([1, 2, 3, 4, 5, 6, 7, 8, 9]) +// >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) +// array([2, 3, 4, 5, 6, 7, 8, 9]) +// >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 +// >>> np.unwrap(phase_deg, period=360) +// array([-180., -140., -100., -60., -20., 20., 60., 100., 140., +// 180., 220., 260., 300., 340., 380., 420., 460., 500., +// 540.]) +// +// +//go:linkname Unwrap py.unwrap +func Unwrap(p *py.Object, discont *py.Object, axis *py.Object) *py.Object +// +// Sort a complex array using the real part first, then the imaginary part. +// +// Parameters +// ---------- +// a : array_like +// Input array +// +// Returns +// ------- +// out : complex ndarray +// Always returns a sorted complex array. +// +// Examples +// -------- +// >>> np.sort_complex([5, 3, 6, 2, 1]) +// array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) +// +// >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) +// array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) +// +// +// +//go:linkname SortComplex py.sort_complex +func SortComplex(a *py.Object) *py.Object +// +// Display a message on a device. +// +// Parameters +// ---------- +// mesg : str +// Message to display. +// device : object +// Device to write message. If None, defaults to ``sys.stdout`` which is +// very similar to ``print``. `device` needs to have ``write()`` and +// ``flush()`` methods. +// linefeed : bool, optional +// Option whether to print a line feed or not. Defaults to True. 
+// +// Raises +// ------ +// AttributeError +// If `device` does not have a ``write()`` or ``flush()`` method. +// +// Examples +// -------- +// Besides ``sys.stdout``, a file-like object can also be used as it has +// both required methods: +// +// >>> from io import StringIO +// >>> buf = StringIO() +// >>> np.disp(u'"Display" in a file', device=buf) +// >>> buf.getvalue() +// '"Display" in a file\n' +// +// +// +//go:linkname Disp py.disp +func Disp(mesg *py.Object, device *py.Object, linefeed *py.Object) *py.Object +// +// Reverse the order of elements in an array along the given axis. +// +// The shape of the array is preserved, but the elements are reordered. +// +// .. versionadded:: 1.12.0 +// +// Parameters +// ---------- +// m : array_like +// Input array. +// axis : None or int or tuple of ints, optional +// Axis or axes along which to flip over. The default, +// axis=None, will flip over all of the axes of the input array. +// If axis is negative it counts from the last to the first axis. +// +// If axis is a tuple of ints, flipping is performed on all of the axes +// specified in the tuple. +// +// .. versionchanged:: 1.15.0 +// None and tuples of axes are supported +// +// Returns +// ------- +// out : array_like +// A view of `m` with the entries of axis reversed. Since a view is +// returned, this operation is done in constant time. +// +// See Also +// -------- +// flipud : Flip an array vertically (axis=0). +// fliplr : Flip an array horizontally (axis=1). +// +// Notes +// ----- +// flip(m, 0) is equivalent to flipud(m). +// +// flip(m, 1) is equivalent to fliplr(m). +// +// flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. +// +// flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all +// positions. +// +// flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at +// position 0 and position 1. 
+// +// Examples +// -------- +// >>> A = np.arange(8).reshape((2,2,2)) +// >>> A +// array([[[0, 1], +// [2, 3]], +// [[4, 5], +// [6, 7]]]) +// >>> np.flip(A, 0) +// array([[[4, 5], +// [6, 7]], +// [[0, 1], +// [2, 3]]]) +// >>> np.flip(A, 1) +// array([[[2, 3], +// [0, 1]], +// [[6, 7], +// [4, 5]]]) +// >>> np.flip(A) +// array([[[7, 6], +// [5, 4]], +// [[3, 2], +// [1, 0]]]) +// >>> np.flip(A, (0, 2)) +// array([[[5, 4], +// [7, 6]], +// [[1, 0], +// [3, 2]]]) +// >>> A = np.random.randn(3,4,5) +// >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) +// True +// +// +//go:linkname Flip py.flip +func Flip(m *py.Object, axis *py.Object) *py.Object +// +// Rotate an array by 90 degrees in the plane specified by axes. +// +// Rotation direction is from the first towards the second axis. +// This means for a 2D array with the default `k` and `axes`, the +// rotation will be counterclockwise. +// +// Parameters +// ---------- +// m : array_like +// Array of two or more dimensions. +// k : integer +// Number of times the array is rotated by 90 degrees. +// axes : (2,) array_like +// The array is rotated in the plane defined by the axes. +// Axes must be different. +// +// .. versionadded:: 1.12.0 +// +// Returns +// ------- +// y : ndarray +// A rotated view of `m`. +// +// See Also +// -------- +// flip : Reverse the order of elements in an array along the given axis. +// fliplr : Flip an array horizontally. +// flipud : Flip an array vertically. 
+// +// Notes +// ----- +// ``rot90(m, k=1, axes=(1,0))`` is the reverse of +// ``rot90(m, k=1, axes=(0,1))`` +// +// ``rot90(m, k=1, axes=(1,0))`` is equivalent to +// ``rot90(m, k=-1, axes=(0,1))`` +// +// Examples +// -------- +// >>> m = np.array([[1,2],[3,4]], int) +// >>> m +// array([[1, 2], +// [3, 4]]) +// >>> np.rot90(m) +// array([[2, 4], +// [1, 3]]) +// >>> np.rot90(m, 2) +// array([[4, 3], +// [2, 1]]) +// >>> m = np.arange(8).reshape((2,2,2)) +// >>> np.rot90(m, 1, (1,2)) +// array([[[1, 3], +// [0, 2]], +// [[5, 7], +// [4, 6]]]) +// +// +// +//go:linkname Rot90 py.rot90 +func Rot90(m *py.Object, k *py.Object, axes *py.Object) *py.Object +// +// Return the elements of an array that satisfy some condition. +// +// This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If +// `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. +// +// Note that `place` does the exact opposite of `extract`. +// +// Parameters +// ---------- +// condition : array_like +// An array whose nonzero or True entries indicate the elements of `arr` +// to extract. +// arr : array_like +// Input array of the same size as `condition`. +// +// Returns +// ------- +// extract : ndarray +// Rank 1 array of values from `arr` where `condition` is True. 
+// +// See Also +// -------- +// take, put, copyto, compress, place +// +// Examples +// -------- +// >>> arr = np.arange(12).reshape((3, 4)) +// >>> arr +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11]]) +// >>> condition = np.mod(arr, 3)==0 +// >>> condition +// array([[ True, False, False, True], +// [False, False, True, False], +// [False, True, False, False]]) +// >>> np.extract(condition, arr) +// array([0, 3, 6, 9]) +// +// +// If `condition` is boolean: +// +// >>> arr[condition] +// array([0, 3, 6, 9]) +// +// +// +//go:linkname Extract py.extract +func Extract(condition *py.Object, arr *py.Object) *py.Object +// +// Change elements of an array based on conditional and input values. +// +// Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that +// `place` uses the first N elements of `vals`, where N is the number of +// True values in `mask`, while `copyto` uses the elements where `mask` +// is True. +// +// Note that `extract` does the exact opposite of `place`. +// +// Parameters +// ---------- +// arr : ndarray +// Array to put data into. +// mask : array_like +// Boolean mask array. Must have the same size as `a`. +// vals : 1-D sequence +// Values to put into `a`. Only the first N elements are used, where +// N is the number of True values in `mask`. If `vals` is smaller +// than N, it will be repeated, and if elements of `a` are to be masked, +// this sequence must be non-empty. +// +// See Also +// -------- +// copyto, put, take, extract +// +// Examples +// -------- +// >>> arr = np.arange(6).reshape(2, 3) +// >>> np.place(arr, arr>2, [44, 55]) +// >>> arr +// array([[ 0, 1, 2], +// [44, 55, 44]]) +// +// +// +//go:linkname Place py.place +func Place(arr *py.Object, mask *py.Object, vals *py.Object) *py.Object +// Convert the input to an array, checking for NaNs or Infs. +// +// Parameters +// ---------- +// a : array_like +// Input data, in any form that can be converted to an array. 
This +// includes lists, lists of tuples, tuples, tuples of tuples, tuples +// of lists and ndarrays. Success requires no NaNs or Infs. +// dtype : data-type, optional +// By default, the data-type is inferred from the input data. +// order : {'C', 'F', 'A', 'K'}, optional +// Memory layout. 'A' and 'K' depend on the order of input array a. +// 'C' row-major (C-style), +// 'F' column-major (Fortran-style) memory representation. +// 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise +// 'K' (keep) preserve input order +// Defaults to 'C'. +// +// Returns +// ------- +// out : ndarray +// Array interpretation of `a`. No copy is performed if the input +// is already an ndarray. If `a` is a subclass of ndarray, a base +// class ndarray is returned. +// +// Raises +// ------ +// ValueError +// Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). +// +// See Also +// -------- +// asarray : Create an array. +// asanyarray : Similar function which passes through subclasses. +// ascontiguousarray : Convert input to a contiguous array. +// asfarray : Convert input to a floating point ndarray. +// asfortranarray : Convert input to an ndarray with column-major +// memory order. +// fromiter : Create an array from an iterator. +// fromfunction : Construct an array by executing a function on grid +// positions. +// +// Examples +// -------- +// Convert a list into an array. If all elements are finite +// ``asarray_chkfinite`` is identical to ``asarray``. +// +// >>> a = [1, 2] +// >>> np.asarray_chkfinite(a, dtype=float) +// array([1., 2.]) +// +// Raises ValueError if array_like contains NaNs or Infs. +// +// >>> a = [1, 2, np.inf] +// >>> try: +// ... np.asarray_chkfinite(a) +// ... except ValueError: +// ... print('ValueError') +// ... 
+// ValueError +// +// +// +//go:linkname AsarrayChkfinite py.asarray_chkfinite +func AsarrayChkfinite(a *py.Object, dtype *py.Object, order *py.Object) *py.Object +// +// Compute the weighted average along the specified axis. +// +// Parameters +// ---------- +// a : array_like +// Array containing data to be averaged. If `a` is not an array, a +// conversion is attempted. +// axis : None or int or tuple of ints, optional +// Axis or axes along which to average `a`. The default, +// axis=None, will average over all of the elements of the input array. +// If axis is negative it counts from the last to the first axis. +// +// .. versionadded:: 1.7.0 +// +// If axis is a tuple of ints, averaging is performed on all of the axes +// specified in the tuple instead of a single axis or all the axes as +// before. +// weights : array_like, optional +// An array of weights associated with the values in `a`. Each value in +// `a` contributes to the average according to its associated weight. +// The weights array can either be 1-D (in which case its length must be +// the size of `a` along the given axis) or of the same shape as `a`. +// If `weights=None`, then all data in `a` are assumed to have a +// weight equal to one. The 1-D calculation is:: +// +// avg = sum(a * weights) / sum(weights) +// +// The only constraint on `weights` is that `sum(weights)` must not be 0. +// returned : bool, optional +// Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) +// is returned, otherwise only the average is returned. +// If `weights=None`, `sum_of_weights` is equivalent to the number of +// elements over which the average is taken. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. 
+// *Note:* `keepdims` will not work with instances of `numpy.matrix` +// or other classes whose methods do not support `keepdims`. +// +// .. versionadded:: 1.23.0 +// +// Returns +// ------- +// retval, [sum_of_weights] : array_type or double +// Return the average along the specified axis. When `returned` is `True`, +// return a tuple with the average as the first element and the sum +// of the weights as the second element. `sum_of_weights` is of the +// same type as `retval`. The result dtype follows a general pattern. +// If `weights` is None, the result dtype will be that of `a`, or ``float64`` +// if `a` is integral. Otherwise, if `weights` is not None and `a` is non- +// integral, the result type will be the type of lowest precision capable of +// representing values of both `a` and `weights`. If `a` happens to be +// integral, the previous rules still apply but the result dtype will +// at least be ``float64``. +// +// Raises +// ------ +// ZeroDivisionError +// When all weights along axis are zero. See `numpy.ma.average` for a +// version robust to this type of error. +// TypeError +// When the length of 1D `weights` is not the same as the shape of `a` +// along axis. +// +// See Also +// -------- +// mean +// +// ma.average : average for masked arrays -- useful if your data contains +// "missing" values +// numpy.result_type : Returns the type that results from applying the +// numpy type promotion rules to the arguments. +// +// Examples +// -------- +// >>> data = np.arange(1, 5) +// >>> data +// array([1, 2, 3, 4]) +// >>> np.average(data) +// 2.5 +// >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) +// 4.0 +// +// >>> data = np.arange(6).reshape((3, 2)) +// >>> data +// array([[0, 1], +// [2, 3], +// [4, 5]]) +// >>> np.average(data, axis=1, weights=[1./4, 3./4]) +// array([0.75, 2.75, 4.75]) +// >>> np.average(data, weights=[1./4, 3./4]) +// Traceback (most recent call last): +// ... 
+// TypeError: Axis must be specified when shapes of a and weights differ. +// +// >>> a = np.ones(5, dtype=np.float128) +// >>> w = np.ones(5, dtype=np.complex64) +// >>> avg = np.average(a, weights=w) +// >>> print(avg.dtype) +// complex256 +// +// With ``keepdims=True``, the following result has shape (3, 1). +// +// >>> np.average(data, axis=1, keepdims=True) +// array([[0.5], +// [2.5], +// [4.5]]) +// +// +//go:linkname Average py.average +func Average(a *py.Object, axis *py.Object, weights *py.Object, returned *py.Object) *py.Object +// +// bincount(x, /, weights=None, minlength=0) +// +// Count number of occurrences of each value in array of non-negative ints. +// +// The number of bins (of size 1) is one larger than the largest value in +// `x`. If `minlength` is specified, there will be at least this number +// of bins in the output array (though it will be longer if necessary, +// depending on the contents of `x`). +// Each bin gives the number of occurrences of its index value in `x`. +// If `weights` is specified the input array is weighted by it, i.e. if a +// value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead +// of ``out[n] += 1``. +// +// Parameters +// ---------- +// x : array_like, 1 dimension, nonnegative ints +// Input array. +// weights : array_like, optional +// Weights, array of the same shape as `x`. +// minlength : int, optional +// A minimum number of bins for the output array. +// +// .. versionadded:: 1.6.0 +// +// Returns +// ------- +// out : ndarray of ints +// The result of binning the input array. +// The length of `out` is equal to ``np.amax(x)+1``. +// +// Raises +// ------ +// ValueError +// If the input is not 1-dimensional, or contains elements with negative +// values, or if `minlength` is negative. +// TypeError +// If the type of the input is float or complex. 
+// +// See Also +// -------- +// histogram, digitize, unique +// +// Examples +// -------- +// >>> np.bincount(np.arange(5)) +// array([1, 1, 1, 1, 1]) +// >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) +// array([1, 3, 1, 1, 0, 0, 0, 1]) +// +// >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) +// >>> np.bincount(x).size == np.amax(x)+1 +// True +// +// The input array needs to be of integer dtype, otherwise a +// TypeError is raised: +// +// >>> np.bincount(np.arange(5, dtype=float)) +// Traceback (most recent call last): +// ... +// TypeError: Cannot cast array data from dtype('float64') to dtype('int64') +// according to the rule 'safe' +// +// A possible use of ``bincount`` is to perform sums over +// variable-size chunks of an array, using the ``weights`` keyword. +// +// >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights +// >>> x = np.array([0, 1, 1, 2, 2, 2]) +// >>> np.bincount(x, weights=w) +// array([ 0.3, 0.7, 1.1]) +// +// +// +//go:linkname Bincount py.bincount +func Bincount(x *py.Object, weights *py.Object, minlength *py.Object) *py.Object +// +// Return the indices of the bins to which each value in input array belongs. +// +// ========= ============= ============================ +// `right` order of bins returned index `i` satisfies +// ========= ============= ============================ +// ``False`` increasing ``bins[i-1] <= x < bins[i]`` +// ``True`` increasing ``bins[i-1] < x <= bins[i]`` +// ``False`` decreasing ``bins[i-1] > x >= bins[i]`` +// ``True`` decreasing ``bins[i-1] >= x > bins[i]`` +// ========= ============= ============================ +// +// If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is +// returned as appropriate. +// +// Parameters +// ---------- +// x : array_like +// Input array to be binned. Prior to NumPy 1.10.0, this array had to +// be 1-dimensional, but can now have any shape. +// bins : array_like +// Array of bins. It has to be 1-dimensional and monotonic. 
+// right : bool, optional +// Indicating whether the intervals include the right or the left bin +// edge. Default behavior is (right==False) indicating that the interval +// does not include the right edge. The left bin end is open in this +// case, i.e., bins[i-1] <= x < bins[i] is the default behavior for +// monotonically increasing bins. +// +// Returns +// ------- +// indices : ndarray of ints +// Output array of indices, of same shape as `x`. +// +// Raises +// ------ +// ValueError +// If `bins` is not monotonic. +// TypeError +// If the type of the input is complex. +// +// See Also +// -------- +// bincount, histogram, unique, searchsorted +// +// Notes +// ----- +// If values in `x` are such that they fall outside the bin range, +// attempting to index `bins` with the indices that `digitize` returns +// will result in an IndexError. +// +// .. versionadded:: 1.10.0 +// +// `np.digitize` is implemented in terms of `np.searchsorted`. This means +// that a binary search is used to bin the values, which scales much better +// for larger number of bins than the previous linear search. It also removes +// the requirement for the input array to be 1-dimensional. +// +// For monotonically _increasing_ `bins`, the following are equivalent:: +// +// np.digitize(x, bins, right=True) +// np.searchsorted(bins, x, side='left') +// +// Note that as the order of the arguments are reversed, the side must be too. +// The `searchsorted` call is marginally faster, as it does not do any +// monotonicity checks. Perhaps more importantly, it supports all dtypes. +// +// Examples +// -------- +// >>> x = np.array([0.2, 6.4, 3.0, 1.6]) +// >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) +// >>> inds = np.digitize(x, bins) +// >>> inds +// array([1, 4, 3, 2]) +// >>> for n in range(x.size): +// ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) +// ... 
+// 0.0 <= 0.2 < 1.0 +// 4.0 <= 6.4 < 10.0 +// 2.5 <= 3.0 < 4.0 +// 1.0 <= 1.6 < 2.5 +// +// >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) +// >>> bins = np.array([0, 5, 10, 15, 20]) +// >>> np.digitize(x,bins,right=True) +// array([1, 2, 3, 4, 4]) +// >>> np.digitize(x,bins,right=False) +// array([1, 3, 3, 4, 5]) +// +// +//go:linkname Digitize py.digitize +func Digitize(x *py.Object, bins *py.Object, right *py.Object) *py.Object +// +// Estimate a covariance matrix, given data and weights. +// +// Covariance indicates the level to which two variables vary together. +// If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, +// then the covariance matrix element :math:`C_{ij}` is the covariance of +// :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance +// of :math:`x_i`. +// +// See the notes for an outline of the algorithm. +// +// Parameters +// ---------- +// m : array_like +// A 1-D or 2-D array containing multiple variables and observations. +// Each row of `m` represents a variable, and each column a single +// observation of all those variables. Also see `rowvar` below. +// y : array_like, optional +// An additional set of variables and observations. `y` has the same form +// as that of `m`. +// rowvar : bool, optional +// If `rowvar` is True (default), then each row represents a +// variable, with observations in the columns. Otherwise, the relationship +// is transposed: each column represents a variable, while the rows +// contain observations. +// bias : bool, optional +// Default normalization (False) is by ``(N - 1)``, where ``N`` is the +// number of observations given (unbiased estimate). If `bias` is True, +// then normalization is by ``N``. These values can be overridden by using +// the keyword ``ddof`` in numpy versions >= 1.5. +// ddof : int, optional +// If not ``None`` the default value implied by `bias` is overridden. 
+// Note that ``ddof=1`` will return the unbiased estimate, even if both +// `fweights` and `aweights` are specified, and ``ddof=0`` will return +// the simple average. See the notes for the details. The default value +// is ``None``. +// +// .. versionadded:: 1.5 +// fweights : array_like, int, optional +// 1-D array of integer frequency weights; the number of times each +// observation vector should be repeated. +// +// .. versionadded:: 1.10 +// aweights : array_like, optional +// 1-D array of observation vector weights. These relative weights are +// typically large for observations considered "important" and smaller for +// observations considered less "important". If ``ddof=0`` the array of +// weights can be used to assign probabilities to observation vectors. +// +// .. versionadded:: 1.10 +// dtype : data-type, optional +// Data-type of the result. By default, the return data-type will have +// at least `numpy.float64` precision. +// +// .. versionadded:: 1.20 +// +// Returns +// ------- +// out : ndarray +// The covariance matrix of the variables. +// +// See Also +// -------- +// corrcoef : Normalized covariance matrix +// +// Notes +// ----- +// Assume that the observations are in the columns of the observation +// array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The +// steps to compute the weighted covariance are as follows:: +// +// >>> m = np.arange(10, dtype=np.float64) +// >>> f = np.arange(10) * 2 +// >>> a = np.arange(10) ** 2. +// >>> ddof = 1 +// >>> w = f * a +// >>> v1 = np.sum(w) +// >>> v2 = np.sum(w * a) +// >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 +// >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) +// +// Note that when ``a == 1``, the normalization factor +// ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` +// as it should. 
+// +// Examples +// -------- +// Consider two variables, :math:`x_0` and :math:`x_1`, which +// correlate perfectly, but in opposite directions: +// +// >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T +// >>> x +// array([[0, 1, 2], +// [2, 1, 0]]) +// +// Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance +// matrix shows this clearly: +// +// >>> np.cov(x) +// array([[ 1., -1.], +// [-1., 1.]]) +// +// Note that element :math:`C_{0,1}`, which shows the correlation between +// :math:`x_0` and :math:`x_1`, is negative. +// +// Further, note how `x` and `y` are combined: +// +// >>> x = [-2.1, -1, 4.3] +// >>> y = [3, 1.1, 0.12] +// >>> X = np.stack((x, y), axis=0) +// >>> np.cov(X) +// array([[11.71 , -4.286 ], # may vary +// [-4.286 , 2.144133]]) +// >>> np.cov(x, y) +// array([[11.71 , -4.286 ], # may vary +// [-4.286 , 2.144133]]) +// >>> np.cov(x) +// array(11.71) +// +// +// +//go:linkname Cov py.cov +func Cov(m *py.Object, y *py.Object, rowvar *py.Object, bias *py.Object, ddof *py.Object, fweights *py.Object, aweights *py.Object) *py.Object +// +// Return Pearson product-moment correlation coefficients. +// +// Please refer to the documentation for `cov` for more detail. The +// relationship between the correlation coefficient matrix, `R`, and the +// covariance matrix, `C`, is +// +// .. math:: R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} C_{jj} } } +// +// The values of `R` are between -1 and 1, inclusive. +// +// Parameters +// ---------- +// x : array_like +// A 1-D or 2-D array containing multiple variables and observations. +// Each row of `x` represents a variable, and each column a single +// observation of all those variables. Also see `rowvar` below. +// y : array_like, optional +// An additional set of variables and observations. `y` has the same +// shape as `x`. +// rowvar : bool, optional +// If `rowvar` is True (default), then each row represents a +// variable, with observations in the columns. 
Otherwise, the relationship +// is transposed: each column represents a variable, while the rows +// contain observations. +// bias : _NoValue, optional +// Has no effect, do not use. +// +// .. deprecated:: 1.10.0 +// ddof : _NoValue, optional +// Has no effect, do not use. +// +// .. deprecated:: 1.10.0 +// dtype : data-type, optional +// Data-type of the result. By default, the return data-type will have +// at least `numpy.float64` precision. +// +// .. versionadded:: 1.20 +// +// Returns +// ------- +// R : ndarray +// The correlation coefficient matrix of the variables. +// +// See Also +// -------- +// cov : Covariance matrix +// +// Notes +// ----- +// Due to floating point rounding the resulting array may not be Hermitian, +// the diagonal elements may not be 1, and the elements may not satisfy the +// inequality abs(a) <= 1. The real and imaginary parts are clipped to the +// interval [-1, 1] in an attempt to improve on that situation but is not +// much help in the complex case. +// +// This function accepts but discards arguments `bias` and `ddof`. This is +// for backwards compatibility with previous versions of this function. These +// arguments had no effect on the return values of the function and can be +// safely ignored in this and previous versions of numpy. +// +// Examples +// -------- +// In this example we generate two random arrays, ``xarr`` and ``yarr``, and +// compute the row-wise and column-wise Pearson correlation coefficients, +// ``R``. Since ``rowvar`` is true by default, we first find the row-wise +// Pearson correlation coefficients between the variables of ``xarr``. +// +// >>> import numpy as np +// >>> rng = np.random.default_rng(seed=42) +// >>> xarr = rng.random((3, 3)) +// >>> xarr +// array([[0.77395605, 0.43887844, 0.85859792], +// [0.69736803, 0.09417735, 0.97562235], +// [0.7611397 , 0.78606431, 0.12811363]]) +// >>> R1 = np.corrcoef(xarr) +// >>> R1 +// array([[ 1. , 0.99256089, -0.68080986], +// [ 0.99256089, 1. 
, -0.76492172], +// [-0.68080986, -0.76492172, 1. ]]) +// +// If we add another set of variables and observations ``yarr``, we can +// compute the row-wise Pearson correlation coefficients between the +// variables in ``xarr`` and ``yarr``. +// +// >>> yarr = rng.random((3, 3)) +// >>> yarr +// array([[0.45038594, 0.37079802, 0.92676499], +// [0.64386512, 0.82276161, 0.4434142 ], +// [0.22723872, 0.55458479, 0.06381726]]) +// >>> R2 = np.corrcoef(xarr, yarr) +// >>> R2 +// array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , +// -0.99004057], +// [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, +// -0.99981569], +// [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, +// 0.77714685], +// [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, +// -0.83571711], +// [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , +// 0.97517215], +// [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, +// 1. ]]) +// +// Finally if we use the option ``rowvar=False``, the columns are now +// being treated as the variables and we will find the column-wise Pearson +// correlation coefficients between variables in ``xarr`` and ``yarr``. +// +// >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) +// >>> R3 +// array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , +// 0.22423734], +// [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, +// -0.44069024], +// [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, +// 0.75137473], +// [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, +// 0.47536961], +// [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , +// -0.46666491], +// [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, +// 1. ]]) +// +// +// +//go:linkname Corrcoef py.corrcoef +func Corrcoef(x *py.Object, y *py.Object, rowvar *py.Object, bias *py.Object, ddof *py.Object) *py.Object +// +// Return a copy of an array sorted along the first axis. +// +// .. 
deprecated:: 1.24 +// +// msort is deprecated, use ``np.sort(a, axis=0)`` instead. +// +// Parameters +// ---------- +// a : array_like +// Array to be sorted. +// +// Returns +// ------- +// sorted_array : ndarray +// Array of the same type and shape as `a`. +// +// See Also +// -------- +// sort +// +// Notes +// ----- +// ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. +// +// Examples +// -------- +// >>> a = np.array([[1, 4], [3, 1]]) +// >>> np.msort(a) # sort along the first axis +// array([[1, 1], +// [3, 4]]) +// +// +// +//go:linkname Msort py.msort +func Msort(a *py.Object) *py.Object +// +// Compute the median along the specified axis. +// +// Returns the median of the array elements. +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array. +// axis : {int, sequence of int, None}, optional +// Axis or axes along which the medians are computed. The default +// is to compute the median along a flattened version of the array. +// A sequence of axes is supported since version 1.9.0. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output, +// but the type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow use of memory of input array `a` for +// calculations. The input array will be modified by the call to +// `median`. This will save memory when you do not need to preserve +// the contents of the input array. Treat the input as undefined, +// but it will probably be fully or partially sorted. Default is +// False. If `overwrite_input` is ``True`` and `a` is not already an +// `ndarray`, an error will be raised. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `arr`. 
+// +// .. versionadded:: 1.9.0 +// +// Returns +// ------- +// median : ndarray +// A new array holding the result. If the input contains integers +// or floats smaller than ``float64``, then the output data-type is +// ``np.float64``. Otherwise, the data-type of the output is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// mean, percentile +// +// Notes +// ----- +// Given a vector ``V`` of length ``N``, the median of ``V`` is the +// middle value of a sorted copy of ``V``, ``V_sorted`` - i +// e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the +// two middle values of ``V_sorted`` when ``N`` is even. +// +// Examples +// -------- +// >>> a = np.array([[10, 7, 4], [3, 2, 1]]) +// >>> a +// array([[10, 7, 4], +// [ 3, 2, 1]]) +// >>> np.median(a) +// 3.5 +// >>> np.median(a, axis=0) +// array([6.5, 4.5, 2.5]) +// >>> np.median(a, axis=1) +// array([7., 2.]) +// >>> m = np.median(a, axis=0) +// >>> out = np.zeros_like(m) +// >>> np.median(a, axis=0, out=m) +// array([6.5, 4.5, 2.5]) +// >>> m +// array([6.5, 4.5, 2.5]) +// >>> b = a.copy() +// >>> np.median(b, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a==b) +// >>> b = a.copy() +// >>> np.median(b, axis=None, overwrite_input=True) +// 3.5 +// >>> assert not np.all(a==b) +// +// +// +//go:linkname Median py.median +func Median(a *py.Object, axis *py.Object, out *py.Object, overwriteInput *py.Object, keepdims *py.Object) *py.Object +// +// Return the normalized sinc function. +// +// The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument +// :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not +// only everywhere continuous but also infinitely differentiable. +// +// .. note:: +// +// Note the normalization factor of ``pi`` used in the definition. +// This is the most commonly used definition in signal processing. 
+// Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function +// :math:`\sin(x)/x` that is more common in mathematics. +// +// Parameters +// ---------- +// x : ndarray +// Array (possibly multi-dimensional) of values for which to calculate +// ``sinc(x)``. +// +// Returns +// ------- +// out : ndarray +// ``sinc(x)``, which has the same shape as the input. +// +// Notes +// ----- +// The name sinc is short for "sine cardinal" or "sinus cardinalis". +// +// The sinc function is used in various signal processing applications, +// including in anti-aliasing, in the construction of a Lanczos resampling +// filter, and in interpolation. +// +// For bandlimited interpolation of discrete-time signals, the ideal +// interpolation kernel is proportional to the sinc function. +// +// References +// ---------- +// .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web +// Resource. http://mathworld.wolfram.com/SincFunction.html +// .. [2] Wikipedia, "Sinc function", +// https://en.wikipedia.org/wiki/Sinc_function +// +// Examples +// -------- +// >>> import matplotlib.pyplot as plt +// >>> x = np.linspace(-4, 4, 41) +// >>> np.sinc(x) +// array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary +// -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, +// 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, +// 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, +// -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, +// 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, +// 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, +// 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, +// 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, +// -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, +// -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, +// 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, +// -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, +// -4.92362781e-02, -3.89804309e-17]) +// +// >>> plt.plot(x, np.sinc(x)) +// [] +// >>> plt.title("Sinc Function") 
+// Text(0.5, 1.0, 'Sinc Function') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("X") +// Text(0.5, 0, 'X') +// >>> plt.show() +// +// +// +//go:linkname Sinc py.sinc +func Sinc(x *py.Object) *py.Object +// +// Return the Hamming window. +// +// The Hamming window is a taper formed by using a weighted cosine. +// +// Parameters +// ---------- +// M : int +// Number of points in the output window. If zero or less, an +// empty array is returned. +// +// Returns +// ------- +// out : ndarray +// The window, with the maximum value normalized to one (the value +// one appears only if the number of samples is odd). +// +// See Also +// -------- +// bartlett, blackman, hanning, kaiser +// +// Notes +// ----- +// The Hamming window is defined as +// +// .. math:: w(n) = 0.54 - 0.46\cos\left(\frac{2\pi{n}}{M-1}\right) +// \qquad 0 \leq n \leq M-1 +// +// The Hamming was named for R. W. Hamming, an associate of J. W. Tukey +// and is described in Blackman and Tukey. It was recommended for +// smoothing the truncated autocovariance function in the time domain. +// Most references to the Hamming window come from the signal processing +// literature, where it is used as one of many windowing functions for +// smoothing values. It is also known as an apodization (which means +// "removing the foot", i.e. smoothing discontinuities at the beginning +// and end of the sampled signal) or tapering function. +// +// References +// ---------- +// .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power +// spectra, Dover Publications, New York. +// .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The +// University of Alberta Press, 1975, pp. 109-110. +// .. [3] Wikipedia, "Window function", +// https://en.wikipedia.org/wiki/Window_function +// .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, +// "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+// +// Examples +// -------- +// >>> np.hamming(12) +// array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary +// 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, +// 0.15302337, 0.08 ]) +// +// Plot the window and the frequency response: +// +// >>> import matplotlib.pyplot as plt +// >>> from numpy.fft import fft, fftshift +// >>> window = np.hamming(51) +// >>> plt.plot(window) +// [] +// >>> plt.title("Hamming window") +// Text(0.5, 1.0, 'Hamming window') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("Sample") +// Text(0.5, 0, 'Sample') +// >>> plt.show() +// +// >>> plt.figure() +//
+// >>> A = fft(window, 2048) / 25.5 +// >>> mag = np.abs(fftshift(A)) +// >>> freq = np.linspace(-0.5, 0.5, len(A)) +// >>> response = 20 * np.log10(mag) +// >>> response = np.clip(response, -100, 100) +// >>> plt.plot(freq, response) +// [] +// >>> plt.title("Frequency response of Hamming window") +// Text(0.5, 1.0, 'Frequency response of Hamming window') +// >>> plt.ylabel("Magnitude [dB]") +// Text(0, 0.5, 'Magnitude [dB]') +// >>> plt.xlabel("Normalized frequency [cycles per sample]") +// Text(0.5, 0, 'Normalized frequency [cycles per sample]') +// >>> plt.axis('tight') +// ... +// >>> plt.show() +// +// +// +//go:linkname Hamming py.hamming +func Hamming(M *py.Object) *py.Object +// +// Return the Hanning window. +// +// The Hanning window is a taper formed by using a weighted cosine. +// +// Parameters +// ---------- +// M : int +// Number of points in the output window. If zero or less, an +// empty array is returned. +// +// Returns +// ------- +// out : ndarray, shape(M,) +// The window, with the maximum value normalized to one (the value +// one appears only if `M` is odd). +// +// See Also +// -------- +// bartlett, blackman, hamming, kaiser +// +// Notes +// ----- +// The Hanning window is defined as +// +// .. math:: w(n) = 0.5 - 0.5\cos\left(\frac{2\pi{n}}{M-1}\right) +// \qquad 0 \leq n \leq M-1 +// +// The Hanning was named for Julius von Hann, an Austrian meteorologist. +// It is also known as the Cosine Bell. Some authors prefer that it be +// called a Hann window, to help avoid confusion with the very similar +// Hamming window. +// +// Most references to the Hanning window come from the signal processing +// literature, where it is used as one of many windowing functions for +// smoothing values. It is also known as an apodization (which means +// "removing the foot", i.e. smoothing discontinuities at the beginning +// and end of the sampled signal) or tapering function. +// +// References +// ---------- +// .. [1] Blackman, R.B. 
and Tukey, J.W., (1958) The measurement of power +// spectra, Dover Publications, New York. +// .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", +// The University of Alberta Press, 1975, pp. 106-108. +// .. [3] Wikipedia, "Window function", +// https://en.wikipedia.org/wiki/Window_function +// .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, +// "Numerical Recipes", Cambridge University Press, 1986, page 425. +// +// Examples +// -------- +// >>> np.hanning(12) +// array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, +// 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, +// 0.07937323, 0. ]) +// +// Plot the window and its frequency response: +// +// >>> import matplotlib.pyplot as plt +// >>> from numpy.fft import fft, fftshift +// >>> window = np.hanning(51) +// >>> plt.plot(window) +// [] +// >>> plt.title("Hann window") +// Text(0.5, 1.0, 'Hann window') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("Sample") +// Text(0.5, 0, 'Sample') +// >>> plt.show() +// +// >>> plt.figure() +//
+// >>> A = fft(window, 2048) / 25.5 +// >>> mag = np.abs(fftshift(A)) +// >>> freq = np.linspace(-0.5, 0.5, len(A)) +// >>> with np.errstate(divide='ignore', invalid='ignore'): +// ... response = 20 * np.log10(mag) +// ... +// >>> response = np.clip(response, -100, 100) +// >>> plt.plot(freq, response) +// [] +// >>> plt.title("Frequency response of the Hann window") +// Text(0.5, 1.0, 'Frequency response of the Hann window') +// >>> plt.ylabel("Magnitude [dB]") +// Text(0, 0.5, 'Magnitude [dB]') +// >>> plt.xlabel("Normalized frequency [cycles per sample]") +// Text(0.5, 0, 'Normalized frequency [cycles per sample]') +// >>> plt.axis('tight') +// ... +// >>> plt.show() +// +// +// +//go:linkname Hanning py.hanning +func Hanning(M *py.Object) *py.Object +// +// Return the Bartlett window. +// +// The Bartlett window is very similar to a triangular window, except +// that the end points are at zero. It is often used in signal +// processing for tapering a signal, without generating too much +// ripple in the frequency domain. +// +// Parameters +// ---------- +// M : int +// Number of points in the output window. If zero or less, an +// empty array is returned. +// +// Returns +// ------- +// out : array +// The triangular window, with the maximum value normalized to one +// (the value one appears only if the number of samples is odd), with +// the first and last samples equal to zero. +// +// See Also +// -------- +// blackman, hamming, hanning, kaiser +// +// Notes +// ----- +// The Bartlett window is defined as +// +// .. math:: w(n) = \frac{2}{M-1} \left( +// \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| +// \right) +// +// Most references to the Bartlett window come from the signal processing +// literature, where it is used as one of many windowing functions for +// smoothing values. Note that convolution with this window produces linear +// interpolation. It is also known as an apodization (which means "removing +// the foot", i.e. 
smoothing discontinuities at the beginning and end of the +// sampled signal) or tapering function. The Fourier transform of the +// Bartlett window is the product of two sinc functions. Note the excellent +// discussion in Kanasewich [2]_. +// +// References +// ---------- +// .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", +// Biometrika 37, 1-16, 1950. +// .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", +// The University of Alberta Press, 1975, pp. 109-110. +// .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal +// Processing", Prentice-Hall, 1999, pp. 468-471. +// .. [4] Wikipedia, "Window function", +// https://en.wikipedia.org/wiki/Window_function +// .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, +// "Numerical Recipes", Cambridge University Press, 1986, page 429. +// +// Examples +// -------- +// >>> import matplotlib.pyplot as plt +// >>> np.bartlett(12) +// array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary +// 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, +// 0.18181818, 0. ]) +// +// Plot the window and its frequency response (requires SciPy and matplotlib): +// +// >>> from numpy.fft import fft, fftshift +// >>> window = np.bartlett(51) +// >>> plt.plot(window) +// [] +// >>> plt.title("Bartlett window") +// Text(0.5, 1.0, 'Bartlett window') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("Sample") +// Text(0.5, 0, 'Sample') +// >>> plt.show() +// +// >>> plt.figure() +//
+// >>> A = fft(window, 2048) / 25.5 +// >>> mag = np.abs(fftshift(A)) +// >>> freq = np.linspace(-0.5, 0.5, len(A)) +// >>> with np.errstate(divide='ignore', invalid='ignore'): +// ... response = 20 * np.log10(mag) +// ... +// >>> response = np.clip(response, -100, 100) +// >>> plt.plot(freq, response) +// [] +// >>> plt.title("Frequency response of Bartlett window") +// Text(0.5, 1.0, 'Frequency response of Bartlett window') +// >>> plt.ylabel("Magnitude [dB]") +// Text(0, 0.5, 'Magnitude [dB]') +// >>> plt.xlabel("Normalized frequency [cycles per sample]") +// Text(0.5, 0, 'Normalized frequency [cycles per sample]') +// >>> _ = plt.axis('tight') +// >>> plt.show() +// +// +// +//go:linkname Bartlett py.bartlett +func Bartlett(M *py.Object) *py.Object +// +// Return the Blackman window. +// +// The Blackman window is a taper formed by using the first three +// terms of a summation of cosines. It was designed to have close to the +// minimal leakage possible. It is close to optimal, only slightly worse +// than a Kaiser window. +// +// Parameters +// ---------- +// M : int +// Number of points in the output window. If zero or less, an empty +// array is returned. +// +// Returns +// ------- +// out : ndarray +// The window, with the maximum value normalized to one (the value one +// appears only if the number of samples is odd). +// +// See Also +// -------- +// bartlett, hamming, hanning, kaiser +// +// Notes +// ----- +// The Blackman window is defined as +// +// .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M) +// +// Most references to the Blackman window come from the signal processing +// literature, where it is used as one of many windowing functions for +// smoothing values. It is also known as an apodization (which means +// "removing the foot", i.e. smoothing discontinuities at the beginning +// and end of the sampled signal) or tapering function. 
It is known as a +// "near optimal" tapering function, almost as good (by some measures) +// as the kaiser window. +// +// References +// ---------- +// Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, +// Dover Publications, New York. +// +// Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. +// Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. +// +// Examples +// -------- +// >>> import matplotlib.pyplot as plt +// >>> np.blackman(12) +// array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary +// 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, +// 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, +// 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) +// +// Plot the window and the frequency response: +// +// >>> from numpy.fft import fft, fftshift +// >>> window = np.blackman(51) +// >>> plt.plot(window) +// [] +// >>> plt.title("Blackman window") +// Text(0.5, 1.0, 'Blackman window') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("Sample") +// Text(0.5, 0, 'Sample') +// >>> plt.show() +// +// >>> plt.figure() +//
+// >>> A = fft(window, 2048) / 25.5 +// >>> mag = np.abs(fftshift(A)) +// >>> freq = np.linspace(-0.5, 0.5, len(A)) +// >>> with np.errstate(divide='ignore', invalid='ignore'): +// ... response = 20 * np.log10(mag) +// ... +// >>> response = np.clip(response, -100, 100) +// >>> plt.plot(freq, response) +// [] +// >>> plt.title("Frequency response of Blackman window") +// Text(0.5, 1.0, 'Frequency response of Blackman window') +// >>> plt.ylabel("Magnitude [dB]") +// Text(0, 0.5, 'Magnitude [dB]') +// >>> plt.xlabel("Normalized frequency [cycles per sample]") +// Text(0.5, 0, 'Normalized frequency [cycles per sample]') +// >>> _ = plt.axis('tight') +// >>> plt.show() +// +// +// +//go:linkname Blackman py.blackman +func Blackman(M *py.Object) *py.Object +// +// Return the Kaiser window. +// +// The Kaiser window is a taper formed by using a Bessel function. +// +// Parameters +// ---------- +// M : int +// Number of points in the output window. If zero or less, an +// empty array is returned. +// beta : float +// Shape parameter for window. +// +// Returns +// ------- +// out : array +// The window, with the maximum value normalized to one (the value +// one appears only if the number of samples is odd). +// +// See Also +// -------- +// bartlett, blackman, hamming, hanning +// +// Notes +// ----- +// The Kaiser window is defined as +// +// .. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} +// \right)/I_0(\beta) +// +// with +// +// .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2}, +// +// where :math:`I_0` is the modified zeroth-order Bessel function. +// +// The Kaiser was named for Jim Kaiser, who discovered a simple +// approximation to the DPSS window based on Bessel functions. The Kaiser +// window is a very good approximation to the Digital Prolate Spheroidal +// Sequence, or Slepian window, which is the transform which maximizes the +// energy in the main lobe of the window relative to total energy. 
+// +// The Kaiser can approximate many other windows by varying the beta +// parameter. +// +// ==== ======================= +// beta Window shape +// ==== ======================= +// 0 Rectangular +// 5 Similar to a Hamming +// 6 Similar to a Hanning +// 8.6 Similar to a Blackman +// ==== ======================= +// +// A beta value of 14 is probably a good starting point. Note that as beta +// gets large, the window narrows, and so the number of samples needs to be +// large enough to sample the increasingly narrow spike, otherwise NaNs will +// get returned. +// +// Most references to the Kaiser window come from the signal processing +// literature, where it is used as one of many windowing functions for +// smoothing values. It is also known as an apodization (which means +// "removing the foot", i.e. smoothing discontinuities at the beginning +// and end of the sampled signal) or tapering function. +// +// References +// ---------- +// .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by +// digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. +// John Wiley and Sons, New York, (1966). +// .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The +// University of Alberta Press, 1975, pp. 177-178. +// .. 
[3] Wikipedia, "Window function", +// https://en.wikipedia.org/wiki/Window_function +// +// Examples +// -------- +// >>> import matplotlib.pyplot as plt +// >>> np.kaiser(12, 14) +// array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary +// 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, +// 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, +// 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) +// +// +// Plot the window and the frequency response: +// +// >>> from numpy.fft import fft, fftshift +// >>> window = np.kaiser(51, 14) +// >>> plt.plot(window) +// [] +// >>> plt.title("Kaiser window") +// Text(0.5, 1.0, 'Kaiser window') +// >>> plt.ylabel("Amplitude") +// Text(0, 0.5, 'Amplitude') +// >>> plt.xlabel("Sample") +// Text(0.5, 0, 'Sample') +// >>> plt.show() +// +// >>> plt.figure() +//
+// >>> A = fft(window, 2048) / 25.5 +// >>> mag = np.abs(fftshift(A)) +// >>> freq = np.linspace(-0.5, 0.5, len(A)) +// >>> response = 20 * np.log10(mag) +// >>> response = np.clip(response, -100, 100) +// >>> plt.plot(freq, response) +// [] +// >>> plt.title("Frequency response of Kaiser window") +// Text(0.5, 1.0, 'Frequency response of Kaiser window') +// >>> plt.ylabel("Magnitude [dB]") +// Text(0, 0.5, 'Magnitude [dB]') +// >>> plt.xlabel("Normalized frequency [cycles per sample]") +// Text(0.5, 0, 'Normalized frequency [cycles per sample]') +// >>> plt.axis('tight') +// (-0.5, 0.5, -100.0, ...) # may vary +// >>> plt.show() +// +// +// +//go:linkname Kaiser py.kaiser +func Kaiser(M *py.Object, beta *py.Object) *py.Object +// +// Integrate along the given axis using the composite trapezoidal rule. +// +// If `x` is provided, the integration happens in sequence along its +// elements - they are not sorted. +// +// Integrate `y` (`x`) along each 1d slice on the given axis, compute +// :math:`\int y(x) dx`. +// When `x` is specified, this integrates along the parametric curve, +// computing :math:`\int_t y(t) dt = +// \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. +// +// Parameters +// ---------- +// y : array_like +// Input array to integrate. +// x : array_like, optional +// The sample points corresponding to the `y` values. If `x` is None, +// the sample points are assumed to be evenly spaced `dx` apart. The +// default is None. +// dx : scalar, optional +// The spacing between sample points when `x` is None. The default is 1. +// axis : int, optional +// The axis along which to integrate. +// +// Returns +// ------- +// trapz : float or ndarray +// Definite integral of `y` = n-dimensional array as approximated along +// a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, +// then the result is a float. If `n` is greater than 1, then the result +// is an `n`-1 dimensional array. 
+// +// See Also +// -------- +// sum, cumsum +// +// Notes +// ----- +// Image [2]_ illustrates trapezoidal rule -- y-axis locations of points +// will be taken from `y` array, by default x-axis distances between +// points will be 1.0, alternatively they can be provided with `x` array +// or with `dx` scalar. Return value will be equal to combined area under +// the red lines. +// +// +// References +// ---------- +// .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule +// +// .. [2] Illustration image: +// https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png +// +// Examples +// -------- +// Use the trapezoidal rule on evenly spaced points: +// +// >>> np.trapz([1, 2, 3]) +// 4.0 +// +// The spacing between sample points can be selected by either the +// ``x`` or ``dx`` arguments: +// +// >>> np.trapz([1, 2, 3], x=[4, 6, 8]) +// 8.0 +// >>> np.trapz([1, 2, 3], dx=2) +// 8.0 +// +// Using a decreasing ``x`` corresponds to integrating in reverse: +// +// >>> np.trapz([1, 2, 3], x=[8, 6, 4]) +// -8.0 +// +// More generally ``x`` is used to integrate along a parametric curve. 
We can +// estimate the integral :math:`\int_0^1 x^2 = 1/3` using: +// +// >>> x = np.linspace(0, 1, num=50) +// >>> y = x**2 +// >>> np.trapz(y, x) +// 0.33340274885464394 +// +// Or estimate the area of a circle, noting we repeat the sample which closes +// the curve: +// +// >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) +// >>> np.trapz(np.cos(theta), x=np.sin(theta)) +// 3.141571941375841 +// +// ``np.trapz`` can be applied along a specified axis to do multiple +// computations in one call: +// +// >>> a = np.arange(6).reshape(2, 3) +// >>> a +// array([[0, 1, 2], +// [3, 4, 5]]) +// >>> np.trapz(a, axis=0) +// array([1.5, 2.5, 3.5]) +// >>> np.trapz(a, axis=1) +// array([2., 8.]) +// +// +//go:linkname Trapz py.trapz +func Trapz(y *py.Object, x *py.Object, dx *py.Object, axis *py.Object) *py.Object +// +// Modified Bessel function of the first kind, order 0. +// +// Usually denoted :math:`I_0`. +// +// Parameters +// ---------- +// x : array_like of float +// Argument of the Bessel function. +// +// Returns +// ------- +// out : ndarray, shape = x.shape, dtype = float +// The modified Bessel function evaluated at each of the elements of `x`. +// +// See Also +// -------- +// scipy.special.i0, scipy.special.iv, scipy.special.ive +// +// Notes +// ----- +// The scipy implementation is recommended over this function: it is a +// proper ufunc written in C, and more than an order of magnitude faster. +// +// We use the algorithm published by Clenshaw [1]_ and referenced by +// Abramowitz and Stegun [2]_, for which the function domain is +// partitioned into the two intervals [0,8] and (8,inf), and Chebyshev +// polynomial expansions are employed in each interval. Relative error on +// the domain [0,30] using IEEE arithmetic is documented [3]_ as having a +// peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). +// +// References +// ---------- +// .. [1] C. W. 
Clenshaw, "Chebyshev series for mathematical functions", in +// *National Physical Laboratory Mathematical Tables*, vol. 5, London: +// Her Majesty's Stationery Office, 1962. +// .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical +// Functions*, 10th printing, New York: Dover, 1964, pp. 379. +// https://personal.math.ubc.ca/~cbm/aands/page_379.htm +// .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero +// +// Examples +// -------- +// >>> np.i0(0.) +// array(1.0) +// >>> np.i0([0, 1, 2, 3]) +// array([1. , 1.26606588, 2.2795853 , 4.88079259]) +// +// +// +//go:linkname I0 py.i0 +func I0(x *py.Object) *py.Object +// +// Add documentation to an existing object, typically one defined in C +// +// The purpose is to allow easier editing of the docstrings without requiring +// a re-compile. This exists primarily for internal use within numpy itself. +// +// Parameters +// ---------- +// place : str +// The absolute name of the module to import from +// obj : str +// The name of the object to add documentation to, typically a class or +// function name +// doc : {str, Tuple[str, str], List[Tuple[str, str]]} +// If a string, the documentation to apply to `obj` +// +// If a tuple, then the first element is interpreted as an attribute of +// `obj` and the second as the docstring to apply - ``(method, docstring)`` +// +// If a list, then each element of the list should be a tuple of length +// two - ``[(method1, docstring1), (method2, docstring2), ...]`` +// warn_on_python : bool +// If True, the default, emit `UserWarning` if this is used to attach +// documentation to a pure-python object. +// +// Notes +// ----- +// This routine never raises an error if the docstring can't be written, but +// will raise an error if the object being documented does not exist. +// +// This routine cannot modify read-only docstrings, as appear +// in new-style classes or built-in functions. 
Because this +// routine never raises an error the caller must check manually +// that the docstrings were changed. +// +// Since this function grabs the ``char *`` from a c-level str object and puts +// it into the ``tp_doc`` slot of the type of `obj`, it violates a number of +// C-API best-practices, by: +// +// - modifying a `PyTypeObject` after calling `PyType_Ready` +// - calling `Py_INCREF` on the str and losing the reference, so the str +// will never be released +// +// If possible it should be avoided. +// +// +//go:linkname AddNewdoc py.add_newdoc +func AddNewdoc(place *py.Object, obj *py.Object, doc *py.Object, warnOnPython *py.Object) *py.Object +// add_docstring(obj, docstring) +// +// Add a docstring to a built-in obj if possible. +// If the obj already has a docstring raise a RuntimeError +// If this routine does not know how to add a docstring to the object +// raise a TypeError +// +//go:linkname AddDocstring py.add_docstring +func AddDocstring(obj *py.Object, docstring *py.Object) *py.Object +// +// Return a list of coordinate matrices from coordinate vectors. +// +// Make N-D coordinate arrays for vectorized evaluations of +// N-D scalar/vector fields over N-D grids, given +// one-dimensional coordinate arrays x1, x2,..., xn. +// +// .. versionchanged:: 1.9 +// 1-D and 0-D cases are allowed. +// +// Parameters +// ---------- +// x1, x2,..., xn : array_like +// 1-D arrays representing the coordinates of a grid. +// indexing : {'xy', 'ij'}, optional +// Cartesian ('xy', default) or matrix ('ij') indexing of output. +// See Notes for more details. +// +// .. versionadded:: 1.7.0 +// sparse : bool, optional +// If True the shape of the returned coordinate array for dimension *i* +// is reduced from ``(N1, ..., Ni, ... Nn)`` to +// ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are +// intended to be use with :ref:`basics.broadcasting`. 
When all +// coordinates are used in an expression, broadcasting still leads to a +// fully-dimensonal result array. +// +// Default is False. +// +// .. versionadded:: 1.7.0 +// copy : bool, optional +// If False, a view into the original arrays are returned in order to +// conserve memory. Default is True. Please note that +// ``sparse=False, copy=False`` will likely return non-contiguous +// arrays. Furthermore, more than one element of a broadcast array +// may refer to a single memory location. If you need to write to the +// arrays, make copies first. +// +// .. versionadded:: 1.7.0 +// +// Returns +// ------- +// X1, X2,..., XN : list of ndarrays +// For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, +// returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' +// or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' +// with the elements of `xi` repeated to fill the matrix along +// the first dimension for `x1`, the second for `x2` and so on. +// +// Notes +// ----- +// This function supports both indexing conventions through the indexing +// keyword argument. Giving the string 'ij' returns a meshgrid with +// matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. +// In the 2-D case with inputs of length M and N, the outputs are of shape +// (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case +// with inputs of length M, N and P, outputs are of shape (N, M, P) for +// 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is +// illustrated by the following code snippet:: +// +// xv, yv = np.meshgrid(x, y, indexing='ij') +// for i in range(nx): +// for j in range(ny): +// # treat xv[i,j], yv[i,j] +// +// xv, yv = np.meshgrid(x, y, indexing='xy') +// for i in range(nx): +// for j in range(ny): +// # treat xv[j,i], yv[j,i] +// +// In the 1-D and 0-D case, the indexing and sparse keywords have no effect. 
+// +// See Also +// -------- +// mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. +// ogrid : Construct an open multi-dimensional "meshgrid" using indexing +// notation. +// how-to-index +// +// Examples +// -------- +// >>> nx, ny = (3, 2) +// >>> x = np.linspace(0, 1, nx) +// >>> y = np.linspace(0, 1, ny) +// >>> xv, yv = np.meshgrid(x, y) +// >>> xv +// array([[0. , 0.5, 1. ], +// [0. , 0.5, 1. ]]) +// >>> yv +// array([[0., 0., 0.], +// [1., 1., 1.]]) +// +// The result of `meshgrid` is a coordinate grid: +// +// >>> import matplotlib.pyplot as plt +// >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none') +// >>> plt.show() +// +// You can create sparse output arrays to save memory and computation time. +// +// >>> xv, yv = np.meshgrid(x, y, sparse=True) +// >>> xv +// array([[0. , 0.5, 1. ]]) +// >>> yv +// array([[0.], +// [1.]]) +// +// `meshgrid` is very useful to evaluate functions on a grid. If the +// function depends on all coordinates, both dense and sparse outputs can be +// used. +// +// >>> x = np.linspace(-5, 5, 101) +// >>> y = np.linspace(-5, 5, 101) +// >>> # full coordinate arrays +// >>> xx, yy = np.meshgrid(x, y) +// >>> zz = np.sqrt(xx**2 + yy**2) +// >>> xx.shape, yy.shape, zz.shape +// ((101, 101), (101, 101), (101, 101)) +// >>> # sparse coordinate arrays +// >>> xs, ys = np.meshgrid(x, y, sparse=True) +// >>> zs = np.sqrt(xs**2 + ys**2) +// >>> xs.shape, ys.shape, zs.shape +// ((1, 101), (101, 1), (101, 101)) +// >>> np.array_equal(zz, zs) +// True +// +// >>> h = plt.contourf(x, y, zs) +// >>> plt.axis('scaled') +// >>> plt.colorbar() +// >>> plt.show() +// +// +//go:linkname Meshgrid py.meshgrid +func Meshgrid(__llgo_va_list ...interface{}) *py.Object +// +// Return a new array with sub-arrays along an axis deleted. For a one +// dimensional array, this returns those entries not returned by +// `arr[obj]`. +// +// Parameters +// ---------- +// arr : array_like +// Input array. 
+// obj : slice, int or array of ints +// Indicate indices of sub-arrays to remove along the specified axis. +// +// .. versionchanged:: 1.19.0 +// Boolean indices are now treated as a mask of elements to remove, +// rather than being cast to the integers 0 and 1. +// +// axis : int, optional +// The axis along which to delete the subarray defined by `obj`. +// If `axis` is None, `obj` is applied to the flattened array. +// +// Returns +// ------- +// out : ndarray +// A copy of `arr` with the elements specified by `obj` removed. Note +// that `delete` does not occur in-place. If `axis` is None, `out` is +// a flattened array. +// +// See Also +// -------- +// insert : Insert elements into an array. +// append : Append elements at the end of an array. +// +// Notes +// ----- +// Often it is preferable to use a boolean mask. For example: +// +// >>> arr = np.arange(12) + 1 +// >>> mask = np.ones(len(arr), dtype=bool) +// >>> mask[[0,2,4]] = False +// >>> result = arr[mask,...] +// +// Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further +// use of `mask`. +// +// Examples +// -------- +// >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) +// >>> arr +// array([[ 1, 2, 3, 4], +// [ 5, 6, 7, 8], +// [ 9, 10, 11, 12]]) +// >>> np.delete(arr, 1, 0) +// array([[ 1, 2, 3, 4], +// [ 9, 10, 11, 12]]) +// +// >>> np.delete(arr, np.s_[::2], 1) +// array([[ 2, 4], +// [ 6, 8], +// [10, 12]]) +// >>> np.delete(arr, [1,3,5], None) +// array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) +// +// +// +//go:linkname Delete py.delete +func Delete(arr *py.Object, obj *py.Object, axis *py.Object) *py.Object +// +// Insert values along the given axis before the given indices. +// +// Parameters +// ---------- +// arr : array_like +// Input array. +// obj : int, slice or sequence of ints +// Object that defines the index or indices before which `values` is +// inserted. +// +// .. 
versionadded:: 1.8.0 +// +// Support for multiple insertions when `obj` is a single scalar or a +// sequence with one element (similar to calling insert multiple +// times). +// values : array_like +// Values to insert into `arr`. If the type of `values` is different +// from that of `arr`, `values` is converted to the type of `arr`. +// `values` should be shaped so that ``arr[...,obj,...] = values`` +// is legal. +// axis : int, optional +// Axis along which to insert `values`. If `axis` is None then `arr` +// is flattened first. +// +// Returns +// ------- +// out : ndarray +// A copy of `arr` with `values` inserted. Note that `insert` +// does not occur in-place: a new array is returned. If +// `axis` is None, `out` is a flattened array. +// +// See Also +// -------- +// append : Append elements at the end of an array. +// concatenate : Join a sequence of arrays along an existing axis. +// delete : Delete elements from an array. +// +// Notes +// ----- +// Note that for higher dimensional inserts ``obj=0`` behaves very different +// from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from +// ``arr[:,[0],:] = values``. +// +// Examples +// -------- +// >>> a = np.array([[1, 1], [2, 2], [3, 3]]) +// >>> a +// array([[1, 1], +// [2, 2], +// [3, 3]]) +// >>> np.insert(a, 1, 5) +// array([1, 5, 1, ..., 2, 3, 3]) +// >>> np.insert(a, 1, 5, axis=1) +// array([[1, 5, 1], +// [2, 5, 2], +// [3, 5, 3]]) +// +// Difference between sequence and scalars: +// +// >>> np.insert(a, [1], [[1],[2],[3]], axis=1) +// array([[1, 1, 1], +// [2, 2, 2], +// [3, 3, 3]]) +// >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), +// ... 
np.insert(a, [1], [[1],[2],[3]], axis=1)) +// True +// +// >>> b = a.flatten() +// >>> b +// array([1, 1, 2, 2, 3, 3]) +// >>> np.insert(b, [2, 2], [5, 6]) +// array([1, 1, 5, ..., 2, 3, 3]) +// +// >>> np.insert(b, slice(2, 4), [5, 6]) +// array([1, 1, 5, ..., 2, 3, 3]) +// +// >>> np.insert(b, [2, 2], [7.13, False]) # type casting +// array([1, 1, 7, ..., 2, 3, 3]) +// +// >>> x = np.arange(8).reshape(2, 4) +// >>> idx = (1, 3) +// >>> np.insert(x, idx, 999, axis=1) +// array([[ 0, 999, 1, 2, 999, 3], +// [ 4, 999, 5, 6, 999, 7]]) +// +// +// +//go:linkname Insert py.insert +func Insert(arr *py.Object, obj *py.Object, values *py.Object, axis *py.Object) *py.Object +// +// Append values to the end of an array. +// +// Parameters +// ---------- +// arr : array_like +// Values are appended to a copy of this array. +// values : array_like +// These values are appended to a copy of `arr`. It must be of the +// correct shape (the same shape as `arr`, excluding `axis`). If +// `axis` is not specified, `values` can be any shape and will be +// flattened before use. +// axis : int, optional +// The axis along which `values` are appended. If `axis` is not +// given, both `arr` and `values` are flattened before use. +// +// Returns +// ------- +// append : ndarray +// A copy of `arr` with `values` appended to `axis`. Note that +// `append` does not occur in-place: a new array is allocated and +// filled. If `axis` is None, `out` is a flattened array. +// +// See Also +// -------- +// insert : Insert elements into an array. +// delete : Delete elements from an array. +// +// Examples +// -------- +// >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) +// array([1, 2, 3, ..., 7, 8, 9]) +// +// When `axis` is specified, `values` must have the correct shape. 
+// +// >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) +// array([[1, 2, 3], +// [4, 5, 6], +// [7, 8, 9]]) +// >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) +// Traceback (most recent call last): +// ... +// ValueError: all the input arrays must have same number of dimensions, but +// the array at index 0 has 2 dimension(s) and the array at index 1 has 1 +// dimension(s) +// +// +// +//go:linkname Append py.append +func Append(arr *py.Object, values *py.Object, axis *py.Object) *py.Object +// +// One-dimensional linear interpolation for monotonically increasing sample points. +// +// Returns the one-dimensional piecewise linear interpolant to a function +// with given discrete data points (`xp`, `fp`), evaluated at `x`. +// +// Parameters +// ---------- +// x : array_like +// The x-coordinates at which to evaluate the interpolated values. +// +// xp : 1-D sequence of floats +// The x-coordinates of the data points, must be increasing if argument +// `period` is not specified. Otherwise, `xp` is internally sorted after +// normalizing the periodic boundaries with ``xp = xp % period``. +// +// fp : 1-D sequence of float or complex +// The y-coordinates of the data points, same length as `xp`. +// +// left : optional float or complex corresponding to fp +// Value to return for `x < xp[0]`, default is `fp[0]`. +// +// right : optional float or complex corresponding to fp +// Value to return for `x > xp[-1]`, default is `fp[-1]`. +// +// period : None or float, optional +// A period for the x-coordinates. This parameter allows the proper +// interpolation of angular x-coordinates. Parameters `left` and `right` +// are ignored if `period` is specified. +// +// .. versionadded:: 1.10.0 +// +// Returns +// ------- +// y : float or complex (corresponding to fp) or ndarray +// The interpolated values, same shape as `x`. 
+// +// Raises +// ------ +// ValueError +// If `xp` and `fp` have different length +// If `xp` or `fp` are not 1-D sequences +// If `period == 0` +// +// See Also +// -------- +// scipy.interpolate +// +// Warnings +// -------- +// The x-coordinate sequence is expected to be increasing, but this is not +// explicitly enforced. However, if the sequence `xp` is non-increasing, +// interpolation results are meaningless. +// +// Note that, since NaN is unsortable, `xp` also cannot contain NaNs. +// +// A simple check for `xp` being strictly increasing is:: +// +// np.all(np.diff(xp) > 0) +// +// Examples +// -------- +// >>> xp = [1, 2, 3] +// >>> fp = [3, 2, 0] +// >>> np.interp(2.5, xp, fp) +// 1.0 +// >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) +// array([3. , 3. , 2.5 , 0.56, 0. ]) +// >>> UNDEF = -99.0 +// >>> np.interp(3.14, xp, fp, right=UNDEF) +// -99.0 +// +// Plot an interpolant to the sine function: +// +// >>> x = np.linspace(0, 2*np.pi, 10) +// >>> y = np.sin(x) +// >>> xvals = np.linspace(0, 2*np.pi, 50) +// >>> yinterp = np.interp(xvals, x, y) +// >>> import matplotlib.pyplot as plt +// >>> plt.plot(x, y, 'o') +// [] +// >>> plt.plot(xvals, yinterp, '-x') +// [] +// >>> plt.show() +// +// Interpolation with periodic x-coordinates: +// +// >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] +// >>> xp = [190, -190, 350, -350] +// >>> fp = [5, 10, 3, 4] +// >>> np.interp(x, xp, fp, period=360) +// array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) +// +// Complex interpolation: +// +// >>> x = [1.5, 4.0] +// >>> xp = [2,3,5] +// >>> fp = [1.0j, 0, 2+3j] +// >>> np.interp(x, xp, fp) +// array([0.+1.j , 1.+1.5j]) +// +// +// +//go:linkname Interp py.interp +func Interp(x *py.Object, xp *py.Object, fp *py.Object, left *py.Object, right *py.Object, period *py.Object) *py.Object +// add_ufunc_docstring(ufunc, new_docstring) +// +// Replace the docstring for a ufunc with new_docstring. 
+// This method will only work if the current docstring for +// the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) +// +// Parameters +// ---------- +// ufunc : numpy.ufunc +// A ufunc whose current doc is NULL. +// new_docstring : string +// The new docstring for the ufunc. +// +// Notes +// ----- +// This method allocates memory for new_docstring on +// the heap. Technically this creates a memory leak, since this +// memory will not be reclaimed until the end of the program +// even if the ufunc itself is removed. However this will only +// be a problem if the user is repeatedly creating ufuncs with +// no documentation, adding documentation via add_newdoc_ufunc, +// and then throwing away the ufunc. +// +//go:linkname AddNewdocUfunc py.add_newdoc_ufunc +func AddNewdocUfunc(__llgo_va_list ...interface{}) *py.Object +// +// Compute the q-th quantile of the data along the specified axis. +// +// .. versionadded:: 1.15.0 +// +// Parameters +// ---------- +// a : array_like of real numbers +// Input array or object that can be converted to an array. +// q : array_like of float +// Probability or sequence of probabilities for the quantiles to compute. +// Values must be between 0 and 1 inclusive. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the quantiles are computed. The default is +// to compute the quantile(s) along a flattened version of the array. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape and buffer length as the expected output, but the +// type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow the input array `a` to be modified by +// intermediate calculations, to save memory. In this case, the +// contents of the input `a` after this function completes is +// undefined. +// method : str, optional +// This parameter specifies the method to use for estimating the +// quantile. 
There are many different methods, some unique to NumPy. +// See the notes for explanation. The options sorted by their R type +// as summarized in the H&F paper [1]_ are: +// +// 1. 'inverted_cdf' +// 2. 'averaged_inverted_cdf' +// 3. 'closest_observation' +// 4. 'interpolated_inverted_cdf' +// 5. 'hazen' +// 6. 'weibull' +// 7. 'linear' (default) +// 8. 'median_unbiased' +// 9. 'normal_unbiased' +// +// The first three methods are discontinuous. NumPy further defines the +// following discontinuous variations of the default 'linear' (7.) option: +// +// * 'lower' +// * 'higher', +// * 'midpoint' +// * 'nearest' +// +// .. versionchanged:: 1.22.0 +// This argument was previously called "interpolation" and only +// offered the "linear" default and last four options. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left in +// the result as dimensions with size one. With this option, the +// result will broadcast correctly against the original array `a`. +// +// interpolation : str, optional +// Deprecated name for the method keyword argument. +// +// .. deprecated:: 1.22.0 +// +// Returns +// ------- +// quantile : scalar or ndarray +// If `q` is a single probability and `axis=None`, then the result +// is a scalar. If multiple probability levels are given, first axis of +// the result corresponds to the quantiles. The other axes are +// the axes that remain after the reduction of `a`. If the input +// contains integers or floats smaller than ``float64``, the output +// data-type is ``float64``. Otherwise, the output data-type is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// mean +// percentile : equivalent to quantile, but with q in the range [0, 100]. 
+// median : equivalent to ``quantile(..., 0.5)`` +// nanquantile +// +// Notes +// ----- +// Given a vector ``V`` of length ``n``, the q-th quantile of ``V`` is +// the value ``q`` of the way from the minimum to the maximum in a +// sorted copy of ``V``. The values and distances of the two nearest +// neighbors as well as the `method` parameter will determine the +// quantile if the normalized ranking does not match the location of +// ``q`` exactly. This function is the same as the median if ``q=0.5``, the +// same as the minimum if ``q=0.0`` and the same as the maximum if +// ``q=1.0``. +// +// The optional `method` parameter specifies the method to use when the +// desired quantile lies between two indexes ``i`` and ``j = i + 1``. +// In that case, we first determine ``i + g``, a virtual index that lies +// between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the +// fractional part of the index. The final result is, then, an interpolation +// of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, +// ``i`` and ``j`` are modified using correction constants ``alpha`` and +// ``beta`` whose choices depend on the ``method`` used. Finally, note that +// since Python uses 0-based indexing, the code subtracts another 1 from the +// index internally. +// +// The following formula determines the virtual index ``i + g``, the location +// of the quantile in the sorted sample: +// +// .. math:: +// i + g = q * ( n - alpha - beta + 1 ) + alpha +// +// The different methods then work as follows +// +// inverted_cdf: +// method 1 of H&F [1]_. +// This method gives discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 ; then take i +// +// averaged_inverted_cdf: +// method 2 of H&F [1]_. +// This method gives discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 ; then average between bounds +// +// closest_observation: +// method 3 of H&F [1]_. 
+// This method gives discontinuous results: +// +// * if g > 0 ; then take j +// * if g = 0 and index is odd ; then take j +// * if g = 0 and index is even ; then take i +// +// interpolated_inverted_cdf: +// method 4 of H&F [1]_. +// This method gives continuous results using: +// +// * alpha = 0 +// * beta = 1 +// +// hazen: +// method 5 of H&F [1]_. +// This method gives continuous results using: +// +// * alpha = 1/2 +// * beta = 1/2 +// +// weibull: +// method 6 of H&F [1]_. +// This method gives continuous results using: +// +// * alpha = 0 +// * beta = 0 +// +// linear: +// method 7 of H&F [1]_. +// This method gives continuous results using: +// +// * alpha = 1 +// * beta = 1 +// +// median_unbiased: +// method 8 of H&F [1]_. +// This method is probably the best method if the sample +// distribution function is unknown (see reference). +// This method gives continuous results using: +// +// * alpha = 1/3 +// * beta = 1/3 +// +// normal_unbiased: +// method 9 of H&F [1]_. +// This method is probably the best method if the sample +// distribution function is known to be normal. +// This method gives continuous results using: +// +// * alpha = 3/8 +// * beta = 3/8 +// +// lower: +// NumPy method kept for backwards compatibility. +// Takes ``i`` as the interpolation point. +// +// higher: +// NumPy method kept for backwards compatibility. +// Takes ``j`` as the interpolation point. +// +// nearest: +// NumPy method kept for backwards compatibility. +// Takes ``i`` or ``j``, whichever is nearest. +// +// midpoint: +// NumPy method kept for backwards compatibility. +// Uses ``(i + j) / 2``. 
+// +// Examples +// -------- +// >>> a = np.array([[10, 7, 4], [3, 2, 1]]) +// >>> a +// array([[10, 7, 4], +// [ 3, 2, 1]]) +// >>> np.quantile(a, 0.5) +// 3.5 +// >>> np.quantile(a, 0.5, axis=0) +// array([6.5, 4.5, 2.5]) +// >>> np.quantile(a, 0.5, axis=1) +// array([7., 2.]) +// >>> np.quantile(a, 0.5, axis=1, keepdims=True) +// array([[7.], +// [2.]]) +// >>> m = np.quantile(a, 0.5, axis=0) +// >>> out = np.zeros_like(m) +// >>> np.quantile(a, 0.5, axis=0, out=out) +// array([6.5, 4.5, 2.5]) +// >>> m +// array([6.5, 4.5, 2.5]) +// >>> b = a.copy() +// >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a == b) +// +// See also `numpy.percentile` for a visualization of most methods. +// +// References +// ---------- +// .. [1] R. J. Hyndman and Y. Fan, +// "Sample quantiles in statistical packages," +// The American Statistician, 50(4), pp. 361-365, 1996 +// +// +// +//go:linkname Quantile py.quantile +func Quantile(__llgo_va_list ...interface{}) *py.Object +// +// Stack 1-D arrays as columns into a 2-D array. +// +// Take a sequence of 1-D arrays and stack them as columns +// to make a single 2-D array. 2-D arrays are stacked as-is, +// just like with `hstack`. 1-D arrays are turned into 2-D columns +// first. +// +// Parameters +// ---------- +// tup : sequence of 1-D or 2-D arrays. +// Arrays to stack. All of them must have the same first dimension. +// +// Returns +// ------- +// stacked : 2-D array +// The array formed by stacking the given arrays. +// +// See Also +// -------- +// stack, hstack, vstack, concatenate +// +// Examples +// -------- +// >>> a = np.array((1,2,3)) +// >>> b = np.array((2,3,4)) +// >>> np.column_stack((a,b)) +// array([[1, 2], +// [2, 3], +// [3, 4]]) +// +// +// +//go:linkname ColumnStack py.column_stack +func ColumnStack(__llgo_va_list ...interface{}) *py.Object +// +// Stack arrays in sequence vertically (row wise). 
+// +// This is equivalent to concatenation along the first axis after 1-D arrays +// of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by +// `vsplit`. +// +// This function makes most sense for arrays with up to 3 dimensions. For +// instance, for pixel-data with a height (first axis), width (second axis), +// and r/g/b channels (third axis). The functions `concatenate`, `stack` and +// `block` provide more general stacking and concatenation operations. +// +// ``np.row_stack`` is an alias for `vstack`. They are the same function. +// +// Parameters +// ---------- +// tup : sequence of ndarrays +// The arrays must have the same shape along all but the first axis. +// 1-D arrays must have the same length. +// +// dtype : str or dtype +// If provided, the destination array will have this dtype. Cannot be +// provided together with `out`. +// +// .. versionadded:: 1.24 +// +// casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional +// Controls what kind of data casting may occur. Defaults to 'same_kind'. +// +// .. versionadded:: 1.24 +// +// Returns +// ------- +// stacked : ndarray +// The array formed by stacking the given arrays, will be at least 2-D. +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. +// stack : Join a sequence of arrays along a new axis. +// block : Assemble an nd-array from nested lists of blocks. +// hstack : Stack arrays in sequence horizontally (column wise). +// dstack : Stack arrays in sequence depth wise (along third axis). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// vsplit : Split an array into multiple sub-arrays vertically (row-wise). 
+// +// Examples +// -------- +// >>> a = np.array([1, 2, 3]) +// >>> b = np.array([4, 5, 6]) +// >>> np.vstack((a,b)) +// array([[1, 2, 3], +// [4, 5, 6]]) +// +// >>> a = np.array([[1], [2], [3]]) +// >>> b = np.array([[4], [5], [6]]) +// >>> np.vstack((a,b)) +// array([[1], +// [2], +// [3], +// [4], +// [5], +// [6]]) +// +// +// +//go:linkname RowStack py.row_stack +func RowStack(__llgo_va_list ...interface{}) *py.Object +// +// Stack arrays in sequence depth wise (along third axis). +// +// This is equivalent to concatenation along the third axis after 2-D arrays +// of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape +// `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by +// `dsplit`. +// +// This function makes most sense for arrays with up to 3 dimensions. For +// instance, for pixel-data with a height (first axis), width (second axis), +// and r/g/b channels (third axis). The functions `concatenate`, `stack` and +// `block` provide more general stacking and concatenation operations. +// +// Parameters +// ---------- +// tup : sequence of arrays +// The arrays must have the same shape along all but the third axis. +// 1-D or 2-D arrays must have the same shape. +// +// Returns +// ------- +// stacked : ndarray +// The array formed by stacking the given arrays, will be at least 3-D. +// +// See Also +// -------- +// concatenate : Join a sequence of arrays along an existing axis. +// stack : Join a sequence of arrays along a new axis. +// block : Assemble an nd-array from nested lists of blocks. +// vstack : Stack arrays in sequence vertically (row wise). +// hstack : Stack arrays in sequence horizontally (column wise). +// column_stack : Stack 1-D arrays as columns into a 2-D array. +// dsplit : Split array along third axis. 
+// +// Examples +// -------- +// >>> a = np.array((1,2,3)) +// >>> b = np.array((2,3,4)) +// >>> np.dstack((a,b)) +// array([[[1, 2], +// [2, 3], +// [3, 4]]]) +// +// >>> a = np.array([[1],[2],[3]]) +// >>> b = np.array([[2],[3],[4]]) +// >>> np.dstack((a,b)) +// array([[[1, 2]], +// [[2, 3]], +// [[3, 4]]]) +// +// +// +//go:linkname Dstack py.dstack +func Dstack(tup *py.Object) *py.Object +// +// Split an array into multiple sub-arrays. +// +// Please refer to the ``split`` documentation. The only difference +// between these functions is that ``array_split`` allows +// `indices_or_sections` to be an integer that does *not* equally +// divide the axis. For an array of length l that should be split +// into n sections, it returns l % n sub-arrays of size l//n + 1 +// and the rest of size l//n. +// +// See Also +// -------- +// split : Split array into multiple sub-arrays of equal size. +// +// Examples +// -------- +// >>> x = np.arange(8.0) +// >>> np.array_split(x, 3) +// [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] +// +// >>> x = np.arange(9) +// >>> np.array_split(x, 4) +// [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] +// +// +// +//go:linkname ArraySplit py.array_split +func ArraySplit(ary *py.Object, indicesOrSections *py.Object, axis *py.Object) *py.Object +// +// Split an array into multiple sub-arrays as views into `ary`. +// +// Parameters +// ---------- +// ary : ndarray +// Array to be divided into sub-arrays. +// indices_or_sections : int or 1-D array +// If `indices_or_sections` is an integer, N, the array will be divided +// into N equal arrays along `axis`. If such a split is not possible, +// an error is raised. +// +// If `indices_or_sections` is a 1-D array of sorted integers, the entries +// indicate where along `axis` the array is split. 
For example, +// ``[2, 3]`` would, for ``axis=0``, result in +// +// - ary[:2] +// - ary[2:3] +// - ary[3:] +// +// If an index exceeds the dimension of the array along `axis`, +// an empty sub-array is returned correspondingly. +// axis : int, optional +// The axis along which to split, default is 0. +// +// Returns +// ------- +// sub-arrays : list of ndarrays +// A list of sub-arrays as views into `ary`. +// +// Raises +// ------ +// ValueError +// If `indices_or_sections` is given as an integer, but +// a split does not result in equal division. +// +// See Also +// -------- +// array_split : Split an array into multiple sub-arrays of equal or +// near-equal size. Does not raise an exception if +// an equal division cannot be made. +// hsplit : Split array into multiple sub-arrays horizontally (column-wise). +// vsplit : Split array into multiple sub-arrays vertically (row wise). +// dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). +// concatenate : Join a sequence of arrays along an existing axis. +// stack : Join a sequence of arrays along a new axis. +// hstack : Stack arrays in sequence horizontally (column wise). +// vstack : Stack arrays in sequence vertically (row wise). +// dstack : Stack arrays in sequence depth wise (along third dimension). +// +// Examples +// -------- +// >>> x = np.arange(9.0) +// >>> np.split(x, 3) +// [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] +// +// >>> x = np.arange(8.0) +// >>> np.split(x, [3, 5, 6, 10]) +// [array([0., 1., 2.]), +// array([3., 4.]), +// array([5.]), +// array([6., 7.]), +// array([], dtype=float64)] +// +// +// +//go:linkname Split py.split +func Split(ary *py.Object, indicesOrSections *py.Object, axis *py.Object) *py.Object +// +// Split an array into multiple sub-arrays horizontally (column-wise). +// +// Please refer to the `split` documentation. 
`hsplit` is equivalent +// to `split` with ``axis=1``, the array is always split along the second +// axis except for 1-D arrays, where it is split at ``axis=0``. +// +// See Also +// -------- +// split : Split an array into multiple sub-arrays of equal size. +// +// Examples +// -------- +// >>> x = np.arange(16.0).reshape(4, 4) +// >>> x +// array([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]) +// >>> np.hsplit(x, 2) +// [array([[ 0., 1.], +// [ 4., 5.], +// [ 8., 9.], +// [12., 13.]]), +// array([[ 2., 3.], +// [ 6., 7.], +// [10., 11.], +// [14., 15.]])] +// >>> np.hsplit(x, np.array([3, 6])) +// [array([[ 0., 1., 2.], +// [ 4., 5., 6.], +// [ 8., 9., 10.], +// [12., 13., 14.]]), +// array([[ 3.], +// [ 7.], +// [11.], +// [15.]]), +// array([], shape=(4, 0), dtype=float64)] +// +// With a higher dimensional array the split is still along the second axis. +// +// >>> x = np.arange(8.0).reshape(2, 2, 2) +// >>> x +// array([[[0., 1.], +// [2., 3.]], +// [[4., 5.], +// [6., 7.]]]) +// >>> np.hsplit(x, 2) +// [array([[[0., 1.]], +// [[4., 5.]]]), +// array([[[2., 3.]], +// [[6., 7.]]])] +// +// With a 1-D array, the split is along axis 0. +// +// >>> x = np.array([0, 1, 2, 3, 4, 5]) +// >>> np.hsplit(x, 2) +// [array([0, 1, 2]), array([3, 4, 5])] +// +// +// +//go:linkname Hsplit py.hsplit +func Hsplit(ary *py.Object, indicesOrSections *py.Object) *py.Object +// +// Split an array into multiple sub-arrays vertically (row-wise). +// +// Please refer to the ``split`` documentation. ``vsplit`` is equivalent +// to ``split`` with `axis=0` (default), the array is always split along the +// first axis regardless of the array dimension. +// +// See Also +// -------- +// split : Split an array into multiple sub-arrays of equal size. 
+// +// Examples +// -------- +// >>> x = np.arange(16.0).reshape(4, 4) +// >>> x +// array([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]) +// >>> np.vsplit(x, 2) +// [array([[0., 1., 2., 3.], +// [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], +// [12., 13., 14., 15.]])] +// >>> np.vsplit(x, np.array([3, 6])) +// [array([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] +// +// With a higher dimensional array the split is still along the first axis. +// +// >>> x = np.arange(8.0).reshape(2, 2, 2) +// >>> x +// array([[[0., 1.], +// [2., 3.]], +// [[4., 5.], +// [6., 7.]]]) +// >>> np.vsplit(x, 2) +// [array([[[0., 1.], +// [2., 3.]]]), array([[[4., 5.], +// [6., 7.]]])] +// +// +// +//go:linkname Vsplit py.vsplit +func Vsplit(ary *py.Object, indicesOrSections *py.Object) *py.Object +// +// Split array into multiple sub-arrays along the 3rd axis (depth). +// +// Please refer to the `split` documentation. `dsplit` is equivalent +// to `split` with ``axis=2``, the array is always split along the third +// axis provided the array dimension is greater than or equal to 3. +// +// See Also +// -------- +// split : Split an array into multiple sub-arrays of equal size. 
+// +// Examples +// -------- +// >>> x = np.arange(16.0).reshape(2, 2, 4) +// >>> x +// array([[[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.]], +// [[ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]]) +// >>> np.dsplit(x, 2) +// [array([[[ 0., 1.], +// [ 4., 5.]], +// [[ 8., 9.], +// [12., 13.]]]), array([[[ 2., 3.], +// [ 6., 7.]], +// [[10., 11.], +// [14., 15.]]])] +// >>> np.dsplit(x, np.array([3, 6])) +// [array([[[ 0., 1., 2.], +// [ 4., 5., 6.]], +// [[ 8., 9., 10.], +// [12., 13., 14.]]]), +// array([[[ 3.], +// [ 7.]], +// [[11.], +// [15.]]]), +// array([], shape=(2, 2, 0), dtype=float64)] +// +// +//go:linkname Dsplit py.dsplit +func Dsplit(ary *py.Object, indicesOrSections *py.Object) *py.Object +// +// Apply a function repeatedly over multiple axes. +// +// `func` is called as `res = func(a, axis)`, where `axis` is the first +// element of `axes`. The result `res` of the function call must have +// either the same dimensions as `a` or one less dimension. If `res` +// has one less dimension than `a`, a dimension is inserted before +// `axis`. The call to `func` is then repeated for each axis in `axes`, +// with `res` as the first argument. +// +// Parameters +// ---------- +// func : function +// This function must take two arguments, `func(a, axis)`. +// a : array_like +// Input array. +// axes : array_like +// Axes over which `func` is applied; the elements must be integers. +// +// Returns +// ------- +// apply_over_axis : ndarray +// The output array. The number of dimensions is the same as `a`, +// but the shape can be different. This depends on whether `func` +// changes the shape of its output with respect to its input. +// +// See Also +// -------- +// apply_along_axis : +// Apply a function to 1-D slices of an array along the given axis. +// +// Notes +// ----- +// This function is equivalent to tuple axis arguments to reorderable ufuncs +// with keepdims=True. Tuple axis arguments to ufuncs have been available since +// version 1.7.0. 
+// +// Examples +// -------- +// >>> a = np.arange(24).reshape(2,3,4) +// >>> a +// array([[[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11]], +// [[12, 13, 14, 15], +// [16, 17, 18, 19], +// [20, 21, 22, 23]]]) +// +// Sum over axes 0 and 2. The result has same number of dimensions +// as the original array: +// +// >>> np.apply_over_axes(np.sum, a, [0,2]) +// array([[[ 60], +// [ 92], +// [124]]]) +// +// Tuple axis arguments to ufuncs are equivalent: +// +// >>> np.sum(a, axis=(0,2), keepdims=True) +// array([[[ 60], +// [ 92], +// [124]]]) +// +// +// +//go:linkname ApplyOverAxes py.apply_over_axes +func ApplyOverAxes(func_ *py.Object, a *py.Object, axes *py.Object) *py.Object +// +// Expand the shape of an array. +// +// Insert a new axis that will appear at the `axis` position in the expanded +// array shape. +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int or tuple of ints +// Position in the expanded axes where the new axis (or axes) is placed. +// +// .. deprecated:: 1.13.0 +// Passing an axis where ``axis > a.ndim`` will be treated as +// ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will +// be treated as ``axis == 0``. This behavior is deprecated. +// +// .. versionchanged:: 1.18.0 +// A tuple of axes is now supported. Out of range axes as +// described above are now forbidden and raise an `AxisError`. +// +// Returns +// ------- +// result : ndarray +// View of `a` with the number of dimensions increased. 
+// +// See Also +// -------- +// squeeze : The inverse operation, removing singleton dimensions +// reshape : Insert, remove, and combine dimensions, and resize existing ones +// doc.indexing, atleast_1d, atleast_2d, atleast_3d +// +// Examples +// -------- +// >>> x = np.array([1, 2]) +// >>> x.shape +// (2,) +// +// The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: +// +// >>> y = np.expand_dims(x, axis=0) +// >>> y +// array([[1, 2]]) +// >>> y.shape +// (1, 2) +// +// The following is equivalent to ``x[:, np.newaxis]``: +// +// >>> y = np.expand_dims(x, axis=1) +// >>> y +// array([[1], +// [2]]) +// >>> y.shape +// (2, 1) +// +// ``axis`` may also be a tuple: +// +// >>> y = np.expand_dims(x, axis=(0, 1)) +// >>> y +// array([[[1, 2]]]) +// +// >>> y = np.expand_dims(x, axis=(2, 0)) +// >>> y +// array([[[1], +// [2]]]) +// +// Note that some examples may use ``None`` instead of ``np.newaxis``. These +// are the same objects: +// +// >>> np.newaxis is None +// True +// +// +// +//go:linkname ExpandDims py.expand_dims +func ExpandDims(a *py.Object, axis *py.Object) *py.Object +// +// Apply a function to 1-D slices along the given axis. +// +// Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays +// and `a` is a 1-D slice of `arr` along `axis`. 
+// +// This is equivalent to (but faster than) the following use of `ndindex` and +// `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: +// +// Ni, Nk = a.shape[:axis], a.shape[axis+1:] +// for ii in ndindex(Ni): +// for kk in ndindex(Nk): +// f = func1d(arr[ii + s_[:,] + kk]) +// Nj = f.shape +// for jj in ndindex(Nj): +// out[ii + jj + kk] = f[jj] +// +// Equivalently, eliminating the inner loop, this can be expressed as:: +// +// Ni, Nk = a.shape[:axis], a.shape[axis+1:] +// for ii in ndindex(Ni): +// for kk in ndindex(Nk): +// out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) +// +// Parameters +// ---------- +// func1d : function (M,) -> (Nj...) +// This function should accept 1-D arrays. It is applied to 1-D +// slices of `arr` along the specified axis. +// axis : integer +// Axis along which `arr` is sliced. +// arr : ndarray (Ni..., M, Nk...) +// Input array. +// args : any +// Additional arguments to `func1d`. +// kwargs : any +// Additional named arguments to `func1d`. +// +// .. versionadded:: 1.9.0 +// +// +// Returns +// ------- +// out : ndarray (Ni..., Nj..., Nk...) +// The output array. The shape of `out` is identical to the shape of +// `arr`, except along the `axis` dimension. This axis is removed, and +// replaced with new dimensions equal to the shape of the return value +// of `func1d`. So if `func1d` returns a scalar `out` will have one +// fewer dimensions than `arr`. +// +// See Also +// -------- +// apply_over_axes : Apply a function repeatedly over multiple axes. +// +// Examples +// -------- +// >>> def my_func(a): +// ... """Average first and last element of a 1-D array""" +// ... return (a[0] + a[-1]) * 0.5 +// >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) +// >>> np.apply_along_axis(my_func, 0, b) +// array([4., 5., 6.]) +// >>> np.apply_along_axis(my_func, 1, b) +// array([2., 5., 8.]) +// +// For a function that returns a 1D array, the number of dimensions in +// `outarr` is the same as `arr`. 
+// +// >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) +// >>> np.apply_along_axis(sorted, 1, b) +// array([[1, 7, 8], +// [3, 4, 9], +// [2, 5, 6]]) +// +// For a function that returns a higher dimensional array, those dimensions +// are inserted in place of the `axis` dimension. +// +// >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) +// >>> np.apply_along_axis(np.diag, -1, b) +// array([[[1, 0, 0], +// [0, 2, 0], +// [0, 0, 3]], +// [[4, 0, 0], +// [0, 5, 0], +// [0, 0, 6]], +// [[7, 0, 0], +// [0, 8, 0], +// [0, 0, 9]]]) +// +// +//go:linkname ApplyAlongAxis py.apply_along_axis +func ApplyAlongAxis(func1d *py.Object, axis *py.Object, arr *py.Object, __llgo_va_list ...interface{}) *py.Object +// +// Kronecker product of two arrays. +// +// Computes the Kronecker product, a composite array made of blocks of the +// second array scaled by the first. +// +// Parameters +// ---------- +// a, b : array_like +// +// Returns +// ------- +// out : ndarray +// +// See Also +// -------- +// outer : The outer product +// +// Notes +// ----- +// The function assumes that the number of dimensions of `a` and `b` +// are the same, if necessary prepending the smallest with ones. +// If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, +// the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. +// The elements are products of elements from `a` and `b`, organized +// explicitly by:: +// +// kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] +// +// where:: +// +// kt = it * st + jt, t = 0,...,N +// +// In the common 2-D case (N=1), the block structure can be visualized:: +// +// [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], +// [ ... ... ], +// [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]] +// +// +// Examples +// -------- +// >>> np.kron([1,10,100], [5,6,7]) +// array([ 5, 6, 7, ..., 500, 600, 700]) +// >>> np.kron([5,6,7], [1,10,100]) +// array([ 5, 50, 500, ..., 7, 70, 700]) +// +// >>> np.kron(np.eye(2), np.ones((2,2))) +// array([[1., 1., 0., 0.], +// [1., 1., 0., 0.], +// [0., 0., 1., 1.], +// [0., 0., 1., 1.]]) +// +// >>> a = np.arange(100).reshape((2,5,2,5)) +// >>> b = np.arange(24).reshape((2,3,4)) +// >>> c = np.kron(a,b) +// >>> c.shape +// (2, 10, 6, 20) +// >>> I = (1,3,0,2) +// >>> J = (0,2,1) +// >>> J1 = (0,) + J # extend to ndim=4 +// >>> S1 = (1,) + b.shape +// >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) +// >>> c[K] == a[I]*b[J] +// True +// +// +// +//go:linkname Kron py.kron +func Kron(a *py.Object, b *py.Object) *py.Object +// +// Construct an array by repeating A the number of times given by reps. +// +// If `reps` has length ``d``, the result will have dimension of +// ``max(d, A.ndim)``. +// +// If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new +// axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, +// or shape (1, 1, 3) for 3-D replication. If this is not the desired +// behavior, promote `A` to d-dimensions manually before calling this +// function. +// +// If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. +// Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as +// (1, 1, 2, 2). +// +// Note : Although tile may be used for broadcasting, it is strongly +// recommended to use numpy's broadcasting operations and functions. +// +// Parameters +// ---------- +// A : array_like +// The input array. +// reps : array_like +// The number of repetitions of `A` along each axis. +// +// Returns +// ------- +// c : ndarray +// The tiled output array. +// +// See Also +// -------- +// repeat : Repeat elements of an array. 
+// broadcast_to : Broadcast an array to a new shape +// +// Examples +// -------- +// >>> a = np.array([0, 1, 2]) +// >>> np.tile(a, 2) +// array([0, 1, 2, 0, 1, 2]) +// >>> np.tile(a, (2, 2)) +// array([[0, 1, 2, 0, 1, 2], +// [0, 1, 2, 0, 1, 2]]) +// >>> np.tile(a, (2, 1, 2)) +// array([[[0, 1, 2, 0, 1, 2]], +// [[0, 1, 2, 0, 1, 2]]]) +// +// >>> b = np.array([[1, 2], [3, 4]]) +// >>> np.tile(b, 2) +// array([[1, 2, 1, 2], +// [3, 4, 3, 4]]) +// >>> np.tile(b, (2, 1)) +// array([[1, 2], +// [3, 4], +// [1, 2], +// [3, 4]]) +// +// >>> c = np.array([1,2,3,4]) +// >>> np.tile(c,(4,1)) +// array([[1, 2, 3, 4], +// [1, 2, 3, 4], +// [1, 2, 3, 4], +// [1, 2, 3, 4]]) +// +// +//go:linkname Tile py.tile +func Tile(A *py.Object, reps *py.Object) *py.Object +// Find the wrapper for the array with the highest priority. +// +// In case of ties, leftmost wins. If no wrapper is found, return None +// +// +//go:linkname GetArrayWrap py.get_array_wrap +func GetArrayWrap(__llgo_va_list ...interface{}) *py.Object +// +// Take values from the input array by matching 1d index and data slices. +// +// This iterates over matching 1d slices oriented along the specified axis in +// the index and data arrays, and uses the former to look up values in the +// latter. These slices can be different lengths. +// +// Functions returning an index along an axis, like `argsort` and +// `argpartition`, produce suitable indices for this function. +// +// .. versionadded:: 1.15.0 +// +// Parameters +// ---------- +// arr : ndarray (Ni..., M, Nk...) +// Source array +// indices : ndarray (Ni..., J, Nk...) +// Indices to take along each 1d slice of `arr`. This must match the +// dimension of arr, but dimensions Ni and Nj only need to broadcast +// against `arr`. +// axis : int +// The axis to take 1d slices along. If axis is None, the input array is +// treated as if it had first been flattened to 1d, for consistency with +// `sort` and `argsort`. 
+// +// Returns +// ------- +// out: ndarray (Ni..., J, Nk...) +// The indexed result. +// +// Notes +// ----- +// This is equivalent to (but faster than) the following use of `ndindex` and +// `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: +// +// Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] +// J = indices.shape[axis] # Need not equal M +// out = np.empty(Ni + (J,) + Nk) +// +// for ii in ndindex(Ni): +// for kk in ndindex(Nk): +// a_1d = a [ii + s_[:,] + kk] +// indices_1d = indices[ii + s_[:,] + kk] +// out_1d = out [ii + s_[:,] + kk] +// for j in range(J): +// out_1d[j] = a_1d[indices_1d[j]] +// +// Equivalently, eliminating the inner loop, the last two lines would be:: +// +// out_1d[:] = a_1d[indices_1d] +// +// See Also +// -------- +// take : Take along an axis, using the same indices for every 1d slice +// put_along_axis : +// Put values into the destination array by matching 1d index and data slices +// +// Examples +// -------- +// +// For this sample array +// +// >>> a = np.array([[10, 30, 20], [60, 40, 50]]) +// +// We can sort either by using sort directly, or argsort and this function +// +// >>> np.sort(a, axis=1) +// array([[10, 20, 30], +// [40, 50, 60]]) +// >>> ai = np.argsort(a, axis=1) +// >>> ai +// array([[0, 2, 1], +// [1, 2, 0]]) +// >>> np.take_along_axis(a, ai, axis=1) +// array([[10, 20, 30], +// [40, 50, 60]]) +// +// The same works for max and min, if you maintain the trivial dimension +// with ``keepdims``: +// +// >>> np.max(a, axis=1, keepdims=True) +// array([[30], +// [60]]) +// >>> ai = np.argmax(a, axis=1, keepdims=True) +// >>> ai +// array([[1], +// [0]]) +// >>> np.take_along_axis(a, ai, axis=1) +// array([[30], +// [60]]) +// +// If we want to get the max and min at the same time, we can stack the +// indices first +// +// >>> ai_min = np.argmin(a, axis=1, keepdims=True) +// >>> ai_max = np.argmax(a, axis=1, keepdims=True) +// >>> ai = np.concatenate([ai_min, ai_max], axis=1) +// >>> ai 
+// array([[0, 1], +// [1, 0]]) +// >>> np.take_along_axis(a, ai, axis=1) +// array([[10, 30], +// [40, 60]]) +// +// +//go:linkname TakeAlongAxis py.take_along_axis +func TakeAlongAxis(arr *py.Object, indices *py.Object, axis *py.Object) *py.Object +// +// Put values into the destination array by matching 1d index and data slices. +// +// This iterates over matching 1d slices oriented along the specified axis in +// the index and data arrays, and uses the former to place values into the +// latter. These slices can be different lengths. +// +// Functions returning an index along an axis, like `argsort` and +// `argpartition`, produce suitable indices for this function. +// +// .. versionadded:: 1.15.0 +// +// Parameters +// ---------- +// arr : ndarray (Ni..., M, Nk...) +// Destination array. +// indices : ndarray (Ni..., J, Nk...) +// Indices to change along each 1d slice of `arr`. This must match the +// dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast +// against `arr`. +// values : array_like (Ni..., J, Nk...) +// values to insert at those indices. Its shape and dimension are +// broadcast to match that of `indices`. +// axis : int +// The axis to take 1d slices along. If axis is None, the destination +// array is treated as if a flattened 1d view had been created of it. 
+// +// Notes +// ----- +// This is equivalent to (but faster than) the following use of `ndindex` and +// `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: +// +// Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] +// J = indices.shape[axis] # Need not equal M +// +// for ii in ndindex(Ni): +// for kk in ndindex(Nk): +// a_1d = a [ii + s_[:,] + kk] +// indices_1d = indices[ii + s_[:,] + kk] +// values_1d = values [ii + s_[:,] + kk] +// for j in range(J): +// a_1d[indices_1d[j]] = values_1d[j] +// +// Equivalently, eliminating the inner loop, the last two lines would be:: +// +// a_1d[indices_1d] = values_1d +// +// See Also +// -------- +// take_along_axis : +// Take values from the input array by matching 1d index and data slices +// +// Examples +// -------- +// +// For this sample array +// +// >>> a = np.array([[10, 30, 20], [60, 40, 50]]) +// +// We can replace the maximum values with: +// +// >>> ai = np.argmax(a, axis=1, keepdims=True) +// >>> ai +// array([[1], +// [0]]) +// >>> np.put_along_axis(a, ai, 99, axis=1) +// >>> a +// array([[10, 99, 20], +// [99, 40, 50]]) +// +// +// +//go:linkname PutAlongAxis py.put_along_axis +func PutAlongAxis(arr *py.Object, indices *py.Object, values *py.Object, axis *py.Object) *py.Object +// Broadcast an array to a new shape. +// +// Parameters +// ---------- +// array : array_like +// The array to broadcast. +// shape : tuple or int +// The shape of the desired array. A single integer ``i`` is interpreted +// as ``(i,)``. +// subok : bool, optional +// If True, then sub-classes will be passed-through, otherwise +// the returned array will be forced to be a base-class array (default). +// +// Returns +// ------- +// broadcast : array +// A readonly view on the original array with the given shape. It is +// typically not contiguous. Furthermore, more than one element of a +// broadcasted array may refer to a single memory location. 
+// +// Raises +// ------ +// ValueError +// If the array is not compatible with the new shape according to NumPy's +// broadcasting rules. +// +// See Also +// -------- +// broadcast +// broadcast_arrays +// broadcast_shapes +// +// Notes +// ----- +// .. versionadded:: 1.10.0 +// +// Examples +// -------- +// >>> x = np.array([1, 2, 3]) +// >>> np.broadcast_to(x, (3, 3)) +// array([[1, 2, 3], +// [1, 2, 3], +// [1, 2, 3]]) +// +// +//go:linkname BroadcastTo py.broadcast_to +func BroadcastTo(array *py.Object, shape *py.Object, subok *py.Object) *py.Object +// +// Broadcast any number of arrays against each other. +// +// Parameters +// ---------- +// `*args` : array_likes +// The arrays to broadcast. +// +// subok : bool, optional +// If True, then sub-classes will be passed-through, otherwise +// the returned arrays will be forced to be a base-class array (default). +// +// Returns +// ------- +// broadcasted : list of arrays +// These arrays are views on the original arrays. They are typically +// not contiguous. Furthermore, more than one element of a +// broadcasted array may refer to a single memory location. If you need +// to write to the arrays, make copies first. While you can set the +// ``writable`` flag True, writing to a single output value may end up +// changing more than one location in the output array. +// +// .. deprecated:: 1.17 +// The output is currently marked so that if written to, a deprecation +// warning will be emitted. A future version will set the +// ``writable`` flag False so writing to it will raise an error. +// +// See Also +// -------- +// broadcast +// broadcast_to +// broadcast_shapes +// +// Examples +// -------- +// >>> x = np.array([[1,2,3]]) +// >>> y = np.array([[4],[5]]) +// >>> np.broadcast_arrays(x, y) +// [array([[1, 2, 3], +// [1, 2, 3]]), array([[4, 4, 4], +// [5, 5, 5]])] +// +// Here is a useful idiom for getting contiguous copies instead of +// non-contiguous views. 
+// +// >>> [np.array(a) for a in np.broadcast_arrays(x, y)] +// [array([[1, 2, 3], +// [1, 2, 3]]), array([[4, 4, 4], +// [5, 5, 5]])] +// +// +// +//go:linkname BroadcastArrays py.broadcast_arrays +func BroadcastArrays(__llgo_va_list ...interface{}) *py.Object +// +// Broadcast the input shapes into a single shape. +// +// :ref:`Learn more about broadcasting here `. +// +// .. versionadded:: 1.20.0 +// +// Parameters +// ---------- +// `*args` : tuples of ints, or ints +// The shapes to be broadcast against each other. +// +// Returns +// ------- +// tuple +// Broadcasted shape. +// +// Raises +// ------ +// ValueError +// If the shapes are not compatible and cannot be broadcast according +// to NumPy's broadcasting rules. +// +// See Also +// -------- +// broadcast +// broadcast_arrays +// broadcast_to +// +// Examples +// -------- +// >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) +// (3, 2) +// +// >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) +// (5, 6, 7) +// +// +//go:linkname BroadcastShapes py.broadcast_shapes +func BroadcastShapes(__llgo_va_list ...interface{}) *py.Object +// +// Extract a diagonal or construct a diagonal array. +// +// See the more detailed documentation for ``numpy.diagonal`` if you use this +// function to extract a diagonal and wish to write to the resulting array; +// whether it returns a copy or a view depends on what version of numpy you +// are using. +// +// Parameters +// ---------- +// v : array_like +// If `v` is a 2-D array, return a copy of its `k`-th diagonal. +// If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th +// diagonal. +// k : int, optional +// Diagonal in question. The default is 0. Use `k>0` for diagonals +// above the main diagonal, and `k<0` for diagonals below the main +// diagonal. +// +// Returns +// ------- +// out : ndarray +// The extracted diagonal or constructed diagonal array. +// +// See Also +// -------- +// diagonal : Return specified diagonals. 
+// diagflat : Create a 2-D array with the flattened input as a diagonal. +// trace : Sum along diagonals. +// triu : Upper triangle of an array. +// tril : Lower triangle of an array. +// +// Examples +// -------- +// >>> x = np.arange(9).reshape((3,3)) +// >>> x +// array([[0, 1, 2], +// [3, 4, 5], +// [6, 7, 8]]) +// +// >>> np.diag(x) +// array([0, 4, 8]) +// >>> np.diag(x, k=1) +// array([1, 5]) +// >>> np.diag(x, k=-1) +// array([3, 7]) +// +// >>> np.diag(np.diag(x)) +// array([[0, 0, 0], +// [0, 4, 0], +// [0, 0, 8]]) +// +// +// +//go:linkname Diag py.diag +func Diag(v *py.Object, k *py.Object) *py.Object +// +// Create a two-dimensional array with the flattened input as a diagonal. +// +// Parameters +// ---------- +// v : array_like +// Input data, which is flattened and set as the `k`-th +// diagonal of the output. +// k : int, optional +// Diagonal to set; 0, the default, corresponds to the "main" diagonal, +// a positive (negative) `k` giving the number of the diagonal above +// (below) the main. +// +// Returns +// ------- +// out : ndarray +// The 2-D output array. +// +// See Also +// -------- +// diag : MATLAB work-alike for 1-D and 2-D arrays. +// diagonal : Return specified diagonals. +// trace : Sum along diagonals. +// +// Examples +// -------- +// >>> np.diagflat([[1,2], [3,4]]) +// array([[1, 0, 0, 0], +// [0, 2, 0, 0], +// [0, 0, 3, 0], +// [0, 0, 0, 4]]) +// +// >>> np.diagflat([1,2], 1) +// array([[0, 1, 0], +// [0, 0, 2], +// [0, 0, 0]]) +// +// +// +//go:linkname Diagflat py.diagflat +func Diagflat(v *py.Object, k *py.Object) *py.Object +// +// Return a 2-D array with ones on the diagonal and zeros elsewhere. +// +// Parameters +// ---------- +// N : int +// Number of rows in the output. +// M : int, optional +// Number of columns in the output. If None, defaults to `N`. 
+// k : int, optional +// Index of the diagonal: 0 (the default) refers to the main diagonal, +// a positive value refers to an upper diagonal, and a negative value +// to a lower diagonal. +// dtype : data-type, optional +// Data-type of the returned array. +// order : {'C', 'F'}, optional +// Whether the output should be stored in row-major (C-style) or +// column-major (Fortran-style) order in memory. +// +// .. versionadded:: 1.14.0 +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// I : ndarray of shape (N,M) +// An array where all elements are equal to zero, except for the `k`-th +// diagonal, whose values are equal to one. +// +// See Also +// -------- +// identity : (almost) equivalent function +// diag : diagonal 2-D array from a 1-D array specified by the user. +// +// Examples +// -------- +// >>> np.eye(2, dtype=int) +// array([[1, 0], +// [0, 1]]) +// >>> np.eye(3, k=1) +// array([[0., 1., 0.], +// [0., 0., 1.], +// [0., 0., 0.]]) +// +// +// +//go:linkname Eye py.eye +func Eye(N *py.Object, M *py.Object, k *py.Object, dtype *py.Object, order *py.Object) *py.Object +// +// Reverse the order of elements along axis 1 (left/right). +// +// For a 2-D array, this flips the entries in each row in the left/right +// direction. Columns are preserved, but appear in a different order than +// before. +// +// Parameters +// ---------- +// m : array_like +// Input array, must be at least 2-D. +// +// Returns +// ------- +// f : ndarray +// A view of `m` with the columns reversed. Since a view +// is returned, this operation is :math:`\mathcal O(1)`. 
+// +// See Also +// -------- +// flipud : Flip array in the up/down direction. +// flip : Flip array in one or more dimensions. +// rot90 : Rotate array counterclockwise. +// +// Notes +// ----- +// Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. +// Requires the array to be at least 2-D. +// +// Examples +// -------- +// >>> A = np.diag([1.,2.,3.]) +// >>> A +// array([[1., 0., 0.], +// [0., 2., 0.], +// [0., 0., 3.]]) +// >>> np.fliplr(A) +// array([[0., 0., 1.], +// [0., 2., 0.], +// [3., 0., 0.]]) +// +// >>> A = np.random.randn(2,3,5) +// >>> np.all(np.fliplr(A) == A[:,::-1,...]) +// True +// +// +// +//go:linkname Fliplr py.fliplr +func Fliplr(m *py.Object) *py.Object +// +// Reverse the order of elements along axis 0 (up/down). +// +// For a 2-D array, this flips the entries in each column in the up/down +// direction. Rows are preserved, but appear in a different order than before. +// +// Parameters +// ---------- +// m : array_like +// Input array. +// +// Returns +// ------- +// out : array_like +// A view of `m` with the rows reversed. Since a view is +// returned, this operation is :math:`\mathcal O(1)`. +// +// See Also +// -------- +// fliplr : Flip array in the left/right direction. +// flip : Flip array in one or more dimensions. +// rot90 : Rotate array counterclockwise. +// +// Notes +// ----- +// Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. +// Requires the array to be at least 1-D. +// +// Examples +// -------- +// >>> A = np.diag([1.0, 2, 3]) +// >>> A +// array([[1., 0., 0.], +// [0., 2., 0.], +// [0., 0., 3.]]) +// >>> np.flipud(A) +// array([[0., 0., 3.], +// [0., 2., 0.], +// [1., 0., 0.]]) +// +// >>> A = np.random.randn(2,3,5) +// >>> np.all(np.flipud(A) == A[::-1,...]) +// True +// +// >>> np.flipud([1,2]) +// array([2, 1]) +// +// +// +//go:linkname Flipud py.flipud +func Flipud(m *py.Object) *py.Object +// +// An array with ones at and below the given diagonal and zeros elsewhere. 
+// +// Parameters +// ---------- +// N : int +// Number of rows in the array. +// M : int, optional +// Number of columns in the array. +// By default, `M` is taken equal to `N`. +// k : int, optional +// The sub-diagonal at and below which the array is filled. +// `k` = 0 is the main diagonal, while `k` < 0 is below it, +// and `k` > 0 is above. The default is 0. +// dtype : dtype, optional +// Data type of the returned array. The default is float. +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// tri : ndarray of shape (N, M) +// Array with its lower triangle filled with ones and zero elsewhere; +// in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. +// +// Examples +// -------- +// >>> np.tri(3, 5, 2, dtype=int) +// array([[1, 1, 1, 0, 0], +// [1, 1, 1, 1, 0], +// [1, 1, 1, 1, 1]]) +// +// >>> np.tri(3, 5, -1) +// array([[0., 0., 0., 0., 0.], +// [1., 0., 0., 0., 0.], +// [1., 1., 0., 0., 0.]]) +// +// +// +//go:linkname Tri py.tri +func Tri(N *py.Object, M *py.Object, k *py.Object, dtype *py.Object) *py.Object +// +// Upper triangle of an array. +// +// Return a copy of an array with the elements below the `k`-th diagonal +// zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the +// final two axes. +// +// Please refer to the documentation for `tril` for further details. 
+// +// See Also +// -------- +// tril : lower triangle of an array +// +// Examples +// -------- +// >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) +// array([[ 1, 2, 3], +// [ 4, 5, 6], +// [ 0, 8, 9], +// [ 0, 0, 12]]) +// +// >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) +// array([[[ 0, 1, 2, 3, 4], +// [ 0, 6, 7, 8, 9], +// [ 0, 0, 12, 13, 14], +// [ 0, 0, 0, 18, 19]], +// [[20, 21, 22, 23, 24], +// [ 0, 26, 27, 28, 29], +// [ 0, 0, 32, 33, 34], +// [ 0, 0, 0, 38, 39]], +// [[40, 41, 42, 43, 44], +// [ 0, 46, 47, 48, 49], +// [ 0, 0, 52, 53, 54], +// [ 0, 0, 0, 58, 59]]]) +// +// +// +//go:linkname Triu py.triu +func Triu(m *py.Object, k *py.Object) *py.Object +// +// Lower triangle of an array. +// +// Return a copy of an array with elements above the `k`-th diagonal zeroed. +// For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two +// axes. +// +// Parameters +// ---------- +// m : array_like, shape (..., M, N) +// Input array. +// k : int, optional +// Diagonal above which to zero elements. `k = 0` (the default) is the +// main diagonal, `k < 0` is below it and `k > 0` is above. +// +// Returns +// ------- +// tril : ndarray, shape (..., M, N) +// Lower triangle of `m`, of same shape and data-type as `m`. +// +// See Also +// -------- +// triu : same thing, only for the upper triangle +// +// Examples +// -------- +// >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) +// array([[ 0, 0, 0], +// [ 4, 0, 0], +// [ 7, 8, 0], +// [10, 11, 12]]) +// +// >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) +// array([[[ 0, 0, 0, 0, 0], +// [ 5, 6, 0, 0, 0], +// [10, 11, 12, 0, 0], +// [15, 16, 17, 18, 0]], +// [[20, 0, 0, 0, 0], +// [25, 26, 0, 0, 0], +// [30, 31, 32, 0, 0], +// [35, 36, 37, 38, 0]], +// [[40, 0, 0, 0, 0], +// [45, 46, 0, 0, 0], +// [50, 51, 52, 0, 0], +// [55, 56, 57, 58, 0]]]) +// +// +// +//go:linkname Tril py.tril +func Tril(m *py.Object, k *py.Object) *py.Object +// +// Generate a Vandermonde matrix. 
+// +// The columns of the output matrix are powers of the input vector. The +// order of the powers is determined by the `increasing` boolean argument. +// Specifically, when `increasing` is False, the `i`-th output column is +// the input vector raised element-wise to the power of ``N - i - 1``. Such +// a matrix with a geometric progression in each row is named for Alexandre- +// Theophile Vandermonde. +// +// Parameters +// ---------- +// x : array_like +// 1-D input array. +// N : int, optional +// Number of columns in the output. If `N` is not specified, a square +// array is returned (``N = len(x)``). +// increasing : bool, optional +// Order of the powers of the columns. If True, the powers increase +// from left to right, if False (the default) they are reversed. +// +// .. versionadded:: 1.9.0 +// +// Returns +// ------- +// out : ndarray +// Vandermonde matrix. If `increasing` is False, the first column is +// ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is +// True, the columns are ``x^0, x^1, ..., x^(N-1)``. 
+// +// See Also +// -------- +// polynomial.polynomial.polyvander +// +// Examples +// -------- +// >>> x = np.array([1, 2, 3, 5]) +// >>> N = 3 +// >>> np.vander(x, N) +// array([[ 1, 1, 1], +// [ 4, 2, 1], +// [ 9, 3, 1], +// [25, 5, 1]]) +// +// >>> np.column_stack([x**(N-1-i) for i in range(N)]) +// array([[ 1, 1, 1], +// [ 4, 2, 1], +// [ 9, 3, 1], +// [25, 5, 1]]) +// +// >>> x = np.array([1, 2, 3, 5]) +// >>> np.vander(x) +// array([[ 1, 1, 1, 1], +// [ 8, 4, 2, 1], +// [ 27, 9, 3, 1], +// [125, 25, 5, 1]]) +// >>> np.vander(x, increasing=True) +// array([[ 1, 1, 1, 1], +// [ 1, 2, 4, 8], +// [ 1, 3, 9, 27], +// [ 1, 5, 25, 125]]) +// +// The determinant of a square Vandermonde matrix is the product +// of the differences between the values of the input vector: +// +// >>> np.linalg.det(np.vander(x)) +// 48.000000000000043 # may vary +// >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) +// 48 +// +// +// +//go:linkname Vander py.vander +func Vander(x *py.Object, N *py.Object, increasing *py.Object) *py.Object +// +// Compute the bi-dimensional histogram of two data samples. +// +// Parameters +// ---------- +// x : array_like, shape (N,) +// An array containing the x coordinates of the points to be +// histogrammed. +// y : array_like, shape (N,) +// An array containing the y coordinates of the points to be +// histogrammed. +// bins : int or array_like or [int, int] or [array, array], optional +// The bin specification: +// +// * If int, the number of bins for the two dimensions (nx=ny=bins). +// * If array_like, the bin edges for the two dimensions +// (x_edges=y_edges=bins). +// * If [int, int], the number of bins in each dimension +// (nx, ny = bins). +// * If [array, array], the bin edges in each dimension +// (x_edges, y_edges = bins). +// * A combination [int, array] or [array, int], where int +// is the number of bins and array is the bin edges. 
+// +// range : array_like, shape(2,2), optional +// The leftmost and rightmost edges of the bins along each dimension +// (if not specified explicitly in the `bins` parameters): +// ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range +// will be considered outliers and not tallied in the histogram. +// density : bool, optional +// If False, the default, returns the number of samples in each bin. +// If True, returns the probability *density* function at the bin, +// ``bin_count / sample_count / bin_area``. +// weights : array_like, shape(N,), optional +// An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. +// Weights are normalized to 1 if `density` is True. If `density` is +// False, the values of the returned histogram are equal to the sum of +// the weights belonging to the samples falling into each bin. +// +// Returns +// ------- +// H : ndarray, shape(nx, ny) +// The bi-dimensional histogram of samples `x` and `y`. Values in `x` +// are histogrammed along the first dimension and values in `y` are +// histogrammed along the second dimension. +// xedges : ndarray, shape(nx+1,) +// The bin edges along the first dimension. +// yedges : ndarray, shape(ny+1,) +// The bin edges along the second dimension. +// +// See Also +// -------- +// histogram : 1D histogram +// histogramdd : Multidimensional histogram +// +// Notes +// ----- +// When `density` is True, then the returned histogram is the sample +// density, defined such that the sum over bins of the product +// ``bin_value * bin_area`` is 1. +// +// Please note that the histogram does not follow the Cartesian convention +// where `x` values are on the abscissa and `y` values on the ordinate +// axis. Rather, `x` is histogrammed along the first dimension of the +// array (vertical), and `y` along the second dimension of the array +// (horizontal). This ensures compatibility with `histogramdd`. 
+// +// Examples +// -------- +// >>> from matplotlib.image import NonUniformImage +// >>> import matplotlib.pyplot as plt +// +// Construct a 2-D histogram with variable bin width. First define the bin +// edges: +// +// >>> xedges = [0, 1, 3, 5] +// >>> yedges = [0, 2, 3, 4, 6] +// +// Next we create a histogram H with random bin content: +// +// >>> x = np.random.normal(2, 1, 100) +// >>> y = np.random.normal(1, 1, 100) +// >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) +// >>> # Histogram does not follow Cartesian convention (see Notes), +// >>> # therefore transpose H for visualization purposes. +// >>> H = H.T +// +// :func:`imshow ` can only display square bins: +// +// >>> fig = plt.figure(figsize=(7, 3)) +// >>> ax = fig.add_subplot(131, title='imshow: square bins') +// >>> plt.imshow(H, interpolation='nearest', origin='lower', +// ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) +// +// +// :func:`pcolormesh ` can display actual edges: +// +// >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', +// ... aspect='equal') +// >>> X, Y = np.meshgrid(xedges, yedges) +// >>> ax.pcolormesh(X, Y, H) +// +// +// :class:`NonUniformImage ` can be used to +// display actual bin edges with interpolation: +// +// >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', +// ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) +// >>> im = NonUniformImage(ax, interpolation='bilinear') +// >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 +// >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 +// >>> im.set_data(xcenters, ycenters, H) +// >>> ax.add_image(im) +// >>> plt.show() +// +// It is also possible to construct a 2-D histogram without specifying bin +// edges: +// +// >>> # Generate non-symmetric test data +// >>> n = 10000 +// >>> x = np.linspace(1, 100, n) +// >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 +// >>> # Compute 2d histogram. 
Note the order of x/y and xedges/yedges +// >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) +// +// Now we can plot the histogram using +// :func:`pcolormesh `, and a +// :func:`hexbin ` for comparison. +// +// >>> # Plot histogram using pcolormesh +// >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) +// >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') +// >>> ax1.plot(x, 2*np.log(x), 'k-') +// >>> ax1.set_xlim(x.min(), x.max()) +// >>> ax1.set_ylim(y.min(), y.max()) +// >>> ax1.set_xlabel('x') +// >>> ax1.set_ylabel('y') +// >>> ax1.set_title('histogram2d') +// >>> ax1.grid() +// +// >>> # Create hexbin plot for comparison +// >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') +// >>> ax2.plot(x, 2*np.log(x), 'k-') +// >>> ax2.set_title('hexbin') +// >>> ax2.set_xlim(x.min(), x.max()) +// >>> ax2.set_xlabel('x') +// >>> ax2.grid() +// +// >>> plt.show() +// +// +//go:linkname Histogram2d py.histogram2d +func Histogram2d(x *py.Object, y *py.Object, bins *py.Object, range_ *py.Object, density *py.Object, weights *py.Object) *py.Object +// +// Return the indices to access (n, n) arrays, given a masking function. +// +// Assume `mask_func` is a function that, for a square array a of size +// ``(n, n)`` with a possible offset argument `k`, when called as +// ``mask_func(a, k)`` returns a new array with zeros in certain locations +// (functions like `triu` or `tril` do precisely this). Then this function +// returns the indices where the non-zero values would be located. +// +// Parameters +// ---------- +// n : int +// The returned indices will be valid to access arrays of shape (n, n). +// mask_func : callable +// A function whose call signature is similar to that of `triu`, `tril`. +// That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. +// `k` is an optional argument to the function. +// k : scalar +// An optional argument which is passed through to `mask_func`. 
Functions +// like `triu`, `tril` take a second argument that is interpreted as an +// offset. +// +// Returns +// ------- +// indices : tuple of arrays. +// The `n` arrays of indices corresponding to the locations where +// ``mask_func(np.ones((n, n)), k)`` is True. +// +// See Also +// -------- +// triu, tril, triu_indices, tril_indices +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// Examples +// -------- +// These are the indices that would allow you to access the upper triangular +// part of any 3x3 array: +// +// >>> iu = np.mask_indices(3, np.triu) +// +// For example, if `a` is a 3x3 array: +// +// >>> a = np.arange(9).reshape(3, 3) +// >>> a +// array([[0, 1, 2], +// [3, 4, 5], +// [6, 7, 8]]) +// >>> a[iu] +// array([0, 1, 2, 4, 5, 8]) +// +// An offset can be passed also to the masking function. This gets us the +// indices starting on the first diagonal right of the main one: +// +// >>> iu1 = np.mask_indices(3, np.triu, 1) +// +// with which we now extract only three elements: +// +// >>> a[iu1] +// array([1, 2, 5]) +// +// +// +//go:linkname MaskIndices py.mask_indices +func MaskIndices(n *py.Object, maskFunc *py.Object, k *py.Object) *py.Object +// +// Return the indices for the lower-triangle of an (n, m) array. +// +// Parameters +// ---------- +// n : int +// The row dimension of the arrays for which the returned +// indices will be valid. +// k : int, optional +// Diagonal offset (see `tril` for details). +// m : int, optional +// .. versionadded:: 1.9.0 +// +// The column dimension of the arrays for which the returned +// arrays will be valid. +// By default `m` is taken equal to `n`. +// +// +// Returns +// ------- +// inds : tuple of arrays +// The indices for the triangle. The returned tuple contains two arrays, +// each with the indices along one dimension of the array. +// +// See also +// -------- +// triu_indices : similar function, for upper-triangular. +// mask_indices : generic function accepting an arbitrary mask function. 
+// tril, triu +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// Examples +// -------- +// Compute two different sets of indices to access 4x4 arrays, one for the +// lower triangular part starting at the main diagonal, and one starting two +// diagonals further right: +// +// >>> il1 = np.tril_indices(4) +// >>> il2 = np.tril_indices(4, 2) +// +// Here is how they can be used with a sample array: +// +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// +// Both for indexing: +// +// >>> a[il1] +// array([ 0, 4, 5, ..., 13, 14, 15]) +// +// And for assigning values: +// +// >>> a[il1] = -1 +// >>> a +// array([[-1, 1, 2, 3], +// [-1, -1, 6, 7], +// [-1, -1, -1, 11], +// [-1, -1, -1, -1]]) +// +// These cover almost the whole array (two diagonals right of the main one): +// +// >>> a[il2] = -10 +// >>> a +// array([[-10, -10, -10, 3], +// [-10, -10, -10, -10], +// [-10, -10, -10, -10], +// [-10, -10, -10, -10]]) +// +// +// +//go:linkname TrilIndices py.tril_indices +func TrilIndices(n *py.Object, k *py.Object, m *py.Object) *py.Object +// +// Return the indices for the lower-triangle of arr. +// +// See `tril_indices` for full details. +// +// Parameters +// ---------- +// arr : array_like +// The indices will be valid for square arrays whose dimensions are +// the same as arr. +// k : int, optional +// Diagonal offset (see `tril` for details). +// +// Examples +// -------- +// +// Create a 4 by 4 array. +// +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// +// Pass the array to get the indices of the lower triangular elements. 
+// +// >>> trili = np.tril_indices_from(a) +// >>> trili +// (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) +// +// >>> a[trili] +// array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) +// +// This is syntactic sugar for tril_indices(). +// +// >>> np.tril_indices(a.shape[0]) +// (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) +// +// Use the `k` parameter to return the indices for the lower triangular array +// up to the k-th diagonal. +// +// >>> trili1 = np.tril_indices_from(a, k=1) +// >>> a[trili1] +// array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]) +// +// See Also +// -------- +// tril_indices, tril, triu_indices_from +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// +// +//go:linkname TrilIndicesFrom py.tril_indices_from +func TrilIndicesFrom(arr *py.Object, k *py.Object) *py.Object +// +// Return the indices for the upper-triangle of an (n, m) array. +// +// Parameters +// ---------- +// n : int +// The size of the arrays for which the returned indices will +// be valid. +// k : int, optional +// Diagonal offset (see `triu` for details). +// m : int, optional +// .. versionadded:: 1.9.0 +// +// The column dimension of the arrays for which the returned +// arrays will be valid. +// By default `m` is taken equal to `n`. +// +// +// Returns +// ------- +// inds : tuple, shape(2) of ndarrays, shape(`n`) +// The indices for the triangle. The returned tuple contains two arrays, +// each with the indices along one dimension of the array. Can be used +// to slice a ndarray of shape(`n`, `n`). +// +// See also +// -------- +// tril_indices : similar function, for lower-triangular. +// mask_indices : generic function accepting an arbitrary mask function. +// triu, tril +// +// Notes +// ----- +// .. 
versionadded:: 1.4.0 +// +// Examples +// -------- +// Compute two different sets of indices to access 4x4 arrays, one for the +// upper triangular part starting at the main diagonal, and one starting two +// diagonals further right: +// +// >>> iu1 = np.triu_indices(4) +// >>> iu2 = np.triu_indices(4, 2) +// +// Here is how they can be used with a sample array: +// +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// +// Both for indexing: +// +// >>> a[iu1] +// array([ 0, 1, 2, ..., 10, 11, 15]) +// +// And for assigning values: +// +// >>> a[iu1] = -1 +// >>> a +// array([[-1, -1, -1, -1], +// [ 4, -1, -1, -1], +// [ 8, 9, -1, -1], +// [12, 13, 14, -1]]) +// +// These cover only a small part of the whole array (two diagonals right +// of the main one): +// +// >>> a[iu2] = -10 +// >>> a +// array([[ -1, -1, -10, -10], +// [ 4, -1, -1, -10], +// [ 8, 9, -1, -1], +// [ 12, 13, 14, -1]]) +// +// +// +//go:linkname TriuIndices py.triu_indices +func TriuIndices(n *py.Object, k *py.Object, m *py.Object) *py.Object +// +// Return the indices for the upper-triangle of arr. +// +// See `triu_indices` for full details. +// +// Parameters +// ---------- +// arr : ndarray, shape(N, N) +// The indices will be valid for square arrays. +// k : int, optional +// Diagonal offset (see `triu` for details). +// +// Returns +// ------- +// triu_indices_from : tuple, shape(2) of ndarray, shape(N) +// Indices for the upper-triangle of `arr`. +// +// Examples +// -------- +// +// Create a 4 by 4 array. +// +// >>> a = np.arange(16).reshape(4, 4) +// >>> a +// array([[ 0, 1, 2, 3], +// [ 4, 5, 6, 7], +// [ 8, 9, 10, 11], +// [12, 13, 14, 15]]) +// +// Pass the array to get the indices of the upper triangular elements. 
+// +// >>> triui = np.triu_indices_from(a) +// >>> triui +// (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) +// +// >>> a[triui] +// array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) +// +// This is syntactic sugar for triu_indices(). +// +// >>> np.triu_indices(a.shape[0]) +// (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) +// +// Use the `k` parameter to return the indices for the upper triangular array +// from the k-th diagonal. +// +// >>> triuim1 = np.triu_indices_from(a, k=1) +// >>> a[triuim1] +// array([ 1, 2, 3, 6, 7, 11]) +// +// +// See Also +// -------- +// triu_indices, triu, tril_indices_from +// +// Notes +// ----- +// .. versionadded:: 1.4.0 +// +// +// +//go:linkname TriuIndicesFrom py.triu_indices_from +func TriuIndicesFrom(arr *py.Object, k *py.Object) *py.Object +// +// Round to nearest integer towards zero. +// +// Round an array of floats element-wise to nearest integer towards zero. +// The rounded values are returned as floats. +// +// Parameters +// ---------- +// x : array_like +// An array of floats to be rounded +// out : ndarray, optional +// A location into which the result is stored. If provided, it must have +// a shape that the input broadcasts to. If not provided or None, a +// freshly-allocated array is returned. +// +// Returns +// ------- +// out : ndarray of floats +// A float array with the same dimensions as the input. +// If second argument is not supplied then a float array is returned +// with the rounded values. +// +// If a second argument is supplied the result is stored there. +// The return value `out` is then a reference to that array. 
+// +// See Also +// -------- +// rint, trunc, floor, ceil +// around : Round to given number of decimals +// +// Examples +// -------- +// >>> np.fix(3.14) +// 3.0 +// >>> np.fix(3) +// 3.0 +// >>> np.fix([2.1, 2.9, -2.1, -2.9]) +// array([ 2., 2., -2., -2.]) +// +// +// +//go:linkname Fix py.fix +func Fix(x *py.Object, out *py.Object) *py.Object +// +// Test element-wise for negative infinity, return result as bool array. +// +// Parameters +// ---------- +// x : array_like +// The input array. +// out : array_like, optional +// A location into which the result is stored. If provided, it must have a +// shape that the input broadcasts to. If not provided or None, a +// freshly-allocated boolean array is returned. +// +// Returns +// ------- +// out : ndarray +// A boolean array with the same dimensions as the input. +// If second argument is not supplied then a numpy boolean array is +// returned with values True where the corresponding element of the +// input is negative infinity and values False where the element of +// the input is not negative infinity. +// +// If a second argument is supplied the result is stored there. If the +// type of that array is a numeric type the result is represented as +// zeros and ones, if the type is boolean then as False and True. The +// return value `out` is then a reference to that array. +// +// See Also +// -------- +// isinf, isposinf, isnan, isfinite +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). +// +// Errors result if the second argument is also supplied when x is a scalar +// input, if first and second arguments have different shapes, or if the +// first argument has complex values. 
+// +// Examples +// -------- +// >>> np.isneginf(np.NINF) +// True +// >>> np.isneginf(np.inf) +// False +// >>> np.isneginf(np.PINF) +// False +// >>> np.isneginf([-np.inf, 0., np.inf]) +// array([ True, False, False]) +// +// >>> x = np.array([-np.inf, 0., np.inf]) +// >>> y = np.array([2, 2, 2]) +// >>> np.isneginf(x, y) +// array([1, 0, 0]) +// >>> y +// array([1, 0, 0]) +// +// +// +//go:linkname Isneginf py.isneginf +func Isneginf(x *py.Object, out *py.Object) *py.Object +// +// Test element-wise for positive infinity, return result as bool array. +// +// Parameters +// ---------- +// x : array_like +// The input array. +// out : array_like, optional +// A location into which the result is stored. If provided, it must have a +// shape that the input broadcasts to. If not provided or None, a +// freshly-allocated boolean array is returned. +// +// Returns +// ------- +// out : ndarray +// A boolean array with the same dimensions as the input. +// If second argument is not supplied then a boolean array is returned +// with values True where the corresponding element of the input is +// positive infinity and values False where the element of the input is +// not positive infinity. +// +// If a second argument is supplied the result is stored there. If the +// type of that array is a numeric type the result is represented as zeros +// and ones, if the type is boolean then as False and True. +// The return value `out` is then a reference to that array. +// +// See Also +// -------- +// isinf, isneginf, isfinite, isnan +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). 
+// +// Errors result if the second argument is also supplied when x is a scalar +// input, if first and second arguments have different shapes, or if the +// first argument has complex values +// +// Examples +// -------- +// >>> np.isposinf(np.PINF) +// True +// >>> np.isposinf(np.inf) +// True +// >>> np.isposinf(np.NINF) +// False +// >>> np.isposinf([-np.inf, 0., np.inf]) +// array([False, False, True]) +// +// >>> x = np.array([-np.inf, 0., np.inf]) +// >>> y = np.array([2, 2, 2]) +// >>> np.isposinf(x, y) +// array([0, 0, 1]) +// >>> y +// array([0, 0, 1]) +// +// +// +//go:linkname Isposinf py.isposinf +func Isposinf(x *py.Object, out *py.Object) *py.Object +// +// Pad an array. +// +// Parameters +// ---------- +// array : array_like of rank N +// The array to pad. +// pad_width : {sequence, array_like, int} +// Number of values padded to the edges of each axis. +// ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths +// for each axis. +// ``(before, after)`` or ``((before, after),)`` yields same before +// and after pad for each axis. +// ``(pad,)`` or ``int`` is a shortcut for before = after = pad width +// for all axes. +// mode : str or function, optional +// One of the following string values or a user supplied function. +// +// 'constant' (default) +// Pads with a constant value. +// 'edge' +// Pads with the edge values of array. +// 'linear_ramp' +// Pads with the linear ramp between end_value and the +// array edge value. +// 'maximum' +// Pads with the maximum value of all or part of the +// vector along each axis. +// 'mean' +// Pads with the mean value of all or part of the +// vector along each axis. +// 'median' +// Pads with the median value of all or part of the +// vector along each axis. +// 'minimum' +// Pads with the minimum value of all or part of the +// vector along each axis. +// 'reflect' +// Pads with the reflection of the vector mirrored on +// the first and last values of the vector along each +// axis. 
+// 'symmetric' +// Pads with the reflection of the vector mirrored +// along the edge of the array. +// 'wrap' +// Pads with the wrap of the vector along the axis. +// The first values are used to pad the end and the +// end values are used to pad the beginning. +// 'empty' +// Pads with undefined values. +// +// .. versionadded:: 1.17 +// +// +// Padding function, see Notes. +// stat_length : sequence or int, optional +// Used in 'maximum', 'mean', 'median', and 'minimum'. Number of +// values at edge of each axis used to calculate the statistic value. +// +// ``((before_1, after_1), ... (before_N, after_N))`` unique statistic +// lengths for each axis. +// +// ``(before, after)`` or ``((before, after),)`` yields same before +// and after statistic lengths for each axis. +// +// ``(stat_length,)`` or ``int`` is a shortcut for +// ``before = after = statistic`` length for all axes. +// +// Default is ``None``, to use the entire axis. +// constant_values : sequence or scalar, optional +// Used in 'constant'. The values to set the padded values for each +// axis. +// +// ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants +// for each axis. +// +// ``(before, after)`` or ``((before, after),)`` yields same before +// and after constants for each axis. +// +// ``(constant,)`` or ``constant`` is a shortcut for +// ``before = after = constant`` for all axes. +// +// Default is 0. +// end_values : sequence or scalar, optional +// Used in 'linear_ramp'. The values used for the ending value of the +// linear_ramp and that will form the edge of the padded array. +// +// ``((before_1, after_1), ... (before_N, after_N))`` unique end values +// for each axis. +// +// ``(before, after)`` or ``((before, after),)`` yields same before +// and after end values for each axis. +// +// ``(constant,)`` or ``constant`` is a shortcut for +// ``before = after = constant`` for all axes. +// +// Default is 0. 
+// reflect_type : {'even', 'odd'}, optional +// Used in 'reflect', and 'symmetric'. The 'even' style is the +// default with an unaltered reflection around the edge value. For +// the 'odd' style, the extended part of the array is created by +// subtracting the reflected values from two times the edge value. +// +// Returns +// ------- +// pad : ndarray +// Padded array of rank equal to `array` with shape increased +// according to `pad_width`. +// +// Notes +// ----- +// .. versionadded:: 1.7.0 +// +// For an array with rank greater than 1, some of the padding of later +// axes is calculated from padding of previous axes. This is easiest to +// think about with a rank 2 array where the corners of the padded array +// are calculated by using padded values from the first axis. +// +// The padding function, if used, should modify a rank 1 array in-place. It +// has the following signature:: +// +// padding_func(vector, iaxis_pad_width, iaxis, kwargs) +// +// where +// +// vector : ndarray +// A rank 1 array already padded with zeros. Padded values are +// vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. +// iaxis_pad_width : tuple +// A 2-tuple of ints, iaxis_pad_width[0] represents the number of +// values padded at the beginning of vector where +// iaxis_pad_width[1] represents the number of values padded at +// the end of vector. +// iaxis : int +// The axis currently being calculated. +// kwargs : dict +// Any keyword arguments the function requires. 
+// +// Examples +// -------- +// >>> a = [1, 2, 3, 4, 5] +// >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) +// array([4, 4, 1, ..., 6, 6, 6]) +// +// >>> np.pad(a, (2, 3), 'edge') +// array([1, 1, 1, ..., 5, 5, 5]) +// +// >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) +// array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) +// +// >>> np.pad(a, (2,), 'maximum') +// array([5, 5, 1, 2, 3, 4, 5, 5, 5]) +// +// >>> np.pad(a, (2,), 'mean') +// array([3, 3, 1, 2, 3, 4, 5, 3, 3]) +// +// >>> np.pad(a, (2,), 'median') +// array([3, 3, 1, 2, 3, 4, 5, 3, 3]) +// +// >>> a = [[1, 2], [3, 4]] +// >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') +// array([[1, 1, 1, 2, 1, 1, 1], +// [1, 1, 1, 2, 1, 1, 1], +// [1, 1, 1, 2, 1, 1, 1], +// [1, 1, 1, 2, 1, 1, 1], +// [3, 3, 3, 4, 3, 3, 3], +// [1, 1, 1, 2, 1, 1, 1], +// [1, 1, 1, 2, 1, 1, 1]]) +// +// >>> a = [1, 2, 3, 4, 5] +// >>> np.pad(a, (2, 3), 'reflect') +// array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) +// +// >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') +// array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) +// +// >>> np.pad(a, (2, 3), 'symmetric') +// array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) +// +// >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') +// array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) +// +// >>> np.pad(a, (2, 3), 'wrap') +// array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) +// +// >>> def pad_with(vector, pad_width, iaxis, kwargs): +// ... pad_value = kwargs.get('padder', 10) +// ... vector[:pad_width[0]] = pad_value +// ... 
vector[-pad_width[1]:] = pad_value +// >>> a = np.arange(6) +// >>> a = a.reshape((2, 3)) +// >>> np.pad(a, 2, pad_with) +// array([[10, 10, 10, 10, 10, 10, 10], +// [10, 10, 10, 10, 10, 10, 10], +// [10, 10, 0, 1, 2, 10, 10], +// [10, 10, 3, 4, 5, 10, 10], +// [10, 10, 10, 10, 10, 10, 10], +// [10, 10, 10, 10, 10, 10, 10]]) +// >>> np.pad(a, 2, pad_with, padder=100) +// array([[100, 100, 100, 100, 100, 100, 100], +// [100, 100, 100, 100, 100, 100, 100], +// [100, 100, 0, 1, 2, 100, 100], +// [100, 100, 3, 4, 5, 100, 100], +// [100, 100, 100, 100, 100, 100, 100], +// [100, 100, 100, 100, 100, 100, 100]]) +// +// +//go:linkname Pad py.pad +func Pad(array *py.Object, padWidth *py.Object, mode *py.Object) *py.Object +// +// Find the coefficients of a polynomial with the given sequence of roots. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Returns the coefficients of the polynomial whose leading coefficient +// is one for the given sequence of zeros (multiple roots must be included +// in the sequence as many times as their multiplicity; see Examples). +// A square matrix (or array, which will be treated as a matrix) can also +// be given, in which case the coefficients of the characteristic polynomial +// of the matrix are returned. +// +// Parameters +// ---------- +// seq_of_zeros : array_like, shape (N,) or (N, N) +// A sequence of polynomial roots, or a square array or matrix object. +// +// Returns +// ------- +// c : ndarray +// 1D array of polynomial coefficients from highest to lowest degree: +// +// ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` +// where c[0] always equals 1. +// +// Raises +// ------ +// ValueError +// If input is the wrong shape (the input must be a 1-D or square +// 2-D array). 
+// +// See Also +// -------- +// polyval : Compute polynomial values. +// roots : Return the roots of a polynomial. +// polyfit : Least squares polynomial fit. +// poly1d : A one-dimensional polynomial class. +// +// Notes +// ----- +// Specifying the roots of a polynomial still leaves one degree of +// freedom, typically represented by an undetermined leading +// coefficient. [1]_ In the case of this function, that coefficient - +// the first one in the returned array - is always taken as one. (If +// for some reason you have one other point, the only automatic way +// presently to leverage that information is to use ``polyfit``.) +// +// The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` +// matrix **A** is given by +// +// :math:`p_a(t) = \mathrm{det}(t\, \mathbf{I} - \mathbf{A})`, +// +// where **I** is the `n`-by-`n` identity matrix. [2]_ +// +// References +// ---------- +// .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry, +// Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. +// +// .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," +// Academic Press, pg. 182, 1980. +// +// Examples +// -------- +// Given a sequence of a polynomial's zeros: +// +// >>> np.poly((0, 0, 0)) # Multiple root example +// array([1., 0., 0., 0.]) +// +// The line above represents z**3 + 0*z**2 + 0*z + 0. +// +// >>> np.poly((-1./2, 0, 1./2)) +// array([ 1. , 0. , -0.25, 0. ]) +// +// The line above represents z**3 - z/4 +// +// >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) +// array([ 1. , -0.77086955, 0.08618131, 0. ]) # random +// +// Given a square array object: +// +// >>> P = np.array([[0, 1./3], [-1./2, 0]]) +// >>> np.poly(P) +// array([1. , 0. , 0.16666667]) +// +// Note how in all cases the leading coefficient is always 1. +// +// +// +//go:linkname Poly py.poly +func Poly(seqOfZeros *py.Object) *py.Object +// +// Return the roots of a polynomial with coefficients given in p. +// +// .. 
note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// The values in the rank-1 array `p` are coefficients of a polynomial. +// If the length of `p` is n+1 then the polynomial is described by:: +// +// p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] +// +// Parameters +// ---------- +// p : array_like +// Rank-1 array of polynomial coefficients. +// +// Returns +// ------- +// out : ndarray +// An array containing the roots of the polynomial. +// +// Raises +// ------ +// ValueError +// When `p` cannot be converted to a rank-1 array. +// +// See also +// -------- +// poly : Find the coefficients of a polynomial with a given sequence +// of roots. +// polyval : Compute polynomial values. +// polyfit : Least squares polynomial fit. +// poly1d : A one-dimensional polynomial class. +// +// Notes +// ----- +// The algorithm relies on computing the eigenvalues of the +// companion matrix [1]_. +// +// References +// ---------- +// .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: +// Cambridge University Press, 1999, pp. 146-7. +// +// Examples +// -------- +// >>> coeff = [3.2, 2, 1] +// >>> np.roots(coeff) +// array([-0.3125+0.46351241j, -0.3125-0.46351241j]) +// +// +// +//go:linkname Roots py.roots +func Roots(p *py.Object) *py.Object +// +// Return an antiderivative (indefinite integral) of a polynomial. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// The returned order `m` antiderivative `P` of polynomial `p` satisfies +// :math:`\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` +// integration constants `k`. 
The constants determine the low-order +// polynomial part +// +// .. math:: \frac{k_{m-1}}{0!} x^0 + \ldots + \frac{k_0}{(m-1)!}x^{m-1} +// +// of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. +// +// Parameters +// ---------- +// p : array_like or poly1d +// Polynomial to integrate. +// A sequence is interpreted as polynomial coefficients, see `poly1d`. +// m : int, optional +// Order of the antiderivative. (Default: 1) +// k : list of `m` scalars or scalar, optional +// Integration constants. They are given in the order of integration: +// those corresponding to highest-order terms come first. +// +// If ``None`` (default), all constants are assumed to be zero. +// If `m = 1`, a single scalar can be given instead of a list. +// +// See Also +// -------- +// polyder : derivative of a polynomial +// poly1d.integ : equivalent method +// +// Examples +// -------- +// The defining property of the antiderivative: +// +// >>> p = np.poly1d([1,1,1]) +// >>> P = np.polyint(p) +// >>> P +// poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary +// >>> np.polyder(P) == p +// True +// +// The integration constants default to zero, but can be specified: +// +// >>> P = np.polyint(p, 3) +// >>> P(0) +// 0.0 +// >>> np.polyder(P)(0) +// 0.0 +// >>> np.polyder(P, 2)(0) +// 0.0 +// >>> P = np.polyint(p, 3, k=[6,5,3]) +// >>> P +// poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary +// +// Note that 3 = 6 / 2!, and that the constants are given in the order of +// integrations. Constant of the highest-order polynomial term comes first: +// +// >>> np.polyder(P, 2)(0) +// 6.0 +// >>> np.polyder(P, 1)(0) +// 5.0 +// >>> P(0) +// 3.0 +// +// +// +//go:linkname Polyint py.polyint +func Polyint(p *py.Object, m *py.Object, k *py.Object) *py.Object +// +// Return the derivative of the specified order of a polynomial. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. 
+// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Parameters +// ---------- +// p : poly1d or sequence +// Polynomial to differentiate. +// A sequence is interpreted as polynomial coefficients, see `poly1d`. +// m : int, optional +// Order of differentiation (default: 1) +// +// Returns +// ------- +// der : poly1d +// A new polynomial representing the derivative. +// +// See Also +// -------- +// polyint : Anti-derivative of a polynomial. +// poly1d : Class for one-dimensional polynomials. +// +// Examples +// -------- +// The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: +// +// >>> p = np.poly1d([1,1,1,1]) +// >>> p2 = np.polyder(p) +// >>> p2 +// poly1d([3, 2, 1]) +// +// which evaluates to: +// +// >>> p2(2.) +// 17.0 +// +// We can verify this, approximating the derivative with +// ``(f(x + h) - f(x))/h``: +// +// >>> (p(2. + 0.001) - p(2.)) / 0.001 +// 17.007000999997857 +// +// The fourth-order derivative of a 3rd-order polynomial is zero: +// +// >>> np.polyder(p, 2) +// poly1d([6, 2]) +// >>> np.polyder(p, 3) +// poly1d([6]) +// >>> np.polyder(p, 4) +// poly1d([0]) +// +// +// +//go:linkname Polyder py.polyder +func Polyder(p *py.Object, m *py.Object) *py.Object +// +// Find the sum of two polynomials. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Returns the polynomial resulting from the sum of two input polynomials. +// Each input must be either a poly1d object or a 1D sequence of polynomial +// coefficients, from highest to lowest degree. +// +// Parameters +// ---------- +// a1, a2 : array_like or poly1d object +// Input polynomials. +// +// Returns +// ------- +// out : ndarray or poly1d object +// The sum of the inputs. 
If either input is a poly1d object, then the +// output is also a poly1d object. Otherwise, it is a 1D array of +// polynomial coefficients from highest to lowest degree. +// +// See Also +// -------- +// poly1d : A one-dimensional polynomial class. +// poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval +// +// Examples +// -------- +// >>> np.polyadd([1, 2], [9, 5, 4]) +// array([9, 6, 6]) +// +// Using poly1d objects: +// +// >>> p1 = np.poly1d([1, 2]) +// >>> p2 = np.poly1d([9, 5, 4]) +// >>> print(p1) +// 1 x + 2 +// >>> print(p2) +// 2 +// 9 x + 5 x + 4 +// >>> print(np.polyadd(p1, p2)) +// 2 +// 9 x + 6 x + 6 +// +// +// +//go:linkname Polyadd py.polyadd +func Polyadd(a1 *py.Object, a2 *py.Object) *py.Object +// +// Difference (subtraction) of two polynomials. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Given two polynomials `a1` and `a2`, returns ``a1 - a2``. +// `a1` and `a2` can be either array_like sequences of the polynomials' +// coefficients (including coefficients equal to zero), or `poly1d` objects. +// +// Parameters +// ---------- +// a1, a2 : array_like or poly1d +// Minuend and subtrahend polynomials, respectively. +// +// Returns +// ------- +// out : ndarray or poly1d +// Array or `poly1d` object of the difference polynomial's coefficients. +// +// See Also +// -------- +// polyval, polydiv, polymul, polyadd +// +// Examples +// -------- +// .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) +// +// >>> np.polysub([2, 10, -2], [3, 10, -4]) +// array([-1, 0, 2]) +// +// +// +//go:linkname Polysub py.polysub +func Polysub(a1 *py.Object, a2 *py.Object) *py.Object +// +// Find the product of two polynomials. +// +// .. note:: +// This forms part of the old polynomial API. 
Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Finds the polynomial resulting from the multiplication of the two input +// polynomials. Each input must be either a poly1d object or a 1D sequence +// of polynomial coefficients, from highest to lowest degree. +// +// Parameters +// ---------- +// a1, a2 : array_like or poly1d object +// Input polynomials. +// +// Returns +// ------- +// out : ndarray or poly1d object +// The polynomial resulting from the multiplication of the inputs. If +// either inputs is a poly1d object, then the output is also a poly1d +// object. Otherwise, it is a 1D array of polynomial coefficients from +// highest to lowest degree. +// +// See Also +// -------- +// poly1d : A one-dimensional polynomial class. +// poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval +// convolve : Array convolution. Same output as polymul, but has parameter +// for overlap mode. +// +// Examples +// -------- +// >>> np.polymul([1, 2, 3], [9, 5, 1]) +// array([ 9, 23, 38, 17, 3]) +// +// Using poly1d objects: +// +// >>> p1 = np.poly1d([1, 2, 3]) +// >>> p2 = np.poly1d([9, 5, 1]) +// >>> print(p1) +// 2 +// 1 x + 2 x + 3 +// >>> print(p2) +// 2 +// 9 x + 5 x + 1 +// >>> print(np.polymul(p1, p2)) +// 4 3 2 +// 9 x + 23 x + 38 x + 17 x + 3 +// +// +// +//go:linkname Polymul py.polymul +func Polymul(a1 *py.Object, a2 *py.Object) *py.Object +// +// Returns the quotient and remainder of polynomial division. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// The input arrays are the coefficients (including any coefficients +// equal to zero) of the "numerator" (dividend) and "denominator" +// (divisor) polynomials, respectively. 
+// +// Parameters +// ---------- +// u : array_like or poly1d +// Dividend polynomial's coefficients. +// +// v : array_like or poly1d +// Divisor polynomial's coefficients. +// +// Returns +// ------- +// q : ndarray +// Coefficients, including those equal to zero, of the quotient. +// r : ndarray +// Coefficients, including those equal to zero, of the remainder. +// +// See Also +// -------- +// poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub +// polyval +// +// Notes +// ----- +// Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need +// not equal `v.ndim`. In other words, all four possible combinations - +// ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, +// ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. +// +// Examples +// -------- +// .. math:: \frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 +// +// >>> x = np.array([3.0, 5.0, 2.0]) +// >>> y = np.array([2.0, 1.0]) +// >>> np.polydiv(x, y) +// (array([1.5 , 1.75]), array([0.25])) +// +// +// +//go:linkname Polydiv py.polydiv +func Polydiv(u *py.Object, v *py.Object) *py.Object +// +// Evaluate a polynomial at specific values. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// If `p` is of length N, this function returns the value: +// +// ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` +// +// If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``. +// If `x` is another polynomial then the composite polynomial ``p(x(t))`` +// is returned. +// +// Parameters +// ---------- +// p : array_like or poly1d object +// 1D array of polynomial coefficients (including coefficients equal +// to zero) from highest degree to the constant term, or an +// instance of poly1d. 
+// x : array_like or poly1d object +// A number, an array of numbers, or an instance of poly1d, at +// which to evaluate `p`. +// +// Returns +// ------- +// values : ndarray or poly1d +// If `x` is a poly1d instance, the result is the composition of the two +// polynomials, i.e., `x` is "substituted" in `p` and the simplified +// result is returned. In addition, the type of `x` - array_like or +// poly1d - governs the type of the output: `x` array_like => `values` +// array_like, `x` a poly1d object => `values` is also. +// +// See Also +// -------- +// poly1d: A polynomial class. +// +// Notes +// ----- +// Horner's scheme [1]_ is used to evaluate the polynomial. Even so, +// for polynomials of high degree the values may be inaccurate due to +// rounding errors. Use carefully. +// +// If `x` is a subtype of `ndarray` the return value will be of the same type. +// +// References +// ---------- +// .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. +// trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand +// Reinhold Co., 1985, pg. 720. +// +// Examples +// -------- +// >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 +// 76 +// >>> np.polyval([3,0,1], np.poly1d(5)) +// poly1d([76]) +// >>> np.polyval(np.poly1d([3,0,1]), 5) +// 76 +// >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) +// poly1d([76]) +// +// +// +//go:linkname Polyval py.polyval +func Polyval(p *py.Object, x *py.Object) *py.Object +// +// Least squares polynomial fit. +// +// .. note:: +// This forms part of the old polynomial API. Since version 1.4, the +// new polynomial API defined in `numpy.polynomial` is preferred. +// A summary of the differences can be found in the +// :doc:`transition guide `. +// +// Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` +// to points `(x, y)`. Returns a vector of coefficients `p` that minimises +// the squared error in the order `deg`, `deg-1`, ... `0`. 
+// +// The `Polynomial.fit ` class +// method is recommended for new code as it is more stable numerically. See +// the documentation of the method for more information. +// +// Parameters +// ---------- +// x : array_like, shape (M,) +// x-coordinates of the M sample points ``(x[i], y[i])``. +// y : array_like, shape (M,) or (M, K) +// y-coordinates of the sample points. Several data sets of sample +// points sharing the same x-coordinates can be fitted at once by +// passing in a 2D-array that contains one dataset per column. +// deg : int +// Degree of the fitting polynomial +// rcond : float, optional +// Relative condition number of the fit. Singular values smaller than +// this relative to the largest singular value will be ignored. The +// default value is len(x)*eps, where eps is the relative precision of +// the float type, about 2e-16 in most cases. +// full : bool, optional +// Switch determining nature of return value. When it is False (the +// default) just the coefficients are returned, when True diagnostic +// information from the singular value decomposition is also returned. +// w : array_like, shape (M,), optional +// Weights. If not None, the weight ``w[i]`` applies to the unsquared +// residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are +// chosen so that the errors of the products ``w[i]*y[i]`` all have the +// same variance. When using inverse-variance weighting, use +// ``w[i] = 1/sigma(y[i])``. The default value is None. +// cov : bool or str, optional +// If given and not `False`, return not just the estimate but also its +// covariance matrix. By default, the covariance are scaled by +// chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed +// to be unreliable except in a relative sense and everything is scaled +// such that the reduced chi2 is unity. 
This scaling is omitted if +// ``cov='unscaled'``, as is relevant for the case that the weights are +// w = 1/sigma, with sigma known to be a reliable estimate of the +// uncertainty. +// +// Returns +// ------- +// p : ndarray, shape (deg + 1,) or (deg + 1, K) +// Polynomial coefficients, highest power first. If `y` was 2-D, the +// coefficients for `k`-th data set are in ``p[:,k]``. +// +// residuals, rank, singular_values, rcond +// These values are only returned if ``full == True`` +// +// - residuals -- sum of squared residuals of the least squares fit +// - rank -- the effective rank of the scaled Vandermonde +// coefficient matrix +// - singular_values -- singular values of the scaled Vandermonde +// coefficient matrix +// - rcond -- value of `rcond`. +// +// For more details, see `numpy.linalg.lstsq`. +// +// V : ndarray, shape (M,M) or (M,M,K) +// Present only if ``full == False`` and ``cov == True``. The covariance +// matrix of the polynomial coefficient estimates. The diagonal of +// this matrix are the variance estimates for each coefficient. If y +// is a 2-D array, then the covariance matrix for the `k`-th data set +// are in ``V[:,:,k]`` +// +// +// Warns +// ----- +// RankWarning +// The rank of the coefficient matrix in the least-squares fit is +// deficient. The warning is only raised if ``full == False``. +// +// The warnings can be turned off by +// +// >>> import warnings +// >>> warnings.simplefilter('ignore', np.RankWarning) +// +// See Also +// -------- +// polyval : Compute polynomial values. +// linalg.lstsq : Computes a least-squares fit. +// scipy.interpolate.UnivariateSpline : Computes spline fits. +// +// Notes +// ----- +// The solution minimizes the squared error +// +// .. math:: +// E = \sum_{j=0}^k |p(x_j) - y_j|^2 +// +// in the equations:: +// +// x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] +// x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] +// ... +// x[k]**n * p[0] + ... 
+ x[k] * p[n-1] + p[n] = y[k] +// +// The coefficient matrix of the coefficients `p` is a Vandermonde matrix. +// +// `polyfit` issues a `RankWarning` when the least-squares fit is badly +// conditioned. This implies that the best fit is not well-defined due +// to numerical error. The results may be improved by lowering the polynomial +// degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter +// can also be set to a value smaller than its default, but the resulting +// fit may be spurious: including contributions from the small singular +// values can add numerical noise to the result. +// +// Note that fitting polynomial coefficients is inherently badly conditioned +// when the degree of the polynomial is large or the interval of sample points +// is badly centered. The quality of the fit should always be checked in these +// cases. When polynomial fits are not satisfactory, splines may be a good +// alternative. +// +// References +// ---------- +// .. [1] Wikipedia, "Curve fitting", +// https://en.wikipedia.org/wiki/Curve_fitting +// .. [2] Wikipedia, "Polynomial interpolation", +// https://en.wikipedia.org/wiki/Polynomial_interpolation +// +// Examples +// -------- +// >>> import warnings +// >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) +// >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) +// >>> z = np.polyfit(x, y, 3) +// >>> z +// array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary +// +// It is convenient to use `poly1d` objects for dealing with polynomials: +// +// >>> p = np.poly1d(z) +// >>> p(0.5) +// 0.6143849206349179 # may vary +// >>> p(3.5) +// -0.34732142857143039 # may vary +// >>> p(10) +// 22.579365079365115 # may vary +// +// High-order polynomials may oscillate wildly: +// +// >>> with warnings.catch_warnings(): +// ... warnings.simplefilter('ignore', np.RankWarning) +// ... p30 = np.poly1d(np.polyfit(x, y, 30)) +// ... 
+// >>> p30(4) +// -0.80000000000000204 # may vary +// >>> p30(5) +// -0.99999999999999445 # may vary +// >>> p30(4.5) +// -0.10547061179440398 # may vary +// +// Illustration: +// +// >>> import matplotlib.pyplot as plt +// >>> xp = np.linspace(-2, 6, 100) +// >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') +// >>> plt.ylim(-2,2) +// (-2, 2) +// >>> plt.show() +// +// +// +//go:linkname Polyfit py.polyfit +func Polyfit(x *py.Object, y *py.Object, deg *py.Object, rcond *py.Object, full *py.Object, w *py.Object, cov *py.Object) *py.Object +// +// Determine if a class is a subclass of a second class. +// +// `issubclass_` is equivalent to the Python built-in ``issubclass``, +// except that it returns False instead of raising a TypeError if one +// of the arguments is not a class. +// +// Parameters +// ---------- +// arg1 : class +// Input class. True is returned if `arg1` is a subclass of `arg2`. +// arg2 : class or tuple of classes. +// Input class. If a tuple of classes, True is returned if `arg1` is a +// subclass of any of the tuple elements. +// +// Returns +// ------- +// out : bool +// Whether `arg1` is a subclass of `arg2` or not. +// +// See Also +// -------- +// issubsctype, issubdtype, issctype +// +// Examples +// -------- +// >>> np.issubclass_(np.int32, int) +// False +// >>> np.issubclass_(np.int32, float) +// False +// >>> np.issubclass_(np.float64, float) +// True +// +// +// +//go:linkname Issubclass_ py.issubclass_ +func Issubclass_(arg1 *py.Object, arg2 *py.Object) *py.Object +// +// Determine if the first argument is a subclass of the second argument. +// +// Parameters +// ---------- +// arg1, arg2 : dtype or dtype specifier +// Data-types. +// +// Returns +// ------- +// out : bool +// The result. 
+// +// See Also +// -------- +// issctype, issubdtype, obj2sctype +// +// Examples +// -------- +// >>> np.issubsctype('S8', str) +// False +// >>> np.issubsctype(np.array([1]), int) +// True +// >>> np.issubsctype(np.array([1]), float) +// False +// +// +// +//go:linkname Issubsctype py.issubsctype +func Issubsctype(arg1 *py.Object, arg2 *py.Object) *py.Object +// +// Issues a DeprecationWarning, adds warning to `old_name`'s +// docstring, rebinds ``old_name.__name__`` and returns the new +// function object. +// +// This function may also be used as a decorator. +// +// Parameters +// ---------- +// func : function +// The function to be deprecated. +// old_name : str, optional +// The name of the function to be deprecated. Default is None, in +// which case the name of `func` is used. +// new_name : str, optional +// The new name for the function. Default is None, in which case the +// deprecation message is that `old_name` is deprecated. If given, the +// deprecation message is that `old_name` is deprecated and `new_name` +// should be used instead. +// message : str, optional +// Additional explanation of the deprecation. Displayed in the +// docstring after the warning. +// +// Returns +// ------- +// old_func : function +// The deprecated function. +// +// Examples +// -------- +// Note that ``olduint`` returns a value after printing Deprecation +// Warning: +// +// >>> olduint = np.deprecate(np.uint) +// DeprecationWarning: `uint64` is deprecated! # may vary +// >>> olduint(6) +// 6 +// +// +// +//go:linkname Deprecate py.deprecate +func Deprecate(__llgo_va_list ...interface{}) *py.Object +// +// Deprecates a function and includes the deprecation in its docstring. +// +// This function is used as a decorator. It returns an object that can be +// used to issue a DeprecationWarning, by passing the to-be decorated +// function as argument, this adds warning to the to-be decorated function's +// docstring and returns the new function object. 
+// +// See Also +// -------- +// deprecate : Decorate a function such that it issues a `DeprecationWarning` +// +// Parameters +// ---------- +// msg : str +// Additional explanation of the deprecation. Displayed in the +// docstring after the warning. +// +// Returns +// ------- +// obj : object +// +// +// +//go:linkname DeprecateWithDoc py.deprecate_with_doc +func DeprecateWithDoc(msg *py.Object) *py.Object +// +// Return the directory that contains the NumPy \*.h header files. +// +// Extension modules that need to compile against NumPy should use this +// function to locate the appropriate include directory. +// +// Notes +// ----- +// When using ``distutils``, for example in ``setup.py``:: +// +// import numpy as np +// ... +// Extension('extension_name', ... +// include_dirs=[np.get_include()]) +// ... +// +// +// +//go:linkname GetInclude py.get_include +func GetInclude() *py.Object +// +// Get help information for an array, function, class, or module. +// +// Parameters +// ---------- +// object : object or str, optional +// Input object or name to get information about. If `object` is +// an `ndarray` instance, information about the array is printed. +// If `object` is a numpy object, its docstring is given. If it is +// a string, available modules are searched for matching objects. +// If None, information about `info` itself is returned. +// maxwidth : int, optional +// Printing width. +// output : file like object, optional +// File like object that the output is written to, default is +// ``None``, in which case ``sys.stdout`` will be used. +// The object has to be opened in 'w' or 'a' mode. +// toplevel : str, optional +// Start search at this level. +// +// See Also +// -------- +// source, lookfor +// +// Notes +// ----- +// When used interactively with an object, ``np.info(obj)`` is equivalent +// to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython +// prompt. 
+// +// Examples +// -------- +// >>> np.info(np.polyval) # doctest: +SKIP +// polyval(p, x) +// Evaluate the polynomial p at x. +// ... +// +// When using a string for `object` it is possible to get multiple results. +// +// >>> np.info('fft') # doctest: +SKIP +// *** Found in numpy *** +// Core FFT routines +// ... +// *** Found in numpy.fft *** +// fft(a, n=None, axis=-1) +// ... +// *** Repeat reference found in numpy.fft.fftpack *** +// *** Total of 3 references found. *** +// +// When the argument is an array, information about the array is printed. +// +// >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) +// >>> np.info(a) +// class: ndarray +// shape: (2, 3) +// strides: (24, 8) +// itemsize: 8 +// aligned: True +// contiguous: True +// fortran: False +// data pointer: 0x562b6e0d2860 # may vary +// byteorder: little +// byteswap: False +// type: complex64 +// +// +// +//go:linkname Info py.info +func Info(object *py.Object, maxwidth *py.Object, output *py.Object, toplevel *py.Object) *py.Object +// +// Print or write to a file the source code for a NumPy object. +// +// The source code is only returned for objects written in Python. Many +// functions and classes are defined in C and will therefore not return +// useful information. +// +// Parameters +// ---------- +// object : numpy object +// Input object. This can be any object (function, class, module, +// ...). +// output : file object, optional +// If `output` not supplied then source code is printed to screen +// (sys.stdout). File object must be created with either write 'w' or +// append 'a' modes. +// +// See Also +// -------- +// lookfor, info +// +// Examples +// -------- +// >>> np.source(np.interp) #doctest: +SKIP +// In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py +// def interp(x, xp, fp, left=None, right=None): +// """.... 
(full docstring printed)""" +// if isinstance(x, (float, int, number)): +// return compiled_interp([x], xp, fp, left, right).item() +// else: +// return compiled_interp(x, xp, fp, left, right) +// +// The source code is only returned for objects written in Python. +// +// >>> np.source(np.array) #doctest: +SKIP +// Not available for this object. +// +// +// +//go:linkname Source py.source +func Source(object *py.Object, output *py.Object) *py.Object +// +// Print the NumPy arrays in the given dictionary. +// +// If there is no dictionary passed in or `vardict` is None then returns +// NumPy arrays in the globals() dictionary (all NumPy arrays in the +// namespace). +// +// Parameters +// ---------- +// vardict : dict, optional +// A dictionary possibly containing ndarrays. Default is globals(). +// +// Returns +// ------- +// out : None +// Returns 'None'. +// +// Notes +// ----- +// Prints out the name, shape, bytes and type of all of the ndarrays +// present in `vardict`. +// +// Examples +// -------- +// >>> a = np.arange(10) +// >>> b = np.ones(20) +// >>> np.who() +// Name Shape Bytes Type +// =========================================================== +// a 10 80 int64 +// b 20 160 float64 +// Upper bound on total bytes = 240 +// +// >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', +// ... 'idx':5} +// >>> np.who(d) +// Name Shape Bytes Type +// =========================================================== +// x 2 16 float64 +// y 3 24 float64 +// Upper bound on total bytes = 40 +// +// +// +//go:linkname Who py.who +func Who(vardict *py.Object) *py.Object +// +// Do a keyword search on docstrings. +// +// A list of objects that matched the search is displayed, +// sorted by relevance. All given keywords need to be found in the +// docstring for it to be returned as a result, but the order does +// not matter. +// +// Parameters +// ---------- +// what : str +// String containing words to look for. 
+// module : str or list, optional +// Name of module(s) whose docstrings to go through. +// import_modules : bool, optional +// Whether to import sub-modules in packages. Default is True. +// regenerate : bool, optional +// Whether to re-generate the docstring cache. Default is False. +// output : file-like, optional +// File-like object to write the output to. If omitted, use a pager. +// +// See Also +// -------- +// source, info +// +// Notes +// ----- +// Relevance is determined only roughly, by checking if the keywords occur +// in the function name, at the start of a docstring, etc. +// +// Examples +// -------- +// >>> np.lookfor('binary representation') # doctest: +SKIP +// Search results for 'binary representation' +// ------------------------------------------ +// numpy.binary_repr +// Return the binary representation of the input number as a string. +// numpy.core.setup_common.long_double_representation +// Given a binary dump as given by GNU od -b, look for long double +// numpy.base_repr +// Return a string representation of a number in the given base system. +// ... +// +// +// +//go:linkname Lookfor py.lookfor +func Lookfor(what *py.Object, module *py.Object, importModules *py.Object, regenerate *py.Object, output *py.Object) *py.Object +// +// Returns pointers to the end-points of an array. +// +// Parameters +// ---------- +// a : ndarray +// Input array. It must conform to the Python-side of the array +// interface. +// +// Returns +// ------- +// (low, high) : tuple of 2 integers +// The first integer is the first byte of the array, the second +// integer is just past the last byte of the array. If `a` is not +// contiguous it will not use every byte between the (`low`, `high`) +// values. 
+// +// Examples +// -------- +// >>> I = np.eye(2, dtype='f'); I.dtype +// dtype('float32') +// >>> low, high = np.byte_bounds(I) +// >>> high - low == I.size*I.itemsize +// True +// >>> I = np.eye(2); I.dtype +// dtype('float64') +// >>> low, high = np.byte_bounds(I) +// >>> high - low == I.size*I.itemsize +// True +// +// +// +//go:linkname ByteBounds py.byte_bounds +func ByteBounds(a *py.Object) *py.Object +// +// Protected string evaluation. +// +// Evaluate a string containing a Python literal expression without +// allowing the execution of arbitrary non-literal code. +// +// .. warning:: +// +// This function is identical to :py:meth:`ast.literal_eval` and +// has the same security implications. It may not always be safe +// to evaluate large input strings. +// +// Parameters +// ---------- +// source : str +// The string to evaluate. +// +// Returns +// ------- +// obj : object +// The result of evaluating `source`. +// +// Raises +// ------ +// SyntaxError +// If the code has invalid Python syntax, or if it contains +// non-literal code. +// +// Examples +// -------- +// >>> np.safe_eval('1') +// 1 +// >>> np.safe_eval('[1, 2, 3]') +// [1, 2, 3] +// >>> np.safe_eval('{"foo": ("bar", 10.0)}') +// {'foo': ('bar', 10.0)} +// +// >>> np.safe_eval('import os') +// Traceback (most recent call last): +// ... +// SyntaxError: invalid syntax +// +// >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') +// Traceback (most recent call last): +// ... +// ValueError: malformed node or string: <_ast.Call object at 0x...> +// +// +// +//go:linkname SafeEval py.safe_eval +func SafeEval(source *py.Object) *py.Object +// +// Print information about various resources in the system +// including available intrinsic support and BLAS/LAPACK library +// in use +// +// .. versionadded:: 1.24.0 +// +// See Also +// -------- +// show_config : Show libraries in the system on which NumPy was built. +// +// Notes +// ----- +// 1. 
Information is derived with the help of `threadpoolctl `_ +// library if available. +// 2. SIMD related information is derived from ``__cpu_features__``, +// ``__cpu_baseline__`` and ``__cpu_dispatch__`` +// +// +// +//go:linkname ShowRuntime py.show_runtime +func ShowRuntime() *py.Object +// +// The differences between consecutive elements of an array. +// +// Parameters +// ---------- +// ary : array_like +// If necessary, will be flattened before the differences are taken. +// to_end : array_like, optional +// Number(s) to append at the end of the returned differences. +// to_begin : array_like, optional +// Number(s) to prepend at the beginning of the returned differences. +// +// Returns +// ------- +// ediff1d : ndarray +// The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. +// +// See Also +// -------- +// diff, gradient +// +// Notes +// ----- +// When applied to masked arrays, this function drops the mask information +// if the `to_begin` and/or `to_end` parameters are used. +// +// Examples +// -------- +// >>> x = np.array([1, 2, 4, 7, 0]) +// >>> np.ediff1d(x) +// array([ 1, 2, 3, -7]) +// +// >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) +// array([-99, 1, 2, ..., -7, 88, 99]) +// +// The returned array is always 1D. +// +// >>> y = [[1, 2, 4], [1, 6, 24]] +// >>> np.ediff1d(y) +// array([ 1, 2, -3, 5, 18]) +// +// +// +//go:linkname Ediff1d py.ediff1d +func Ediff1d(ary *py.Object, toEnd *py.Object, toBegin *py.Object) *py.Object +// +// Find the intersection of two arrays. +// +// Return the sorted, unique values that are in both of the input arrays. +// +// Parameters +// ---------- +// ar1, ar2 : array_like +// Input arrays. Will be flattened if not already 1D. +// assume_unique : bool +// If True, the input arrays are both assumed to be unique, which +// can speed up the calculation. If True but ``ar1`` or ``ar2`` are not +// unique, incorrect results and out-of-bounds indices could result. +// Default is False. 
+// return_indices : bool +// If True, the indices which correspond to the intersection of the two +// arrays are returned. The first instance of a value is used if there are +// multiple. Default is False. +// +// .. versionadded:: 1.15.0 +// +// Returns +// ------- +// intersect1d : ndarray +// Sorted 1D array of common and unique elements. +// comm1 : ndarray +// The indices of the first occurrences of the common values in `ar1`. +// Only provided if `return_indices` is True. +// comm2 : ndarray +// The indices of the first occurrences of the common values in `ar2`. +// Only provided if `return_indices` is True. +// +// +// See Also +// -------- +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. +// +// Examples +// -------- +// >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) +// array([1, 3]) +// +// To intersect more than two arrays, use functools.reduce: +// +// >>> from functools import reduce +// >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) +// array([3]) +// +// To return the indices of the values common to the input arrays +// along with the intersected values: +// +// >>> x = np.array([1, 1, 2, 3, 4]) +// >>> y = np.array([2, 1, 4, 6]) +// >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) +// >>> x_ind, y_ind +// (array([0, 2, 4]), array([1, 0, 2])) +// >>> xy, x[x_ind], y[y_ind] +// (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) +// +// +// +//go:linkname Intersect1d py.intersect1d +func Intersect1d(ar1 *py.Object, ar2 *py.Object, assumeUnique *py.Object, returnIndices *py.Object) *py.Object +// +// Find the set exclusive-or of two arrays. +// +// Return the sorted, unique values that are in only one (not both) of the +// input arrays. +// +// Parameters +// ---------- +// ar1, ar2 : array_like +// Input arrays. +// assume_unique : bool +// If True, the input arrays are both assumed to be unique, which +// can speed up the calculation. 
Default is False. +// +// Returns +// ------- +// setxor1d : ndarray +// Sorted 1D array of unique values that are in only one of the input +// arrays. +// +// Examples +// -------- +// >>> a = np.array([1, 2, 3, 2, 4]) +// >>> b = np.array([2, 3, 5, 7, 5]) +// >>> np.setxor1d(a,b) +// array([1, 4, 5, 7]) +// +// +// +//go:linkname Setxor1d py.setxor1d +func Setxor1d(ar1 *py.Object, ar2 *py.Object, assumeUnique *py.Object) *py.Object +// +// Find the union of two arrays. +// +// Return the unique, sorted array of values that are in either of the two +// input arrays. +// +// Parameters +// ---------- +// ar1, ar2 : array_like +// Input arrays. They are flattened if they are not already 1D. +// +// Returns +// ------- +// union1d : ndarray +// Unique, sorted union of the input arrays. +// +// See Also +// -------- +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. +// +// Examples +// -------- +// >>> np.union1d([-1, 0, 1], [-2, 0, 2]) +// array([-2, -1, 0, 1, 2]) +// +// To find the union of more than two arrays, use functools.reduce: +// +// >>> from functools import reduce +// >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) +// array([1, 2, 3, 4, 6]) +// +// +//go:linkname Union1d py.union1d +func Union1d(ar1 *py.Object, ar2 *py.Object) *py.Object +// +// Find the set difference of two arrays. +// +// Return the unique values in `ar1` that are not in `ar2`. +// +// Parameters +// ---------- +// ar1 : array_like +// Input array. +// ar2 : array_like +// Input comparison array. +// assume_unique : bool +// If True, the input arrays are both assumed to be unique, which +// can speed up the calculation. Default is False. +// +// Returns +// ------- +// setdiff1d : ndarray +// 1D array of values in `ar1` that are not in `ar2`. The result +// is sorted when `assume_unique=False`, but otherwise only sorted +// if the input is sorted. 
+// +// See Also +// -------- +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. +// +// Examples +// -------- +// >>> a = np.array([1, 2, 3, 2, 4, 1]) +// >>> b = np.array([3, 4, 5, 6]) +// >>> np.setdiff1d(a, b) +// array([1, 2]) +// +// +// +//go:linkname Setdiff1d py.setdiff1d +func Setdiff1d(ar1 *py.Object, ar2 *py.Object, assumeUnique *py.Object) *py.Object +// +// Find the unique elements of an array. +// +// Returns the sorted unique elements of an array. There are three optional +// outputs in addition to the unique elements: +// +// * the indices of the input array that give the unique values +// * the indices of the unique array that reconstruct the input array +// * the number of times each unique value comes up in the input array +// +// Parameters +// ---------- +// ar : array_like +// Input array. Unless `axis` is specified, this will be flattened if it +// is not already 1-D. +// return_index : bool, optional +// If True, also return the indices of `ar` (along the specified axis, +// if provided, or in the flattened array) that result in the unique array. +// return_inverse : bool, optional +// If True, also return the indices of the unique array (for the specified +// axis, if provided) that can be used to reconstruct `ar`. +// return_counts : bool, optional +// If True, also return the number of times each unique item appears +// in `ar`. +// axis : int or None, optional +// The axis to operate on. If None, `ar` will be flattened. If an integer, +// the subarrays indexed by the given axis will be flattened and treated +// as the elements of a 1-D array with the dimension of the given axis, +// see the notes for more details. Object arrays or structured arrays +// that contain objects are not supported if the `axis` kwarg is used. The +// default is None. +// +// .. 
versionadded:: 1.13.0 +// +// equal_nan : bool, optional +// If True, collapses multiple NaN values in the return array into one. +// +// .. versionadded:: 1.24 +// +// Returns +// ------- +// unique : ndarray +// The sorted unique values. +// unique_indices : ndarray, optional +// The indices of the first occurrences of the unique values in the +// original array. Only provided if `return_index` is True. +// unique_inverse : ndarray, optional +// The indices to reconstruct the original array from the +// unique array. Only provided if `return_inverse` is True. +// unique_counts : ndarray, optional +// The number of times each of the unique values comes up in the +// original array. Only provided if `return_counts` is True. +// +// .. versionadded:: 1.9.0 +// +// See Also +// -------- +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. +// repeat : Repeat elements of an array. +// +// Notes +// ----- +// When an axis is specified the subarrays indexed by the axis are sorted. +// This is done by making the specified axis the first dimension of the array +// (move the axis to the first dimension to keep the order of the other axes) +// and then flattening the subarrays in C order. The flattened subarrays are +// then viewed as a structured type with each element given a label, with the +// effect that we end up with a 1-D array of structured types that can be +// treated in the same way as any other 1-D array. The result is that the +// flattened subarrays are sorted in lexicographic order starting with the +// first element. +// +// .. versionchanged: NumPy 1.21 +// If nan values are in the input array, a single nan is put +// to the end of the sorted unique values. +// +// Also for complex arrays all NaN values are considered equivalent +// (no matter whether the NaN is in the real or imaginary part). 
+// As the representant for the returned array the smallest one in the +// lexicographical order is chosen - see np.sort for how the lexicographical +// order is defined for complex arrays. +// +// Examples +// -------- +// >>> np.unique([1, 1, 2, 2, 3, 3]) +// array([1, 2, 3]) +// >>> a = np.array([[1, 1], [2, 3]]) +// >>> np.unique(a) +// array([1, 2, 3]) +// +// Return the unique rows of a 2D array +// +// >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) +// >>> np.unique(a, axis=0) +// array([[1, 0, 0], [2, 3, 4]]) +// +// Return the indices of the original array that give the unique values: +// +// >>> a = np.array(['a', 'b', 'b', 'c', 'a']) +// >>> u, indices = np.unique(a, return_index=True) +// >>> u +// array(['a', 'b', 'c'], dtype='>> indices +// array([0, 1, 3]) +// >>> a[indices] +// array(['a', 'b', 'c'], dtype='>> a = np.array([1, 2, 6, 4, 2, 3, 2]) +// >>> u, indices = np.unique(a, return_inverse=True) +// >>> u +// array([1, 2, 3, 4, 6]) +// >>> indices +// array([0, 1, 4, 3, 1, 2, 1]) +// >>> u[indices] +// array([1, 2, 6, 4, 2, 3, 2]) +// +// Reconstruct the input values from the unique values and counts: +// +// >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) +// >>> values, counts = np.unique(a, return_counts=True) +// >>> values +// array([1, 2, 3, 4, 6]) +// >>> counts +// array([1, 3, 1, 1, 1]) +// >>> np.repeat(values, counts) +// array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved +// +// +// +//go:linkname Unique py.unique +func Unique(ar *py.Object, returnIndex *py.Object, returnInverse *py.Object, returnCounts *py.Object, axis *py.Object) *py.Object +// +// Test whether each element of a 1-D array is also present in a second array. +// +// Returns a boolean array the same length as `ar1` that is True +// where an element of `ar1` is in `ar2` and False otherwise. +// +// We recommend using :func:`isin` instead of `in1d` for new code. +// +// Parameters +// ---------- +// ar1 : (M,) array_like +// Input array. 
+// ar2 : array_like +// The values against which to test each value of `ar1`. +// assume_unique : bool, optional +// If True, the input arrays are both assumed to be unique, which +// can speed up the calculation. Default is False. +// invert : bool, optional +// If True, the values in the returned array are inverted (that is, +// False where an element of `ar1` is in `ar2` and True otherwise). +// Default is False. ``np.in1d(a, b, invert=True)`` is equivalent +// to (but is faster than) ``np.invert(in1d(a, b))``. +// kind : {None, 'sort', 'table'}, optional +// The algorithm to use. This will not affect the final result, +// but will affect the speed and memory use. The default, None, +// will select automatically based on memory considerations. +// +// * If 'sort', will use a mergesort-based approach. This will have +// a memory usage of roughly 6 times the sum of the sizes of +// `ar1` and `ar2`, not accounting for size of dtypes. +// * If 'table', will use a lookup table approach similar +// to a counting sort. This is only available for boolean and +// integer arrays. This will have a memory usage of the +// size of `ar1` plus the max-min value of `ar2`. `assume_unique` +// has no effect when the 'table' option is used. +// * If None, will automatically choose 'table' if +// the required memory allocation is less than or equal to +// 6 times the sum of the sizes of `ar1` and `ar2`, +// otherwise will use 'sort'. This is done to not use +// a large amount of memory by default, even though +// 'table' may be faster in most cases. If 'table' is chosen, +// `assume_unique` will have no effect. +// +// .. versionadded:: 1.8.0 +// +// Returns +// ------- +// in1d : (M,) ndarray, bool +// The values `ar1[in1d]` are in `ar2`. +// +// See Also +// -------- +// isin : Version of this function that preserves the +// shape of ar1. +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. 
+// +// Notes +// ----- +// `in1d` can be considered as an element-wise function version of the +// python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly +// equivalent to ``np.array([item in b for item in a])``. +// However, this idea fails if `ar2` is a set, or similar (non-sequence) +// container: As ``ar2`` is converted to an array, in those cases +// ``asarray(ar2)`` is an object array rather than the expected array of +// contained values. +// +// Using ``kind='table'`` tends to be faster than `kind='sort'` if the +// following relationship is true: +// ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, +// but may use greater memory. The default value for `kind` will +// be automatically selected based only on memory usage, so one may +// manually set ``kind='table'`` if memory constraints can be relaxed. +// +// .. versionadded:: 1.4.0 +// +// Examples +// -------- +// >>> test = np.array([0, 1, 2, 5, 0]) +// >>> states = [0, 2] +// >>> mask = np.in1d(test, states) +// >>> mask +// array([ True, False, True, False, True]) +// >>> test[mask] +// array([0, 2, 0]) +// >>> mask = np.in1d(test, states, invert=True) +// >>> mask +// array([False, True, False, True, False]) +// >>> test[mask] +// array([1, 5]) +// +// +//go:linkname In1d py.in1d +func In1d(ar1 *py.Object, ar2 *py.Object, assumeUnique *py.Object, invert *py.Object) *py.Object +// +// Calculates ``element in test_elements``, broadcasting over `element` only. +// Returns a boolean array of the same shape as `element` that is True +// where an element of `element` is in `test_elements` and False otherwise. +// +// Parameters +// ---------- +// element : array_like +// Input array. +// test_elements : array_like +// The values against which to test each value of `element`. +// This argument is flattened if it is an array or array_like. +// See notes for behavior with non-array-like parameters. 
+// assume_unique : bool, optional +// If True, the input arrays are both assumed to be unique, which +// can speed up the calculation. Default is False. +// invert : bool, optional +// If True, the values in the returned array are inverted, as if +// calculating `element not in test_elements`. Default is False. +// ``np.isin(a, b, invert=True)`` is equivalent to (but faster +// than) ``np.invert(np.isin(a, b))``. +// kind : {None, 'sort', 'table'}, optional +// The algorithm to use. This will not affect the final result, +// but will affect the speed and memory use. The default, None, +// will select automatically based on memory considerations. +// +// * If 'sort', will use a mergesort-based approach. This will have +// a memory usage of roughly 6 times the sum of the sizes of +// `ar1` and `ar2`, not accounting for size of dtypes. +// * If 'table', will use a lookup table approach similar +// to a counting sort. This is only available for boolean and +// integer arrays. This will have a memory usage of the +// size of `ar1` plus the max-min value of `ar2`. `assume_unique` +// has no effect when the 'table' option is used. +// * If None, will automatically choose 'table' if +// the required memory allocation is less than or equal to +// 6 times the sum of the sizes of `ar1` and `ar2`, +// otherwise will use 'sort'. This is done to not use +// a large amount of memory by default, even though +// 'table' may be faster in most cases. If 'table' is chosen, +// `assume_unique` will have no effect. +// +// +// Returns +// ------- +// isin : ndarray, bool +// Has the same shape as `element`. The values `element[isin]` +// are in `test_elements`. +// +// See Also +// -------- +// in1d : Flattened version of this function. +// numpy.lib.arraysetops : Module with a number of other functions for +// performing set operations on arrays. +// +// Notes +// ----- +// +// `isin` is an element-wise function version of the python keyword `in`. 
+// ``isin(a, b)`` is roughly equivalent to +// ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. +// +// `element` and `test_elements` are converted to arrays if they are not +// already. If `test_elements` is a set (or other non-sequence collection) +// it will be converted to an object array with one element, rather than an +// array of the values contained in `test_elements`. This is a consequence +// of the `array` constructor's way of handling non-sequence collections. +// Converting the set to a list usually gives the desired behavior. +// +// Using ``kind='table'`` tends to be faster than `kind='sort'` if the +// following relationship is true: +// ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, +// but may use greater memory. The default value for `kind` will +// be automatically selected based only on memory usage, so one may +// manually set ``kind='table'`` if memory constraints can be relaxed. +// +// .. versionadded:: 1.13.0 +// +// Examples +// -------- +// >>> element = 2*np.arange(4).reshape((2, 2)) +// >>> element +// array([[0, 2], +// [4, 6]]) +// >>> test_elements = [1, 2, 4, 8] +// >>> mask = np.isin(element, test_elements) +// >>> mask +// array([[False, True], +// [ True, False]]) +// >>> element[mask] +// array([2, 4]) +// +// The indices of the matched values can be obtained with `nonzero`: +// +// >>> np.nonzero(mask) +// (array([0, 1]), array([1, 0])) +// +// The test can also be inverted: +// +// >>> mask = np.isin(element, test_elements, invert=True) +// >>> mask +// array([[ True, False], +// [False, True]]) +// >>> element[mask] +// array([0, 6]) +// +// Because of how `array` handles sets, the following does not +// work as expected: +// +// >>> test_set = {1, 2, 4, 8} +// >>> np.isin(element, test_set) +// array([[False, False], +// [False, False]]) +// +// Casting the set to a list gives the expected result: +// +// >>> np.isin(element, list(test_set)) +// array([[False, True], +// [ True, 
False]]) +// +// +//go:linkname Isin py.isin +func Isin(element *py.Object, testElements *py.Object, assumeUnique *py.Object, invert *py.Object) *py.Object +// +// Save an array to a text file. +// +// Parameters +// ---------- +// fname : filename or file handle +// If the filename ends in ``.gz``, the file is automatically saved in +// compressed gzip format. `loadtxt` understands gzipped files +// transparently. +// X : 1D or 2D array_like +// Data to be saved to a text file. +// fmt : str or sequence of strs, optional +// A single format (%10.5f), a sequence of formats, or a +// multi-format string, e.g. 'Iteration %d -- %10.5f', in which +// case `delimiter` is ignored. For complex `X`, the legal options +// for `fmt` are: +// +// * a single specifier, `fmt='%.4e'`, resulting in numbers formatted +// like `' (%s+%sj)' % (fmt, fmt)` +// * a full string specifying every real and imaginary part, e.g. +// `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns +// * a list of specifiers, one per column - in this case, the real +// and imaginary part must have separate specifiers, +// e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns +// delimiter : str, optional +// String or character separating columns. +// newline : str, optional +// String or character separating lines. +// +// .. versionadded:: 1.5.0 +// header : str, optional +// String that will be written at the beginning of the file. +// +// .. versionadded:: 1.7.0 +// footer : str, optional +// String that will be written at the end of the file. +// +// .. versionadded:: 1.7.0 +// comments : str, optional +// String that will be prepended to the ``header`` and ``footer`` strings, +// to mark them as comments. Default: '# ', as expected by e.g. +// ``numpy.loadtxt``. +// +// .. versionadded:: 1.7.0 +// encoding : {None, str}, optional +// Encoding used to encode the outputfile. Does not apply to output +// streams. 
If the encoding is something other than 'bytes' or 'latin1' +// you will not be able to load the file in NumPy versions < 1.14. Default +// is 'latin1'. +// +// .. versionadded:: 1.14.0 +// +// +// See Also +// -------- +// save : Save an array to a binary file in NumPy ``.npy`` format +// savez : Save several arrays into an uncompressed ``.npz`` archive +// savez_compressed : Save several arrays into a compressed ``.npz`` archive +// +// Notes +// ----- +// Further explanation of the `fmt` parameter +// (``%[flag]width[.precision]specifier``): +// +// flags: +// ``-`` : left justify +// +// ``+`` : Forces to precede result with + or -. +// +// ``0`` : Left pad the number with zeros instead of space (see width). +// +// width: +// Minimum number of characters to be printed. The value is not truncated +// if it has more characters. +// +// precision: +// - For integer specifiers (eg. ``d,i,o,x``), the minimum number of +// digits. +// - For ``e, E`` and ``f`` specifiers, the number of digits to print +// after the decimal point. +// - For ``g`` and ``G``, the maximum number of significant digits. +// - For ``s``, the maximum number of characters. +// +// specifiers: +// ``c`` : character +// +// ``d`` or ``i`` : signed decimal integer +// +// ``e`` or ``E`` : scientific notation with ``e`` or ``E``. +// +// ``f`` : decimal floating point +// +// ``g,G`` : use the shorter of ``e,E`` or ``f`` +// +// ``o`` : signed octal +// +// ``s`` : string of characters +// +// ``u`` : unsigned decimal integer +// +// ``x,X`` : unsigned hexadecimal integer +// +// This explanation of ``fmt`` is not complete, for an exhaustive +// specification see [1]_. +// +// References +// ---------- +// .. [1] `Format Specification Mini-Language +// `_, +// Python Documentation. 
+// +// Examples +// -------- +// >>> x = y = z = np.arange(0.0,5.0,1.0) +// >>> np.savetxt('test.out', x, delimiter=',') # X is an array +// >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays +// >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation +// +// +// +//go:linkname Savetxt py.savetxt +func Savetxt(fname *py.Object, X *py.Object, fmt *py.Object, delimiter *py.Object, newline *py.Object, header *py.Object, footer *py.Object, comments *py.Object, encoding *py.Object) *py.Object +// +// Load data from a text file. +// +// Parameters +// ---------- +// fname : file, str, pathlib.Path, list of str, generator +// File, filename, list, or generator to read. If the filename +// extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note +// that generators must return bytes or strings. The strings +// in a list or produced by a generator are treated as lines. +// dtype : data-type, optional +// Data-type of the resulting array; default: float. If this is a +// structured data-type, the resulting array will be 1-dimensional, and +// each row will be interpreted as an element of the array. In this +// case, the number of columns used must match the number of fields in +// the data-type. +// comments : str or sequence of str or None, optional +// The characters or list of characters used to indicate the start of a +// comment. None implies no comments. For backwards compatibility, byte +// strings will be decoded as 'latin1'. The default is '#'. +// delimiter : str, optional +// The character used to separate the values. For backwards compatibility, +// byte strings will be decoded as 'latin1'. The default is whitespace. +// +// .. versionchanged:: 1.23.0 +// Only single character delimiters are supported. Newline characters +// cannot be used as the delimiter. +// +// converters : dict or callable, optional +// Converter functions to customize value parsing. 
If `converters` is +// callable, the function is applied to all columns, else it must be a +// dict that maps column number to a parser function. +// See examples for further details. +// Default: None. +// +// .. versionchanged:: 1.23.0 +// The ability to pass a single callable to be applied to all columns +// was added. +// +// skiprows : int, optional +// Skip the first `skiprows` lines, including comments; default: 0. +// usecols : int or sequence, optional +// Which columns to read, with 0 being the first. For example, +// ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. +// The default, None, results in all columns being read. +// +// .. versionchanged:: 1.11.0 +// When a single column has to be read it is possible to use +// an integer instead of a tuple. E.g ``usecols = 3`` reads the +// fourth column the same way as ``usecols = (3,)`` would. +// unpack : bool, optional +// If True, the returned array is transposed, so that arguments may be +// unpacked using ``x, y, z = loadtxt(...)``. When used with a +// structured data-type, arrays are returned for each field. +// Default is False. +// ndmin : int, optional +// The returned array will have at least `ndmin` dimensions. +// Otherwise mono-dimensional axes will be squeezed. +// Legal values: 0 (default), 1 or 2. +// +// .. versionadded:: 1.6.0 +// encoding : str, optional +// Encoding used to decode the inputfile. Does not apply to input streams. +// The special value 'bytes' enables backward compatibility workarounds +// that ensures you receive byte arrays as results if possible and passes +// 'latin1' encoded strings to converters. Override this value to receive +// unicode arrays and pass strings as input to converters. If set to None +// the system default is used. The default value is 'bytes'. +// +// .. versionadded:: 1.14.0 +// max_rows : int, optional +// Read `max_rows` rows of content after `skiprows` lines. The default is +// to read all the rows. 
Note that empty rows containing no data such as +// empty lines and comment lines are not counted towards `max_rows`, +// while such lines are counted in `skiprows`. +// +// .. versionadded:: 1.16.0 +// +// .. versionchanged:: 1.23.0 +// Lines containing no data, including comment lines (e.g., lines +// starting with '#' or as specified via `comments`) are not counted +// towards `max_rows`. +// quotechar : unicode character or None, optional +// The character used to denote the start and end of a quoted item. +// Occurrences of the delimiter or comment characters are ignored within +// a quoted item. The default value is ``quotechar=None``, which means +// quoting support is disabled. +// +// If two consecutive instances of `quotechar` are found within a quoted +// field, the first is treated as an escape character. See examples. +// +// .. versionadded:: 1.23.0 +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Data read from the text file. +// +// See Also +// -------- +// load, fromstring, fromregex +// genfromtxt : Load data with missing values handled as specified. +// scipy.io.loadmat : reads MATLAB data files +// +// Notes +// ----- +// This function aims to be a fast reader for simply formatted files. The +// `genfromtxt` function provides more sophisticated handling of, e.g., +// lines with missing values. +// +// Each row in the input text file must have the same number of values to be +// able to read all values. 
If all rows do not have same number of values, a +// subset of up to n columns (where n is the least number of values present +// in all rows) can be read by specifying the columns via `usecols`. +// +// .. versionadded:: 1.10.0 +// +// The strings produced by the Python float.hex method can be used as +// input for floats. +// +// Examples +// -------- +// >>> from io import StringIO # StringIO behaves like a file object +// >>> c = StringIO("0 1\n2 3") +// >>> np.loadtxt(c) +// array([[0., 1.], +// [2., 3.]]) +// +// >>> d = StringIO("M 21 72\nF 35 58") +// >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), +// ... 'formats': ('S1', 'i4', 'f4')}) +// array([(b'M', 21, 72.), (b'F', 35, 58.)], +// dtype=[('gender', 'S1'), ('age', '>> c = StringIO("1,0,2\n3,0,4") +// >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) +// >>> x +// array([1., 3.]) +// >>> y +// array([2., 4.]) +// +// The `converters` argument is used to specify functions to preprocess the +// text prior to parsing. `converters` can be a dictionary that maps +// preprocessing functions to each column: +// +// >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n") +// >>> conv = { +// ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0 +// ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1 +// ... } +// >>> np.loadtxt(s, delimiter=",", converters=conv) +// array([[1., 3.], +// [3., 5.]]) +// +// `converters` can be a callable instead of a dictionary, in which case it +// is applied to all columns: +// +// >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE") +// >>> import functools +// >>> conv = functools.partial(int, base=16) +// >>> np.loadtxt(s, converters=conv) +// array([[222., 173.], +// [192., 222.]]) +// +// This example shows how `converters` can be used to convert a field +// with a trailing minus sign into a negative number. +// +// >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94') +// >>> def conv(fld): +// ... 
return -float(fld[:-1]) if fld.endswith(b'-') else float(fld) +// ... +// >>> np.loadtxt(s, converters=conv) +// array([[ 10.01, -31.25], +// [ 19.22, 64.31], +// [-17.57, 63.94]]) +// +// Using a callable as the converter can be particularly useful for handling +// values with different formatting, e.g. floats with underscores: +// +// >>> s = StringIO("1 2.7 100_000") +// >>> np.loadtxt(s, converters=float) +// array([1.e+00, 2.7e+00, 1.e+05]) +// +// This idea can be extended to automatically handle values specified in +// many different formats: +// +// >>> def conv(val): +// ... try: +// ... return float(val) +// ... except ValueError: +// ... return float.fromhex(val) +// >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2") +// >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None) +// array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00]) +// +// Note that with the default ``encoding="bytes"``, the inputs to the +// converter function are latin-1 encoded byte strings. To deactivate the +// implicit encoding prior to conversion, use ``encoding=None`` +// +// >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94') +// >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x) +// >>> np.loadtxt(s, converters=conv, encoding=None) +// array([[ 10.01, -31.25], +// [ 19.22, 64.31], +// [-17.57, 63.94]]) +// +// Support for quoted fields is enabled with the `quotechar` parameter. 
+// Comment and delimiter characters are ignored when they appear within a +// quoted item delineated by `quotechar`: +// +// >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n') +// >>> dtype = np.dtype([("label", "U12"), ("value", float)]) +// >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"') +// array([('alpha, #42', 10.), ('beta, #64', 2.)], +// dtype=[('label', '>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n') +// >>> dtype = np.dtype([("label", "U12"), ("value", float)]) +// >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"') +// array([('alpha, #42', 10.), ('beta, #64', 2.)], +// dtype=[('label', '>> s = StringIO('"Hello, my name is ""Monty""!"') +// >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') +// array('Hello, my name is "Monty"!', dtype='>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20") +// >>> np.loadtxt(d, usecols=(0, 1)) +// array([[ 1., 2.], +// [ 2., 4.], +// [ 3., 9.], +// [ 4., 16.]]) +// +// +// +//go:linkname Loadtxt py.loadtxt +func Loadtxt(fname *py.Object, dtype *py.Object, comments *py.Object, delimiter *py.Object, converters *py.Object, skiprows *py.Object, usecols *py.Object, unpack *py.Object, ndmin *py.Object, encoding *py.Object, maxRows *py.Object) *py.Object +// +// Load data from a text file, with missing values handled as specified. +// +// Each line past the first `skip_header` lines is split at the `delimiter` +// character, and characters following the `comments` character are discarded. +// +// Parameters +// ---------- +// fname : file, str, pathlib.Path, list of str, generator +// File, filename, list, or generator to read. If the filename +// extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note +// that generators must return bytes or strings. The strings +// in a list or produced by a generator are treated as lines. +// dtype : dtype, optional +// Data type of the resulting array. 
+// If None, the dtypes will be determined by the contents of each +// column, individually. +// comments : str, optional +// The character used to indicate the start of a comment. +// All the characters occurring on a line after a comment are discarded. +// delimiter : str, int, or sequence, optional +// The string used to separate values. By default, any consecutive +// whitespaces act as delimiter. An integer or sequence of integers +// can also be provided as width(s) of each field. +// skiprows : int, optional +// `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. +// skip_header : int, optional +// The number of lines to skip at the beginning of the file. +// skip_footer : int, optional +// The number of lines to skip at the end of the file. +// converters : variable, optional +// The set of functions that convert the data of a column to a value. +// The converters can also be used to provide a default value +// for missing data: ``converters = {3: lambda s: float(s or 0)}``. +// missing : variable, optional +// `missing` was removed in numpy 1.10. Please use `missing_values` +// instead. +// missing_values : variable, optional +// The set of strings corresponding to missing data. +// filling_values : variable, optional +// The set of values to be used as default when the data are missing. +// usecols : sequence, optional +// Which columns to read, with 0 being the first. For example, +// ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. +// names : {None, True, str, sequence}, optional +// If `names` is True, the field names are read from the first line after +// the first `skip_header` lines. This line can optionally be preceded +// by a comment delimiter. If `names` is a sequence or a single-string of +// comma-separated names, the names will be used to define the field names +// in a structured dtype. If `names` is None, the names of the dtype +// fields will be used, if any. 
+// excludelist : sequence, optional +// A list of names to exclude. This list is appended to the default list +// ['return','file','print']. Excluded names are appended with an +// underscore: for example, `file` would become `file_`. +// deletechars : str, optional +// A string combining invalid characters that must be deleted from the +// names. +// defaultfmt : str, optional +// A format used to define default field names, such as "f%i" or "f_%02i". +// autostrip : bool, optional +// Whether to automatically strip white spaces from the variables. +// replace_space : char, optional +// Character(s) used in replacement of white spaces in the variable +// names. By default, use a '_'. +// case_sensitive : {True, False, 'upper', 'lower'}, optional +// If True, field names are case sensitive. +// If False or 'upper', field names are converted to upper case. +// If 'lower', field names are converted to lower case. +// unpack : bool, optional +// If True, the returned array is transposed, so that arguments may be +// unpacked using ``x, y, z = genfromtxt(...)``. When used with a +// structured data-type, arrays are returned for each field. +// Default is False. +// usemask : bool, optional +// If True, return a masked array. +// If False, return a regular array. +// loose : bool, optional +// If True, do not raise errors for invalid values. +// invalid_raise : bool, optional +// If True, an exception is raised if an inconsistency is detected in the +// number of columns. +// If False, a warning is emitted and the offending lines are skipped. +// max_rows : int, optional +// The maximum number of rows to read. Must not be used with skip_footer +// at the same time. If given, the value must be at least 1. Default is +// to read the entire file. +// +// .. versionadded:: 1.10.0 +// encoding : str, optional +// Encoding used to decode the inputfile. Does not apply when `fname` is +// a file object. 
The special value 'bytes' enables backward compatibility +// workarounds that ensure that you receive byte arrays when possible +// and passes latin1 encoded strings to converters. Override this value to +// receive unicode arrays and pass strings as input to converters. If set +// to None the system default is used. The default value is 'bytes'. +// +// .. versionadded:: 1.14.0 +// ndmin : int, optional +// Same parameter as `loadtxt` +// +// .. versionadded:: 1.23.0 +// like : array_like, optional +// Reference object to allow the creation of arrays which are not +// NumPy arrays. If an array-like passed in as ``like`` supports +// the ``__array_function__`` protocol, the result will be defined +// by it. In this case, it ensures the creation of an array object +// compatible with that passed in via this argument. +// +// .. versionadded:: 1.20.0 +// +// Returns +// ------- +// out : ndarray +// Data read from the text file. If `usemask` is True, this is a +// masked array. +// +// See Also +// -------- +// numpy.loadtxt : equivalent function when no data is missing. +// +// Notes +// ----- +// * When spaces are used as delimiters, or when no delimiter has been given +// as input, there should not be any missing data between two fields. +// * When the variables are named (either by a flexible dtype or with `names`), +// there must not be any header in the file (else a ValueError +// exception is raised). +// * Individual values are not stripped of spaces by default. +// When using a custom converter, make sure the function does remove spaces. +// +// References +// ---------- +// .. [1] NumPy User Guide, section `I/O with NumPy +// `_. +// +// Examples +// -------- +// >>> from io import StringIO +// >>> import numpy as np +// +// Comma delimited file with mixed dtype +// +// >>> s = StringIO(u"1,1.3,abcde") +// >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), +// ... 
('mystring','S5')], delimiter=",") +// >>> data +// array((1, 1.3, b'abcde'), +// dtype=[('myint', '>> _ = s.seek(0) # needed for StringIO example only +// >>> data = np.genfromtxt(s, dtype=None, +// ... names = ['myint','myfloat','mystring'], delimiter=",") +// >>> data +// array((1, 1.3, b'abcde'), +// dtype=[('myint', '>> _ = s.seek(0) +// >>> data = np.genfromtxt(s, dtype="i8,f8,S5", +// ... names=['myint','myfloat','mystring'], delimiter=",") +// >>> data +// array((1, 1.3, b'abcde'), +// dtype=[('myint', '>> s = StringIO(u"11.3abcde") +// >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], +// ... delimiter=[1,3,5]) +// >>> data +// array((1, 1.3, b'abcde'), +// dtype=[('intvar', '>> f = StringIO(''' +// ... text,# of chars +// ... hello world,11 +// ... numpy,5''') +// >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') +// array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], +// dtype=[('f0', 'S12'), ('f1', 'S12')]) +// +// +// +//go:linkname Genfromtxt py.genfromtxt +func Genfromtxt(fname *py.Object, dtype *py.Object, comments *py.Object, delimiter *py.Object, skipHeader *py.Object, skipFooter *py.Object, converters *py.Object, missingValues *py.Object, fillingValues *py.Object, usecols *py.Object, names *py.Object, excludelist *py.Object, deletechars *py.Object, replaceSpace *py.Object, autostrip *py.Object, caseSensitive *py.Object, defaultfmt *py.Object, unpack *py.Object, usemask *py.Object, loose *py.Object, invalidRaise *py.Object, maxRows *py.Object, encoding *py.Object) *py.Object +// +// Load ASCII data from a file and return it in a record array. +// +// If ``usemask=False`` a standard `recarray` is returned, +// if ``usemask=True`` a MaskedRecords array is returned. +// +// Parameters +// ---------- +// fname, kwargs : For a description of input parameters, see `genfromtxt`. 
+// +// See Also +// -------- +// numpy.genfromtxt : generic function +// +// Notes +// ----- +// By default, `dtype` is None, which means that the data-type of the output +// array will be determined from the data. +// +// +// +//go:linkname Recfromtxt py.recfromtxt +func Recfromtxt(fname *py.Object) *py.Object +// +// Load ASCII data stored in a comma-separated file. +// +// The returned array is a record array (if ``usemask=False``, see +// `recarray`) or a masked record array (if ``usemask=True``, +// see `ma.mrecords.MaskedRecords`). +// +// Parameters +// ---------- +// fname, kwargs : For a description of input parameters, see `genfromtxt`. +// +// See Also +// -------- +// numpy.genfromtxt : generic function to load ASCII data. +// +// Notes +// ----- +// By default, `dtype` is None, which means that the data-type of the output +// array will be determined from the data. +// +// +// +//go:linkname Recfromcsv py.recfromcsv +func Recfromcsv(fname *py.Object) *py.Object +// +// Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. +// +// .. warning:: Loading files that contain object arrays uses the ``pickle`` +// module, which is not secure against erroneous or maliciously +// constructed data. Consider passing ``allow_pickle=False`` to +// load data that is known not to contain object arrays for the +// safer handling of untrusted sources. +// +// Parameters +// ---------- +// file : file-like object, string, or pathlib.Path +// The file to read. File-like objects must support the +// ``seek()`` and ``read()`` methods and must always +// be opened in binary mode. Pickled files require that the +// file-like object support the ``readline()`` method as well. +// mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional +// If not None, then memory-map the file, using the given mode (see +// `numpy.memmap` for a detailed description of the modes). A +// memory-mapped array is kept on disk. 
However, it can be accessed +// and sliced like any ndarray. Memory mapping is especially useful +// for accessing small fragments of large files without reading the +// entire file into memory. +// allow_pickle : bool, optional +// Allow loading pickled object arrays stored in npy files. Reasons for +// disallowing pickles include security, as loading pickled data can +// execute arbitrary code. If pickles are disallowed, loading object +// arrays will fail. Default: False +// +// .. versionchanged:: 1.16.3 +// Made default False in response to CVE-2019-6446. +// +// fix_imports : bool, optional +// Only useful when loading Python 2 generated pickled files on Python 3, +// which includes npy/npz files containing object arrays. If `fix_imports` +// is True, pickle will try to map the old Python 2 names to the new names +// used in Python 3. +// encoding : str, optional +// What encoding to use when reading Python 2 strings. Only useful when +// loading Python 2 generated pickled files in Python 3, which includes +// npy/npz files containing object arrays. Values other than 'latin1', +// 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical +// data. Default: 'ASCII' +// max_header_size : int, optional +// Maximum allowed size of the header. Large headers may not be safe +// to load securely and thus require explicitly passing a larger value. +// See :py:func:`ast.literal_eval()` for details. +// This option is ignored when `allow_pickle` is passed. In that case +// the file is by definition trusted and the limit is unnecessary. +// +// Returns +// ------- +// result : array, tuple, dict, etc. +// Data stored in the file. For ``.npz`` files, the returned instance +// of NpzFile class must be closed to avoid leaking file descriptors. +// +// Raises +// ------ +// OSError +// If the input file does not exist or cannot be read. +// UnpicklingError +// If ``allow_pickle=True``, but the file cannot be loaded as a pickle. 
+// ValueError +// The file contains an object array, but ``allow_pickle=False`` given. +// EOFError +// When calling ``np.load`` multiple times on the same file handle, +// if all data has already been read +// +// See Also +// -------- +// save, savez, savez_compressed, loadtxt +// memmap : Create a memory-map to an array stored in a file on disk. +// lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. +// +// Notes +// ----- +// - If the file contains pickle data, then whatever object is stored +// in the pickle is returned. +// - If the file is a ``.npy`` file, then a single array is returned. +// - If the file is a ``.npz`` file, then a dictionary-like object is +// returned, containing ``{filename: array}`` key-value pairs, one for +// each file in the archive. +// - If the file is a ``.npz`` file, the returned value supports the +// context manager protocol in a similar fashion to the open function:: +// +// with load('foo.npz') as data: +// a = data['a'] +// +// The underlying file descriptor is closed when exiting the 'with' +// block. 
+// +// Examples +// -------- +// Store data to disk, and load it again: +// +// >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) +// >>> np.load('/tmp/123.npy') +// array([[1, 2, 3], +// [4, 5, 6]]) +// +// Store compressed data to disk, and load it again: +// +// >>> a=np.array([[1, 2, 3], [4, 5, 6]]) +// >>> b=np.array([1, 2]) +// >>> np.savez('/tmp/123.npz', a=a, b=b) +// >>> data = np.load('/tmp/123.npz') +// >>> data['a'] +// array([[1, 2, 3], +// [4, 5, 6]]) +// >>> data['b'] +// array([1, 2]) +// >>> data.close() +// +// Mem-map the stored array, and then access the second row +// directly from disk: +// +// >>> X = np.load('/tmp/123.npy', mmap_mode='r') +// >>> X[1, :] +// memmap([4, 5, 6]) +// +// +// +//go:linkname Load py.load +func Load(file *py.Object, mmapMode *py.Object, allowPickle *py.Object, fixImports *py.Object, encoding *py.Object) *py.Object +// +// Save an array to a binary file in NumPy ``.npy`` format. +// +// Parameters +// ---------- +// file : file, str, or pathlib.Path +// File or filename to which the data is saved. If file is a file-object, +// then the filename is unchanged. If file is a string or Path, a ``.npy`` +// extension will be appended to the filename if it does not already +// have one. +// arr : array_like +// Array data to be saved. +// allow_pickle : bool, optional +// Allow saving object arrays using Python pickles. Reasons for disallowing +// pickles include security (loading pickled data can execute arbitrary +// code) and portability (pickled objects may not be loadable on different +// Python installations, for example if the stored objects require libraries +// that are not available, and not all pickled data is compatible between +// Python 2 and Python 3). +// Default: True +// fix_imports : bool, optional +// Only useful in forcing objects in object arrays on Python 3 to be +// pickled in a Python 2 compatible way. 
If `fix_imports` is True, pickle +// will try to map the new Python 3 names to the old module names used in +// Python 2, so that the pickle data stream is readable with Python 2. +// +// See Also +// -------- +// savez : Save several arrays into a ``.npz`` archive +// savetxt, load +// +// Notes +// ----- +// For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. +// +// Any data saved to the file is appended to the end of the file. +// +// Examples +// -------- +// >>> from tempfile import TemporaryFile +// >>> outfile = TemporaryFile() +// +// >>> x = np.arange(10) +// >>> np.save(outfile, x) +// +// >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file +// >>> np.load(outfile) +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// +// +// >>> with open('test.npy', 'wb') as f: +// ... np.save(f, np.array([1, 2])) +// ... np.save(f, np.array([1, 3])) +// >>> with open('test.npy', 'rb') as f: +// ... a = np.load(f) +// ... b = np.load(f) +// >>> print(a, b) +// # [1 2] [1 3] +// +// +//go:linkname Save py.save +func Save(file *py.Object, arr *py.Object, allowPickle *py.Object, fixImports *py.Object) *py.Object +// Save several arrays into a single file in uncompressed ``.npz`` format. +// +// Provide arrays as keyword arguments to store them under the +// corresponding name in the output file: ``savez(fn, x=x, y=y)``. +// +// If arrays are specified as positional arguments, i.e., ``savez(fn, +// x, y)``, their names will be `arr_0`, `arr_1`, etc. +// +// Parameters +// ---------- +// file : str or file +// Either the filename (string) or an open file (file-like object) +// where the data will be saved. If file is a string or a Path, the +// ``.npz`` extension will be appended to the filename if it is not +// already there. +// args : Arguments, optional +// Arrays to save to the file. Please use keyword arguments (see +// `kwds` below) to assign names to arrays. 
Arrays specified as +// args will be named "arr_0", "arr_1", and so on. +// kwds : Keyword arguments, optional +// Arrays to save to the file. Each array will be saved to the +// output file with its corresponding keyword name. +// +// Returns +// ------- +// None +// +// See Also +// -------- +// save : Save a single array to a binary file in NumPy format. +// savetxt : Save an array to a file as plain text. +// savez_compressed : Save several arrays into a compressed ``.npz`` archive +// +// Notes +// ----- +// The ``.npz`` file format is a zipped archive of files named after the +// variables they contain. The archive is not compressed and each file +// in the archive contains one variable in ``.npy`` format. For a +// description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. +// +// When opening the saved ``.npz`` file with `load` a `NpzFile` object is +// returned. This is a dictionary-like object which can be queried for +// its list of arrays (with the ``.files`` attribute), and for the arrays +// themselves. +// +// Keys passed in `kwds` are used as filenames inside the ZIP archive. +// Therefore, keys should be valid filenames; e.g., avoid keys that begin with +// ``/`` or contain ``.``. +// +// When naming variables with keyword arguments, it is not possible to name a +// variable ``file``, as this would cause the ``file`` argument to be defined +// twice in the call to ``savez``. +// +// Examples +// -------- +// >>> from tempfile import TemporaryFile +// >>> outfile = TemporaryFile() +// >>> x = np.arange(10) +// >>> y = np.sin(x) +// +// Using `savez` with \*args, the arrays are saved with default names. 
+// +// >>> np.savez(outfile, x, y) +// >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file +// >>> npzfile = np.load(outfile) +// >>> npzfile.files +// ['arr_0', 'arr_1'] +// >>> npzfile['arr_0'] +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// +// Using `savez` with \**kwds, the arrays are saved with the keyword names. +// +// >>> outfile = TemporaryFile() +// >>> np.savez(outfile, x=x, y=y) +// >>> _ = outfile.seek(0) +// >>> npzfile = np.load(outfile) +// >>> sorted(npzfile.files) +// ['x', 'y'] +// >>> npzfile['x'] +// array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// +// +// +//go:linkname Savez py.savez +func Savez(file *py.Object, __llgo_va_list ...interface{}) *py.Object +// +// Save several arrays into a single file in compressed ``.npz`` format. +// +// Provide arrays as keyword arguments to store them under the +// corresponding name in the output file: ``savez(fn, x=x, y=y)``. +// +// If arrays are specified as positional arguments, i.e., ``savez(fn, +// x, y)``, their names will be `arr_0`, `arr_1`, etc. +// +// Parameters +// ---------- +// file : str or file +// Either the filename (string) or an open file (file-like object) +// where the data will be saved. If file is a string or a Path, the +// ``.npz`` extension will be appended to the filename if it is not +// already there. +// args : Arguments, optional +// Arrays to save to the file. Please use keyword arguments (see +// `kwds` below) to assign names to arrays. Arrays specified as +// args will be named "arr_0", "arr_1", and so on. +// kwds : Keyword arguments, optional +// Arrays to save to the file. Each array will be saved to the +// output file with its corresponding keyword name. +// +// Returns +// ------- +// None +// +// See Also +// -------- +// numpy.save : Save a single array to a binary file in NumPy format. +// numpy.savetxt : Save an array to a file as plain text. 
+// numpy.savez : Save several arrays into an uncompressed ``.npz`` file format +// numpy.load : Load the files created by savez_compressed. +// +// Notes +// ----- +// The ``.npz`` file format is a zipped archive of files named after the +// variables they contain. The archive is compressed with +// ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable +// in ``.npy`` format. For a description of the ``.npy`` format, see +// :py:mod:`numpy.lib.format`. +// +// +// When opening the saved ``.npz`` file with `load` a `NpzFile` object is +// returned. This is a dictionary-like object which can be queried for +// its list of arrays (with the ``.files`` attribute), and for the arrays +// themselves. +// +// Examples +// -------- +// >>> test_array = np.random.rand(3, 2) +// >>> test_vector = np.random.rand(4) +// >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) +// >>> loaded = np.load('/tmp/123.npz') +// >>> print(np.array_equal(test_array, loaded['a'])) +// True +// >>> print(np.array_equal(test_vector, loaded['b'])) +// True +// +// +// +//go:linkname SavezCompressed py.savez_compressed +func SavezCompressed(file *py.Object, __llgo_va_list ...interface{}) *py.Object +// +// packbits(a, /, axis=None, bitorder='big') +// +// Packs the elements of a binary-valued array into bits in a uint8 array. +// +// The result is padded to full bytes by inserting zero bits at the end. +// +// Parameters +// ---------- +// a : array_like +// An array of integers or booleans whose elements should be packed to +// bits. +// axis : int, optional +// The dimension over which bit-packing is done. +// ``None`` implies packing the flattened array. +// bitorder : {'big', 'little'}, optional +// The order of the input bits. 'big' will mimic bin(val), +// ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will +// reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. +// Defaults to 'big'. +// +// .. 
versionadded:: 1.17.0 +// +// Returns +// ------- +// packed : ndarray +// Array of type uint8 whose elements represent bits corresponding to the +// logical (0 or nonzero) value of the input elements. The shape of +// `packed` has the same number of dimensions as the input (unless `axis` +// is None, in which case the output is 1-D). +// +// See Also +// -------- +// unpackbits: Unpacks elements of a uint8 array into a binary-valued output +// array. +// +// Examples +// -------- +// >>> a = np.array([[[1,0,1], +// ... [0,1,0]], +// ... [[1,1,0], +// ... [0,0,1]]]) +// >>> b = np.packbits(a, axis=-1) +// >>> b +// array([[[160], +// [ 64]], +// [[192], +// [ 32]]], dtype=uint8) +// +// Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, +// and 32 = 0010 0000. +// +// +// +//go:linkname Packbits py.packbits +func Packbits(a *py.Object, axis *py.Object, bitorder *py.Object) *py.Object +// +// unpackbits(a, /, axis=None, count=None, bitorder='big') +// +// Unpacks elements of a uint8 array into a binary-valued output array. +// +// Each element of `a` represents a bit-field that should be unpacked +// into a binary-valued output array. The shape of the output array is +// either 1-D (if `axis` is ``None``) or the same shape as the input +// array with unpacking done along the axis specified. +// +// Parameters +// ---------- +// a : ndarray, uint8 type +// Input array. +// axis : int, optional +// The dimension over which bit-unpacking is done. +// ``None`` implies unpacking the flattened array. +// count : int or None, optional +// The number of elements to unpack along `axis`, provided as a way +// of undoing the effect of packing a size that is not a multiple +// of eight. A non-negative number means to only unpack `count` +// bits. A negative number means to trim off that many bits from +// the end. ``None`` means to unpack the entire array (the +// default). 
Counts larger than the available number of bits will +// add zero padding to the output. Negative counts must not +// exceed the available number of bits. +// +// .. versionadded:: 1.17.0 +// +// bitorder : {'big', 'little'}, optional +// The order of the returned bits. 'big' will mimic bin(val), +// ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse +// the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. +// Defaults to 'big'. +// +// .. versionadded:: 1.17.0 +// +// Returns +// ------- +// unpacked : ndarray, uint8 type +// The elements are binary-valued (0 or 1). +// +// See Also +// -------- +// packbits : Packs the elements of a binary-valued array into bits in +// a uint8 array. +// +// Examples +// -------- +// >>> a = np.array([[2], [7], [23]], dtype=np.uint8) +// >>> a +// array([[ 2], +// [ 7], +// [23]], dtype=uint8) +// >>> b = np.unpackbits(a, axis=1) +// >>> b +// array([[0, 0, 0, 0, 0, 0, 1, 0], +// [0, 0, 0, 0, 0, 1, 1, 1], +// [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) +// >>> c = np.unpackbits(a, axis=1, count=-3) +// >>> c +// array([[0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0], +// [0, 0, 0, 1, 0]], dtype=uint8) +// +// >>> p = np.packbits(b, axis=0) +// >>> np.unpackbits(p, axis=0) +// array([[0, 0, 0, 0, 0, 0, 1, 0], +// [0, 0, 0, 0, 0, 1, 1, 1], +// [0, 0, 0, 1, 0, 1, 1, 1], +// [0, 0, 0, 0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) +// >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) +// True +// +// +// +//go:linkname Unpackbits py.unpackbits +func Unpackbits(a *py.Object, axis *py.Object, count *py.Object, bitorder *py.Object) *py.Object +// +// Construct an array from a text file, using regular expression parsing. +// +// The returned array is always a structured array, and is constructed from +// all matches of the regular expression in the file. 
Groups in the regular +// expression are converted to fields of the structured array. +// +// Parameters +// ---------- +// file : path or file +// Filename or file object to read. +// +// .. versionchanged:: 1.22.0 +// Now accepts `os.PathLike` implementations. +// regexp : str or regexp +// Regular expression used to parse the file. +// Groups in the regular expression correspond to fields in the dtype. +// dtype : dtype or list of dtypes +// Dtype for the structured array; must be a structured datatype. +// encoding : str, optional +// Encoding used to decode the inputfile. Does not apply to input streams. +// +// .. versionadded:: 1.14.0 +// +// Returns +// ------- +// output : ndarray +// The output array, containing the part of the content of `file` that +// was matched by `regexp`. `output` is always a structured array. +// +// Raises +// ------ +// TypeError +// When `dtype` is not a valid dtype for a structured array. +// +// See Also +// -------- +// fromstring, loadtxt +// +// Notes +// ----- +// Dtypes for structured arrays can be specified in several forms, but all +// forms specify at least the data type and field name. For details see +// `basics.rec`. +// +// Examples +// -------- +// >>> from io import StringIO +// >>> text = StringIO("1312 foo\n1534 bar\n444 qux") +// +// >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] +// >>> output = np.fromregex(text, regexp, +// ... [('num', np.int64), ('key', 'S3')]) +// >>> output +// array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], +// dtype=[('num', '>> output['num'] +// array([1312, 1534, 444]) +// +// +// +//go:linkname Fromregex py.fromregex +func Fromregex(file *py.Object, regexp *py.Object, dtype *py.Object, encoding *py.Object) *py.Object +// +// Return the sum of array elements over a given axis treating Not a +// Numbers (NaNs) as zero. +// +// In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or +// empty. In later versions zero is returned. 
+// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose sum is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the sum is computed. The default is to compute the +// sum of the flattened array. +// dtype : data-type, optional +// The type of the returned array and of the accumulator in which the +// elements are summed. By default, the dtype of `a` is used. An +// exception is when `a` has an integer type with less precision than +// the platform (u)intp. In that case, the default will be either +// (u)int32 or (u)int64 depending on whether the platform is 32 or 64 +// bits. For inexact inputs, dtype must be inexact. +// +// .. versionadded:: 1.8.0 +// out : ndarray, optional +// Alternate output array in which to place the result. The default +// is ``None``. If provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. See +// :ref:`ufuncs-output-type` for more details. The casting of NaN to integer +// can yield unexpected results. +// +// .. versionadded:: 1.8.0 +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// +// If the value is anything but the default, then +// `keepdims` will be passed through to the `mean` or `sum` methods +// of sub-classes of `ndarray`. If the sub-classes methods +// does not implement `keepdims` any exceptions will be raised. +// +// .. versionadded:: 1.8.0 +// initial : scalar, optional +// Starting value for the sum. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// where : array_like of bool, optional +// Elements to include in the sum. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// nansum : ndarray. 
+// A new array holding the result is returned unless `out` is +// specified, in which it is returned. The result has the same +// size as `a`, and the same shape as `a` if `axis` is not None +// or `a` is a 1-d array. +// +// See Also +// -------- +// numpy.sum : Sum across array propagating NaNs. +// isnan : Show which elements are NaN. +// isfinite : Show which elements are not NaN or +/-inf. +// +// Notes +// ----- +// If both positive and negative infinity are present, the sum will be Not +// A Number (NaN). +// +// Examples +// -------- +// >>> np.nansum(1) +// 1 +// >>> np.nansum([1]) +// 1 +// >>> np.nansum([1, np.nan]) +// 1.0 +// >>> a = np.array([[1, 1], [1, np.nan]]) +// >>> np.nansum(a) +// 3.0 +// >>> np.nansum(a, axis=0) +// array([2., 1.]) +// >>> np.nansum([1, np.nan, np.inf]) +// inf +// >>> np.nansum([1, np.nan, np.NINF]) +// -inf +// >>> from numpy.testing import suppress_warnings +// >>> with suppress_warnings() as sup: +// ... sup.filter(RuntimeWarning) +// ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present +// nan +// +// +// +//go:linkname Nansum py.nansum +func Nansum(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the maximum of an array or maximum along an axis, ignoring any +// NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is +// raised and NaN is returned for that slice. +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose maximum is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the maximum is computed. The default is to compute +// the maximum of the flattened array. +// out : ndarray, optional +// Alternate output array in which to place the result. 
The default +// is ``None``; if provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. See +// :ref:`ufuncs-output-type` for more details. +// +// .. versionadded:: 1.8.0 +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// If the value is anything but the default, then +// `keepdims` will be passed through to the `max` method +// of sub-classes of `ndarray`. If the sub-classes methods +// does not implement `keepdims` any exceptions will be raised. +// +// .. versionadded:: 1.8.0 +// initial : scalar, optional +// The minimum value of an output element. Must be present to allow +// computation on empty slice. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// where : array_like of bool, optional +// Elements to compare for the maximum. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// nanmax : ndarray +// An array with the same shape as `a`, with the specified axis removed. +// If `a` is a 0-d array, or if axis is None, an ndarray scalar is +// returned. The same dtype as `a` is returned. +// +// See Also +// -------- +// nanmin : +// The minimum value of an array along a given axis, ignoring any NaNs. +// amax : +// The maximum value of an array along a given axis, propagating any NaNs. +// fmax : +// Element-wise maximum of two arrays, ignoring any NaNs. +// maximum : +// Element-wise maximum of two arrays, propagating any NaNs. +// isnan : +// Shows which elements are Not a Number (NaN). +// isfinite: +// Shows which elements are neither NaN nor infinity. +// +// amin, fmin, minimum +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). This means that Not a Number is not equivalent to infinity. 
+// Positive infinity is treated as a very large number and negative +// infinity is treated as a very small (i.e. negative) number. +// +// If the input has a integer type the function is equivalent to np.max. +// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, np.nan]]) +// >>> np.nanmax(a) +// 3.0 +// >>> np.nanmax(a, axis=0) +// array([3., 2.]) +// >>> np.nanmax(a, axis=1) +// array([2., 3.]) +// +// When positive infinity and negative infinity are present: +// +// >>> np.nanmax([1, 2, np.nan, np.NINF]) +// 2.0 +// >>> np.nanmax([1, 2, np.nan, np.inf]) +// inf +// +// +// +//go:linkname Nanmax py.nanmax +func Nanmax(a *py.Object, axis *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return minimum of an array or minimum along an axis, ignoring any NaNs. +// When all-NaN slices are encountered a ``RuntimeWarning`` is raised and +// Nan is returned for that slice. +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose minimum is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the minimum is computed. The default is to compute +// the minimum of the flattened array. +// out : ndarray, optional +// Alternate output array in which to place the result. The default +// is ``None``; if provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. See +// :ref:`ufuncs-output-type` for more details. +// +// .. versionadded:: 1.8.0 +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// If the value is anything but the default, then +// `keepdims` will be passed through to the `min` method +// of sub-classes of `ndarray`. 
If the sub-classes methods +// does not implement `keepdims` any exceptions will be raised. +// +// .. versionadded:: 1.8.0 +// initial : scalar, optional +// The maximum value of an output element. Must be present to allow +// computation on empty slice. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// where : array_like of bool, optional +// Elements to compare for the minimum. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// nanmin : ndarray +// An array with the same shape as `a`, with the specified axis +// removed. If `a` is a 0-d array, or if axis is None, an ndarray +// scalar is returned. The same dtype as `a` is returned. +// +// See Also +// -------- +// nanmax : +// The maximum value of an array along a given axis, ignoring any NaNs. +// amin : +// The minimum value of an array along a given axis, propagating any NaNs. +// fmin : +// Element-wise minimum of two arrays, ignoring any NaNs. +// minimum : +// Element-wise minimum of two arrays, propagating any NaNs. +// isnan : +// Shows which elements are Not a Number (NaN). +// isfinite: +// Shows which elements are neither NaN nor infinity. +// +// amax, fmax, maximum +// +// Notes +// ----- +// NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic +// (IEEE 754). This means that Not a Number is not equivalent to infinity. +// Positive infinity is treated as a very large number and negative +// infinity is treated as a very small (i.e. negative) number. +// +// If the input has a integer type the function is equivalent to np.min. 
+// +// Examples +// -------- +// >>> a = np.array([[1, 2], [3, np.nan]]) +// >>> np.nanmin(a) +// 1.0 +// >>> np.nanmin(a, axis=0) +// array([1., 2.]) +// >>> np.nanmin(a, axis=1) +// array([1., 3.]) +// +// When positive infinity and negative infinity are present: +// +// >>> np.nanmin([1, 2, np.nan, np.inf]) +// 1.0 +// >>> np.nanmin([1, 2, np.nan, np.NINF]) +// -inf +// +// +// +//go:linkname Nanmin py.nanmin +func Nanmin(a *py.Object, axis *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the indices of the maximum values in the specified axis ignoring +// NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the +// results cannot be trusted if a slice contains only NaNs and -Infs. +// +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : int, optional +// Axis along which to operate. By default flattened input is used. +// out : array, optional +// If provided, the result will be inserted into this array. It should +// be of the appropriate shape and dtype. +// +// .. versionadded:: 1.22.0 +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the array. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// index_array : ndarray +// An array of indices or a single index value. +// +// See Also +// -------- +// argmax, nanargmin +// +// Examples +// -------- +// >>> a = np.array([[np.nan, 4], [2, 3]]) +// >>> np.argmax(a) +// 0 +// >>> np.nanargmax(a) +// 1 +// >>> np.nanargmax(a, axis=0) +// array([1, 0]) +// >>> np.nanargmax(a, axis=1) +// array([1, 1]) +// +// +// +//go:linkname Nanargmax py.nanargmax +func Nanargmax(a *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Return the indices of the minimum values in the specified axis ignoring +// NaNs. 
For all-NaN slices ``ValueError`` is raised. Warning: the results +// cannot be trusted if a slice contains only NaNs and Infs. +// +// Parameters +// ---------- +// a : array_like +// Input data. +// axis : int, optional +// Axis along which to operate. By default flattened input is used. +// out : array, optional +// If provided, the result will be inserted into this array. It should +// be of the appropriate shape and dtype. +// +// .. versionadded:: 1.22.0 +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the array. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// index_array : ndarray +// An array of indices or a single index value. +// +// See Also +// -------- +// argmin, nanargmax +// +// Examples +// -------- +// >>> a = np.array([[np.nan, 4], [2, 3]]) +// >>> np.argmin(a) +// 0 +// >>> np.nanargmin(a) +// 2 +// >>> np.nanargmin(a, axis=0) +// array([1, 1]) +// >>> np.nanargmin(a, axis=1) +// array([1, 0]) +// +// +// +//go:linkname Nanargmin py.nanargmin +func Nanargmin(a *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// Compute the arithmetic mean along the specified axis, ignoring NaNs. +// +// Returns the average of the array elements. The average is taken over +// the flattened array by default, otherwise over the specified axis. +// `float64` intermediate and return values are used for integer inputs. +// +// For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. +// +// .. versionadded:: 1.8.0 +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose mean is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the means are computed. The default is to compute +// the mean of the flattened array. 
+// dtype : data-type, optional +// Type to use in computing the mean. For integer inputs, the default +// is `float64`; for inexact inputs, it is the same as the input +// dtype. +// out : ndarray, optional +// Alternate output array in which to place the result. The default +// is ``None``; if provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. See +// :ref:`ufuncs-output-type` for more details. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// If the value is anything but the default, then +// `keepdims` will be passed through to the `mean` or `sum` methods +// of sub-classes of `ndarray`. If the sub-classes methods +// does not implement `keepdims` any exceptions will be raised. +// where : array_like of bool, optional +// Elements to include in the mean. See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// m : ndarray, see dtype parameter above +// If `out=None`, returns a new array containing the mean values, +// otherwise a reference to the output array is returned. Nan is +// returned for slices that contain only NaNs. +// +// See Also +// -------- +// average : Weighted average +// mean : Arithmetic mean taken while not ignoring NaNs +// var, nanvar +// +// Notes +// ----- +// The arithmetic mean is the sum of the non-NaN elements along the axis +// divided by the number of non-NaN elements. +// +// Note that for floating-point input, the mean is computed using the same +// precision the input has. Depending on the input data, this can cause +// the results to be inaccurate, especially for `float32`. Specifying a +// higher-precision accumulator using the `dtype` keyword can alleviate +// this issue. 
+// +// Examples +// -------- +// >>> a = np.array([[1, np.nan], [3, 4]]) +// >>> np.nanmean(a) +// 2.6666666666666665 +// >>> np.nanmean(a, axis=0) +// array([2., 4.]) +// >>> np.nanmean(a, axis=1) +// array([1., 3.5]) # may vary +// +// +// +//go:linkname Nanmean py.nanmean +func Nanmean(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object) *py.Object +// +// Compute the median along the specified axis, while ignoring NaNs. +// +// Returns the median of the array elements. +// +// .. versionadded:: 1.9.0 +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array. +// axis : {int, sequence of int, None}, optional +// Axis or axes along which the medians are computed. The default +// is to compute the median along a flattened version of the array. +// A sequence of axes is supported since version 1.9.0. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output, +// but the type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow use of memory of input array `a` for +// calculations. The input array will be modified by the call to +// `median`. This will save memory when you do not need to preserve +// the contents of the input array. Treat the input as undefined, +// but it will probably be fully or partially sorted. Default is +// False. If `overwrite_input` is ``True`` and `a` is not already an +// `ndarray`, an error will be raised. +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// If this is anything but the default value it will be passed +// through (in the special case of an empty array) to the +// `mean` function of the underlying array. 
If the array is +// a sub-class and `mean` does not have the kwarg `keepdims` this +// will raise a RuntimeError. +// +// Returns +// ------- +// median : ndarray +// A new array holding the result. If the input contains integers +// or floats smaller than ``float64``, then the output data-type is +// ``np.float64``. Otherwise, the data-type of the output is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// mean, median, percentile +// +// Notes +// ----- +// Given a vector ``V`` of length ``N``, the median of ``V`` is the +// middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., +// ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two +// middle values of ``V_sorted`` when ``N`` is even. +// +// Examples +// -------- +// >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) +// >>> a[0, 1] = np.nan +// >>> a +// array([[10., nan, 4.], +// [ 3., 2., 1.]]) +// >>> np.median(a) +// nan +// >>> np.nanmedian(a) +// 3.0 +// >>> np.nanmedian(a, axis=0) +// array([6.5, 2. , 2.5]) +// >>> np.median(a, axis=1) +// array([nan, 2.]) +// >>> b = a.copy() +// >>> np.nanmedian(b, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a==b) +// >>> b = a.copy() +// >>> np.nanmedian(b, axis=None, overwrite_input=True) +// 3.0 +// >>> assert not np.all(a==b) +// +// +// +//go:linkname Nanmedian py.nanmedian +func Nanmedian(a *py.Object, axis *py.Object, out *py.Object, overwriteInput *py.Object, keepdims *py.Object) *py.Object +// +// Compute the qth percentile of the data along the specified axis, +// while ignoring nan values. +// +// Returns the qth percentile(s) of the array elements. +// +// .. versionadded:: 1.9.0 +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array, containing +// nan values to be ignored. 
+// q : array_like of float +// Percentile or sequence of percentiles to compute, which must be +// between 0 and 100 inclusive. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the percentiles are computed. The default +// is to compute the percentile(s) along a flattened version of the +// array. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape and buffer length as the expected output, but the +// type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow the input array `a` to be modified by +// intermediate calculations, to save memory. In this case, the +// contents of the input `a` after this function completes is +// undefined. +// method : str, optional +// This parameter specifies the method to use for estimating the +// percentile. There are many different methods, some unique to NumPy. +// See the notes for explanation. The options sorted by their R type +// as summarized in the H&F paper [1]_ are: +// +// 1. 'inverted_cdf' +// 2. 'averaged_inverted_cdf' +// 3. 'closest_observation' +// 4. 'interpolated_inverted_cdf' +// 5. 'hazen' +// 6. 'weibull' +// 7. 'linear' (default) +// 8. 'median_unbiased' +// 9. 'normal_unbiased' +// +// The first three methods are discontinuous. NumPy further defines the +// following discontinuous variations of the default 'linear' (7.) option: +// +// * 'lower' +// * 'higher', +// * 'midpoint' +// * 'nearest' +// +// .. versionchanged:: 1.22.0 +// This argument was previously called "interpolation" and only +// offered the "linear" default and last four options. +// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left in +// the result as dimensions with size one. With this option, the +// result will broadcast correctly against the original array `a`. 
+// +// If this is anything but the default value it will be passed +// through (in the special case of an empty array) to the +// `mean` function of the underlying array. If the array is +// a sub-class and `mean` does not have the kwarg `keepdims` this +// will raise a RuntimeError. +// +// interpolation : str, optional +// Deprecated name for the method keyword argument. +// +// .. deprecated:: 1.22.0 +// +// Returns +// ------- +// percentile : scalar or ndarray +// If `q` is a single percentile and `axis=None`, then the result +// is a scalar. If multiple percentiles are given, first axis of +// the result corresponds to the percentiles. The other axes are +// the axes that remain after the reduction of `a`. If the input +// contains integers or floats smaller than ``float64``, the output +// data-type is ``float64``. Otherwise, the output data-type is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// nanmean +// nanmedian : equivalent to ``nanpercentile(..., 50)`` +// percentile, median, mean +// nanquantile : equivalent to nanpercentile, except q in range [0, 1]. +// +// Notes +// ----- +// For more information please see `numpy.percentile` +// +// Examples +// -------- +// >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) +// >>> a[0][1] = np.nan +// >>> a +// array([[10., nan, 4.], +// [ 3., 2., 1.]]) +// >>> np.percentile(a, 50) +// nan +// >>> np.nanpercentile(a, 50) +// 3.0 +// >>> np.nanpercentile(a, 50, axis=0) +// array([6.5, 2. , 2.5]) +// >>> np.nanpercentile(a, 50, axis=1, keepdims=True) +// array([[7.], +// [2.]]) +// >>> m = np.nanpercentile(a, 50, axis=0) +// >>> out = np.zeros_like(m) +// >>> np.nanpercentile(a, 50, axis=0, out=out) +// array([6.5, 2. , 2.5]) +// >>> m +// array([6.5, 2. 
, 2.5]) +// +// >>> b = a.copy() +// >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a==b) +// +// References +// ---------- +// .. [1] R. J. Hyndman and Y. Fan, +// "Sample quantiles in statistical packages," +// The American Statistician, 50(4), pp. 361-365, 1996 +// +// +// +//go:linkname Nanpercentile py.nanpercentile +func Nanpercentile(a *py.Object, q *py.Object, axis *py.Object, out *py.Object, overwriteInput *py.Object, method *py.Object, keepdims *py.Object) *py.Object +// +// Compute the variance along the specified axis, while ignoring NaNs. +// +// Returns the variance of the array elements, a measure of the spread of +// a distribution. The variance is computed for the flattened array by +// default, otherwise over the specified axis. +// +// For all-NaN slices or slices with zero degrees of freedom, NaN is +// returned and a `RuntimeWarning` is raised. +// +// .. versionadded:: 1.8.0 +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose variance is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the variance is computed. The default is to compute +// the variance of the flattened array. +// dtype : data-type, optional +// Type to use in computing the variance. For arrays of integer type +// the default is `float64`; for arrays of float types it is the same as +// the array type. +// out : ndarray, optional +// Alternate output array in which to place the result. It must have +// the same shape as the expected output, but the type is cast if +// necessary. +// ddof : int, optional +// "Delta Degrees of Freedom": the divisor used in the calculation is +// ``N - ddof``, where ``N`` represents the number of non-NaN +// elements. By default `ddof` is zero. 
+// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// where : array_like of bool, optional +// Elements to include in the variance. See `~numpy.ufunc.reduce` for +// details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// variance : ndarray, see dtype parameter above +// If `out` is None, return a new array containing the variance, +// otherwise return a reference to the output array. If ddof is >= the +// number of non-NaN elements in a slice or the slice contains only +// NaNs, then the result for that slice is NaN. +// +// See Also +// -------- +// std : Standard deviation +// mean : Average +// var : Variance while not ignoring NaNs +// nanstd, nanmean +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// The variance is the average of the squared deviations from the mean, +// i.e., ``var = mean(abs(x - x.mean())**2)``. +// +// The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. +// If, however, `ddof` is specified, the divisor ``N - ddof`` is used +// instead. In standard statistical practice, ``ddof=1`` provides an +// unbiased estimator of the variance of a hypothetical infinite +// population. ``ddof=0`` provides a maximum likelihood estimate of the +// variance for normally distributed variables. +// +// Note that for complex numbers, the absolute value is taken before +// squaring, so that the result is always real and nonnegative. +// +// For floating-point input, the variance is computed using the same +// precision the input has. Depending on the input data, this can cause +// the results to be inaccurate, especially for `float32` (see example +// below). Specifying a higher-accuracy accumulator using the ``dtype`` +// keyword can alleviate this issue. 
+// +// For this function to work on sub-classes of ndarray, they must define +// `sum` with the kwarg `keepdims` +// +// Examples +// -------- +// >>> a = np.array([[1, np.nan], [3, 4]]) +// >>> np.nanvar(a) +// 1.5555555555555554 +// >>> np.nanvar(a, axis=0) +// array([1., 0.]) +// >>> np.nanvar(a, axis=1) +// array([0., 0.25]) # may vary +// +// +// +//go:linkname Nanvar py.nanvar +func Nanvar(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, ddof *py.Object, keepdims *py.Object) *py.Object +// +// Compute the standard deviation along the specified axis, while +// ignoring NaNs. +// +// Returns the standard deviation, a measure of the spread of a +// distribution, of the non-NaN array elements. The standard deviation is +// computed for the flattened array by default, otherwise over the +// specified axis. +// +// For all-NaN slices or slices with zero degrees of freedom, NaN is +// returned and a `RuntimeWarning` is raised. +// +// .. versionadded:: 1.8.0 +// +// Parameters +// ---------- +// a : array_like +// Calculate the standard deviation of the non-NaN values. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the standard deviation is computed. The default is +// to compute the standard deviation of the flattened array. +// dtype : dtype, optional +// Type to use in computing the standard deviation. For arrays of +// integer type the default is float64, for arrays of float types it +// is the same as the array type. +// out : ndarray, optional +// Alternative output array in which to place the result. It must have +// the same shape as the expected output but the type (of the +// calculated values) will be cast if necessary. +// ddof : int, optional +// Means Delta Degrees of Freedom. The divisor used in calculations +// is ``N - ddof``, where ``N`` represents the number of non-NaN +// elements. By default `ddof` is zero. 
+// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left +// in the result as dimensions with size one. With this option, +// the result will broadcast correctly against the original `a`. +// +// If this value is anything but the default it is passed through +// as-is to the relevant functions of the sub-classes. If these +// functions do not have a `keepdims` kwarg, a RuntimeError will +// be raised. +// where : array_like of bool, optional +// Elements to include in the standard deviation. +// See `~numpy.ufunc.reduce` for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// standard_deviation : ndarray, see dtype parameter above. +// If `out` is None, return a new array containing the standard +// deviation, otherwise return a reference to the output array. If +// ddof is >= the number of non-NaN elements in a slice or the slice +// contains only NaNs, then the result for that slice is NaN. +// +// See Also +// -------- +// var, mean, std +// nanvar, nanmean +// :ref:`ufuncs-output-type` +// +// Notes +// ----- +// The standard deviation is the square root of the average of the squared +// deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. +// +// The average squared deviation is normally calculated as +// ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is +// specified, the divisor ``N - ddof`` is used instead. In standard +// statistical practice, ``ddof=1`` provides an unbiased estimator of the +// variance of the infinite population. ``ddof=0`` provides a maximum +// likelihood estimate of the variance for normally distributed variables. +// The standard deviation computed in this function is the square root of +// the estimated variance, so even with ``ddof=1``, it will not be an +// unbiased estimate of the standard deviation per se. 
+// +// Note that, for complex numbers, `std` takes the absolute value before +// squaring, so that the result is always real and nonnegative. +// +// For floating-point input, the *std* is computed using the same +// precision the input has. Depending on the input data, this can cause +// the results to be inaccurate, especially for float32 (see example +// below). Specifying a higher-accuracy accumulator using the `dtype` +// keyword can alleviate this issue. +// +// Examples +// -------- +// >>> a = np.array([[1, np.nan], [3, 4]]) +// >>> np.nanstd(a) +// 1.247219128924647 +// >>> np.nanstd(a, axis=0) +// array([1., 0.]) +// >>> np.nanstd(a, axis=1) +// array([0., 0.5]) # may vary +// +// +// +//go:linkname Nanstd py.nanstd +func Nanstd(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, ddof *py.Object, keepdims *py.Object) *py.Object +// +// Return the product of array elements over a given axis treating Not a +// Numbers (NaNs) as ones. +// +// One is returned for slices that are all-NaN or empty. +// +// .. versionadded:: 1.10.0 +// +// Parameters +// ---------- +// a : array_like +// Array containing numbers whose product is desired. If `a` is not an +// array, a conversion is attempted. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the product is computed. The default is to compute +// the product of the flattened array. +// dtype : data-type, optional +// The type of the returned array and of the accumulator in which the +// elements are summed. By default, the dtype of `a` is used. An +// exception is when `a` has an integer type with less precision than +// the platform (u)intp. In that case, the default will be either +// (u)int32 or (u)int64 depending on whether the platform is 32 or 64 +// bits. For inexact inputs, dtype must be inexact. +// out : ndarray, optional +// Alternate output array in which to place the result. The default +// is ``None``. 
If provided, it must have the same shape as the +// expected output, but the type will be cast if necessary. See +// :ref:`ufuncs-output-type` for more details. The casting of NaN to integer +// can yield unexpected results. +// keepdims : bool, optional +// If True, the axes which are reduced are left in the result as +// dimensions with size one. With this option, the result will +// broadcast correctly against the original `arr`. +// initial : scalar, optional +// The starting value for this product. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.22.0 +// where : array_like of bool, optional +// Elements to include in the product. See `~numpy.ufunc.reduce` +// for details. +// +// .. versionadded:: 1.22.0 +// +// Returns +// ------- +// nanprod : ndarray +// A new array holding the result is returned unless `out` is +// specified, in which case it is returned. +// +// See Also +// -------- +// numpy.prod : Product across array propagating NaNs. +// isnan : Show which elements are NaN. +// +// Examples +// -------- +// >>> np.nanprod(1) +// 1 +// >>> np.nanprod([1]) +// 1 +// >>> np.nanprod([1, np.nan]) +// 1.0 +// >>> a = np.array([[1, 2], [3, np.nan]]) +// >>> np.nanprod(a) +// 6.0 +// >>> np.nanprod(a, axis=0) +// array([3., 2.]) +// +// +// +//go:linkname Nanprod py.nanprod +func Nanprod(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object, keepdims *py.Object, initial *py.Object, where *py.Object) *py.Object +// +// Return the cumulative sum of array elements over a given axis treating Not a +// Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are +// encountered and leading NaNs are replaced by zeros. +// +// Zeros are returned for slices that are all-NaN or empty. +// +// .. versionadded:: 1.12.0 +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// Axis along which the cumulative sum is computed. 
The default +// (None) is to compute the cumsum over the flattened array. +// dtype : dtype, optional +// Type of the returned array and of the accumulator in which the +// elements are summed. If `dtype` is not specified, it defaults +// to the dtype of `a`, unless `a` has an integer dtype with a +// precision less than that of the default platform integer. In +// that case, the default platform integer is used. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output +// but the type will be cast if necessary. See :ref:`ufuncs-output-type` for +// more details. +// +// Returns +// ------- +// nancumsum : ndarray. +// A new array holding the result is returned unless `out` is +// specified, in which it is returned. The result has the same +// size as `a`, and the same shape as `a` if `axis` is not None +// or `a` is a 1-d array. +// +// See Also +// -------- +// numpy.cumsum : Cumulative sum across array propagating NaNs. +// isnan : Show which elements are NaN. +// +// Examples +// -------- +// >>> np.nancumsum(1) +// array([1]) +// >>> np.nancumsum([1]) +// array([1]) +// >>> np.nancumsum([1, np.nan]) +// array([1., 1.]) +// >>> a = np.array([[1, 2], [3, np.nan]]) +// >>> np.nancumsum(a) +// array([1., 3., 6., 6.]) +// >>> np.nancumsum(a, axis=0) +// array([[1., 2.], +// [4., 2.]]) +// >>> np.nancumsum(a, axis=1) +// array([[1., 3.], +// [3., 3.]]) +// +// +// +//go:linkname Nancumsum py.nancumsum +func Nancumsum(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object) *py.Object +// +// Return the cumulative product of array elements over a given axis treating Not a +// Numbers (NaNs) as one. The cumulative product does not change when NaNs are +// encountered and leading NaNs are replaced by ones. +// +// Ones are returned for slices that are all-NaN or empty. +// +// .. 
versionadded:: 1.12.0 +// +// Parameters +// ---------- +// a : array_like +// Input array. +// axis : int, optional +// Axis along which the cumulative product is computed. By default +// the input is flattened. +// dtype : dtype, optional +// Type of the returned array, as well as of the accumulator in which +// the elements are multiplied. If *dtype* is not specified, it +// defaults to the dtype of `a`, unless `a` has an integer dtype with +// a precision less than that of the default platform integer. In +// that case, the default platform integer is used instead. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output +// but the type of the resulting values will be cast if necessary. +// +// Returns +// ------- +// nancumprod : ndarray +// A new array holding the result is returned unless `out` is +// specified, in which case it is returned. +// +// See Also +// -------- +// numpy.cumprod : Cumulative product across array propagating NaNs. +// isnan : Show which elements are NaN. +// +// Examples +// -------- +// >>> np.nancumprod(1) +// array([1]) +// >>> np.nancumprod([1]) +// array([1]) +// >>> np.nancumprod([1, np.nan]) +// array([1., 1.]) +// >>> a = np.array([[1, 2], [3, np.nan]]) +// >>> np.nancumprod(a) +// array([1., 2., 6., 6.]) +// >>> np.nancumprod(a, axis=0) +// array([[1., 2.], +// [3., 2.]]) +// >>> np.nancumprod(a, axis=1) +// array([[1., 2.], +// [3., 3.]]) +// +// +// +//go:linkname Nancumprod py.nancumprod +func Nancumprod(a *py.Object, axis *py.Object, dtype *py.Object, out *py.Object) *py.Object +// +// Compute the qth quantile of the data along the specified axis, +// while ignoring nan values. +// Returns the qth quantile(s) of the array elements. +// +// .. 
versionadded:: 1.15.0 +// +// Parameters +// ---------- +// a : array_like +// Input array or object that can be converted to an array, containing +// nan values to be ignored +// q : array_like of float +// Probability or sequence of probabilities for the quantiles to compute. +// Values must be between 0 and 1 inclusive. +// axis : {int, tuple of int, None}, optional +// Axis or axes along which the quantiles are computed. The +// default is to compute the quantile(s) along a flattened +// version of the array. +// out : ndarray, optional +// Alternative output array in which to place the result. It must +// have the same shape and buffer length as the expected output, +// but the type (of the output) will be cast if necessary. +// overwrite_input : bool, optional +// If True, then allow the input array `a` to be modified by intermediate +// calculations, to save memory. In this case, the contents of the input +// `a` after this function completes is undefined. +// method : str, optional +// This parameter specifies the method to use for estimating the +// quantile. There are many different methods, some unique to NumPy. +// See the notes for explanation. The options sorted by their R type +// as summarized in the H&F paper [1]_ are: +// +// 1. 'inverted_cdf' +// 2. 'averaged_inverted_cdf' +// 3. 'closest_observation' +// 4. 'interpolated_inverted_cdf' +// 5. 'hazen' +// 6. 'weibull' +// 7. 'linear' (default) +// 8. 'median_unbiased' +// 9. 'normal_unbiased' +// +// The first three methods are discontinuous. NumPy further defines the +// following discontinuous variations of the default 'linear' (7.) option: +// +// * 'lower' +// * 'higher', +// * 'midpoint' +// * 'nearest' +// +// .. versionchanged:: 1.22.0 +// This argument was previously called "interpolation" and only +// offered the "linear" default and last four options. 
+// +// keepdims : bool, optional +// If this is set to True, the axes which are reduced are left in +// the result as dimensions with size one. With this option, the +// result will broadcast correctly against the original array `a`. +// +// If this is anything but the default value it will be passed +// through (in the special case of an empty array) to the +// `mean` function of the underlying array. If the array is +// a sub-class and `mean` does not have the kwarg `keepdims` this +// will raise a RuntimeError. +// +// interpolation : str, optional +// Deprecated name for the method keyword argument. +// +// .. deprecated:: 1.22.0 +// +// Returns +// ------- +// quantile : scalar or ndarray +// If `q` is a single probability and `axis=None`, then the result +// is a scalar. If multiple probability levels are given, first axis of +// the result corresponds to the quantiles. The other axes are +// the axes that remain after the reduction of `a`. If the input +// contains integers or floats smaller than ``float64``, the output +// data-type is ``float64``. Otherwise, the output data-type is the +// same as that of the input. If `out` is specified, that array is +// returned instead. +// +// See Also +// -------- +// quantile +// nanmean, nanmedian +// nanmedian : equivalent to ``nanquantile(..., 0.5)`` +// nanpercentile : same as nanquantile, but with q in the range [0, 100]. +// +// Notes +// ----- +// For more information please see `numpy.quantile` +// +// Examples +// -------- +// >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) +// >>> a[0][1] = np.nan +// >>> a +// array([[10., nan, 4.], +// [ 3., 2., 1.]]) +// >>> np.quantile(a, 0.5) +// nan +// >>> np.nanquantile(a, 0.5) +// 3.0 +// >>> np.nanquantile(a, 0.5, axis=0) +// array([6.5, 2. 
, 2.5]) +// >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) +// array([[7.], +// [2.]]) +// >>> m = np.nanquantile(a, 0.5, axis=0) +// >>> out = np.zeros_like(m) +// >>> np.nanquantile(a, 0.5, axis=0, out=out) +// array([6.5, 2. , 2.5]) +// >>> m +// array([6.5, 2. , 2.5]) +// >>> b = a.copy() +// >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) +// array([7., 2.]) +// >>> assert not np.all(a==b) +// +// References +// ---------- +// .. [1] R. J. Hyndman and Y. Fan, +// "Sample quantiles in statistical packages," +// The American Statistician, 50(4), pp. 361-365, 1996 +// +// +// +//go:linkname Nanquantile py.nanquantile +func Nanquantile(a *py.Object, q *py.Object, axis *py.Object, out *py.Object, overwriteInput *py.Object, method *py.Object, keepdims *py.Object) *py.Object +// +// Compute the histogram of a dataset. +// +// Parameters +// ---------- +// a : array_like +// Input data. The histogram is computed over the flattened array. +// bins : int or sequence of scalars or str, optional +// If `bins` is an int, it defines the number of equal-width +// bins in the given range (10, by default). If `bins` is a +// sequence, it defines a monotonically increasing array of bin edges, +// including the rightmost edge, allowing for non-uniform bin widths. +// +// .. versionadded:: 1.11.0 +// +// If `bins` is a string, it defines the method used to calculate the +// optimal bin width, as defined by `histogram_bin_edges`. +// +// range : (float, float), optional +// The lower and upper range of the bins. If not provided, range +// is simply ``(a.min(), a.max())``. Values outside the range are +// ignored. The first element of the range must be less than or +// equal to the second. `range` affects the automatic bin +// computation as well. While bin width is computed to be optimal +// based on the actual data within `range`, the bin count will fill +// the entire range including portions containing no data. 
+// weights : array_like, optional +// An array of weights, of the same shape as `a`. Each value in +// `a` only contributes its associated weight towards the bin count +// (instead of 1). If `density` is True, the weights are +// normalized, so that the integral of the density over the range +// remains 1. +// density : bool, optional +// If ``False``, the result will contain the number of samples in +// each bin. If ``True``, the result is the value of the +// probability *density* function at the bin, normalized such that +// the *integral* over the range is 1. Note that the sum of the +// histogram values will not be equal to 1 unless bins of unity +// width are chosen; it is not a probability *mass* function. +// +// Returns +// ------- +// hist : array +// The values of the histogram. See `density` and `weights` for a +// description of the possible semantics. +// bin_edges : array of dtype float +// Return the bin edges ``(length(hist)+1)``. +// +// +// See Also +// -------- +// histogramdd, bincount, searchsorted, digitize, histogram_bin_edges +// +// Notes +// ----- +// All but the last (righthand-most) bin is half-open. In other words, +// if `bins` is:: +// +// [1, 2, 3, 4] +// +// then the first bin is ``[1, 2)`` (including 1, but excluding 2) and +// the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which +// *includes* 4. +// +// +// Examples +// -------- +// >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) +// (array([0, 2, 1]), array([0, 1, 2, 3])) +// >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) +// (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) +// >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) +// (array([1, 4, 1]), array([0, 1, 2, 3])) +// +// >>> a = np.arange(5) +// >>> hist, bin_edges = np.histogram(a, density=True) +// >>> hist +// array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) +// >>> hist.sum() +// 2.4999999999999996 +// >>> np.sum(hist * np.diff(bin_edges)) +// 1.0 +// +// .. 
versionadded:: 1.11.0 +// +// Automated Bin Selection Methods example, using 2 peak random data +// with 2000 points: +// +// >>> import matplotlib.pyplot as plt +// >>> rng = np.random.RandomState(10) # deterministic random data +// >>> a = np.hstack((rng.normal(size=1000), +// ... rng.normal(loc=5, scale=2, size=1000))) +// >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram +// >>> plt.title("Histogram with 'auto' bins") +// Text(0.5, 1.0, "Histogram with 'auto' bins") +// >>> plt.show() +// +// +// +//go:linkname Histogram py.histogram +func Histogram(a *py.Object, bins *py.Object, range_ *py.Object, density *py.Object, weights *py.Object) *py.Object +// +// Compute the multidimensional histogram of some data. +// +// Parameters +// ---------- +// sample : (N, D) array, or (N, D) array_like +// The data to be histogrammed. +// +// Note the unusual interpretation of sample when an array_like: +// +// * When an array, each row is a coordinate in a D-dimensional space - +// such as ``histogramdd(np.array([p1, p2, p3]))``. +// * When an array_like, each element is the list of values for single +// coordinate - such as ``histogramdd((X, Y, Z))``. +// +// The first form should be preferred. +// +// bins : sequence or int, optional +// The bin specification: +// +// * A sequence of arrays describing the monotonically increasing bin +// edges along each dimension. +// * The number of bins for each dimension (nx, ny, ... =bins) +// * The number of bins for all dimensions (nx=ny=...=bins). +// +// range : sequence, optional +// A sequence of length D, each an optional (lower, upper) tuple giving +// the outer bin edges to be used if the edges are not given explicitly in +// `bins`. +// An entry of None in the sequence results in the minimum and maximum +// values being used for the corresponding dimension. +// The default, None, is equivalent to passing a tuple of D None values. 
+// density : bool, optional +// If False, the default, returns the number of samples in each bin. +// If True, returns the probability *density* function at the bin, +// ``bin_count / sample_count / bin_volume``. +// weights : (N,) array_like, optional +// An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. +// Weights are normalized to 1 if density is True. If density is False, +// the values of the returned histogram are equal to the sum of the +// weights belonging to the samples falling into each bin. +// +// Returns +// ------- +// H : ndarray +// The multidimensional histogram of sample x. See density and weights +// for the different possible semantics. +// edges : list +// A list of D arrays describing the bin edges for each dimension. +// +// See Also +// -------- +// histogram: 1-D histogram +// histogram2d: 2-D histogram +// +// Examples +// -------- +// >>> r = np.random.randn(100,3) +// >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) +// >>> H.shape, edges[0].size, edges[1].size, edges[2].size +// ((5, 8, 4), 6, 9, 5) +// +// +// +//go:linkname Histogramdd py.histogramdd +func Histogramdd(sample *py.Object, bins *py.Object, range_ *py.Object, density *py.Object, weights *py.Object) *py.Object +// +// Function to calculate only the edges of the bins used by the `histogram` +// function. +// +// Parameters +// ---------- +// a : array_like +// Input data. The histogram is computed over the flattened array. +// bins : int or sequence of scalars or str, optional +// If `bins` is an int, it defines the number of equal-width +// bins in the given range (10, by default). If `bins` is a +// sequence, it defines the bin edges, including the rightmost +// edge, allowing for non-uniform bin widths. 
+// +// If `bins` is a string from the list below, `histogram_bin_edges` will use +// the method chosen to calculate the optimal bin width and +// consequently the number of bins (see `Notes` for more detail on +// the estimators) from the data that falls within the requested +// range. While the bin width will be optimal for the actual data +// in the range, the number of bins will be computed to fill the +// entire range, including the empty portions. For visualisation, +// using the 'auto' option is suggested. Weighted data is not +// supported for automated bin size selection. +// +// 'auto' +// Maximum of the 'sturges' and 'fd' estimators. Provides good +// all around performance. +// +// 'fd' (Freedman Diaconis Estimator) +// Robust (resilient to outliers) estimator that takes into +// account data variability and data size. +// +// 'doane' +// An improved version of Sturges' estimator that works better +// with non-normal datasets. +// +// 'scott' +// Less robust estimator that takes into account data variability +// and data size. +// +// 'stone' +// Estimator based on leave-one-out cross-validation estimate of +// the integrated squared error. Can be regarded as a generalization +// of Scott's rule. +// +// 'rice' +// Estimator does not take variability into account, only data +// size. Commonly overestimates number of bins required. +// +// 'sturges' +// R's default method, only accounts for data size. Only +// optimal for gaussian data and underestimates number of bins +// for large non-gaussian datasets. +// +// 'sqrt' +// Square root (of data size) estimator, used by Excel and +// other programs for its speed and simplicity. +// +// range : (float, float), optional +// The lower and upper range of the bins. If not provided, range +// is simply ``(a.min(), a.max())``. Values outside the range are +// ignored. The first element of the range must be less than or +// equal to the second. `range` affects the automatic bin +// computation as well. 
While bin width is computed to be optimal +// based on the actual data within `range`, the bin count will fill +// the entire range including portions containing no data. +// +// weights : array_like, optional +// An array of weights, of the same shape as `a`. Each value in +// `a` only contributes its associated weight towards the bin count +// (instead of 1). This is currently not used by any of the bin estimators, +// but may be in the future. +// +// Returns +// ------- +// bin_edges : array of dtype float +// The edges to pass into `histogram` +// +// See Also +// -------- +// histogram +// +// Notes +// ----- +// The methods to estimate the optimal number of bins are well founded +// in literature, and are inspired by the choices R provides for +// histogram visualisation. Note that having the number of bins +// proportional to :math:`n^{1/3}` is asymptotically optimal, which is +// why it appears in most estimators. These are simply plug-in methods +// that give good starting points for number of bins. In the equations +// below, :math:`h` is the binwidth and :math:`n_h` is the number of +// bins. All estimators that compute bin counts are recast to bin width +// using the `ptp` of the data. The final bin count is obtained from +// ``np.round(np.ceil(range / h))``. The final bin width is often less +// than what is returned by the estimators below. +// +// 'auto' (maximum of the 'sturges' and 'fd' estimators) +// A compromise to get a good value. For small datasets the Sturges +// value will usually be chosen, while larger datasets will usually +// default to FD. Avoids the overly conservative behaviour of FD +// and Sturges for small and large datasets respectively. +// Switchover point is usually :math:`a.size \approx 1000`. +// +// 'fd' (Freedman Diaconis Estimator) +// .. math:: h = 2 \frac{IQR}{n^{1/3}} +// +// The binwidth is proportional to the interquartile range (IQR) +// and inversely proportional to cube root of a.size. 
Can be too +// conservative for small datasets, but is quite good for large +// datasets. The IQR is very robust to outliers. +// +// 'scott' +// .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} +// +// The binwidth is proportional to the standard deviation of the +// data and inversely proportional to cube root of ``x.size``. Can +// be too conservative for small datasets, but is quite good for +// large datasets. The standard deviation is not very robust to +// outliers. Values are very similar to the Freedman-Diaconis +// estimator in the absence of outliers. +// +// 'rice' +// .. math:: n_h = 2n^{1/3} +// +// The number of bins is only proportional to cube root of +// ``a.size``. It tends to overestimate the number of bins and it +// does not take into account data variability. +// +// 'sturges' +// .. math:: n_h = \log _{2}(n) + 1 +// +// The number of bins is the base 2 log of ``a.size``. This +// estimator assumes normality of data and is too conservative for +// larger, non-normal datasets. This is the default method in R's +// ``hist`` method. +// +// 'doane' +// .. math:: n_h = 1 + \log_{2}(n) + +// \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) +// +// g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] +// +// \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} +// +// An improved version of Sturges' formula that produces better +// estimates for non-normal datasets. This estimator attempts to +// account for the skew of the data. +// +// 'sqrt' +// .. math:: n_h = \sqrt n +// +// The simplest and fastest estimator. Only takes into account the +// data size. +// +// Examples +// -------- +// >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) +// >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) +// array([0. , 0.25, 0.5 , 0.75, 1. ]) +// >>> np.histogram_bin_edges(arr, bins=2) +// array([0. , 2.5, 5. 
]) +// +// For consistency with histogram, an array of pre-computed bins is +// passed through unmodified: +// +// >>> np.histogram_bin_edges(arr, [1, 2]) +// array([1, 2]) +// +// This function allows one set of bins to be computed, and reused across +// multiple histograms: +// +// >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') +// >>> shared_bins +// array([0., 1., 2., 3., 4., 5.]) +// +// >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) +// >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) +// >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) +// +// >>> hist_0; hist_1 +// array([1, 1, 0, 1, 0]) +// array([2, 0, 1, 1, 2]) +// +// Which gives more easily comparable results than using separate bins for +// each histogram: +// +// >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') +// >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') +// >>> hist_0; hist_1 +// array([1, 1, 1]) +// array([2, 1, 1, 2]) +// >>> bins_0; bins_1 +// array([0., 1., 2., 3.]) +// array([0. , 1.25, 2.5 , 3.75, 5. ]) +// +// +// +//go:linkname HistogramBinEdges py.histogram_bin_edges +func HistogramBinEdges(a *py.Object, bins *py.Object, range_ *py.Object, weights *py.Object) *py.Object +// +// Build a matrix object from a string, nested sequence, or array. +// +// Parameters +// ---------- +// obj : str or array_like +// Input data. If a string, variables in the current scope may be +// referenced by name. +// ldict : dict, optional +// A dictionary that replaces local operands in current frame. +// Ignored if `obj` is not a string or `gdict` is None. +// gdict : dict, optional +// A dictionary that replaces global operands in current frame. +// Ignored if `obj` is not a string. +// +// Returns +// ------- +// out : matrix +// Returns a matrix object, which is a specialized 2-D array. 
+// +// See Also +// -------- +// block : +// A generalization of this function for N-d arrays, that returns normal +// ndarrays. +// +// Examples +// -------- +// >>> A = np.mat('1 1; 1 1') +// >>> B = np.mat('2 2; 2 2') +// >>> C = np.mat('3 4; 5 6') +// >>> D = np.mat('7 8; 9 0') +// +// All the following expressions construct the same block matrix: +// +// >>> np.bmat([[A, B], [C, D]]) +// matrix([[1, 1, 2, 2], +// [1, 1, 2, 2], +// [3, 4, 7, 8], +// [5, 6, 9, 0]]) +// >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) +// matrix([[1, 1, 2, 2], +// [1, 1, 2, 2], +// [3, 4, 7, 8], +// [5, 6, 9, 0]]) +// >>> np.bmat('A,B; C,D') +// matrix([[1, 1, 2, 2], +// [1, 1, 2, 2], +// [3, 4, 7, 8], +// [5, 6, 9, 0]]) +// +// +// +//go:linkname Bmat py.bmat +func Bmat(obj *py.Object, ldict *py.Object, gdict *py.Object) *py.Object +// +// Interpret the input as a matrix. +// +// Unlike `matrix`, `asmatrix` does not make a copy if the input is already +// a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. +// +// Parameters +// ---------- +// data : array_like +// Input data. +// dtype : data-type +// Data-type of the output matrix. +// +// Returns +// ------- +// mat : matrix +// `data` interpreted as a matrix. +// +// Examples +// -------- +// >>> x = np.array([[1, 2], [3, 4]]) +// +// >>> m = np.asmatrix(x) +// +// >>> x[0,0] = 5 +// +// >>> m +// matrix([[5, 2], +// [3, 4]]) +// +// +// +//go:linkname Mat py.mat +func Mat(data *py.Object, dtype *py.Object) *py.Object +// +// Interpret the input as a matrix. +// +// Unlike `matrix`, `asmatrix` does not make a copy if the input is already +// a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. +// +// Parameters +// ---------- +// data : array_like +// Input data. +// dtype : data-type +// Data-type of the output matrix. +// +// Returns +// ------- +// mat : matrix +// `data` interpreted as a matrix. 
+// +// Examples +// -------- +// >>> x = np.array([[1, 2], [3, 4]]) +// +// >>> m = np.asmatrix(x) +// +// >>> x[0,0] = 5 +// +// >>> m +// matrix([[5, 2], +// [3, 4]]) +// +// +// +//go:linkname Asmatrix py.asmatrix +func Asmatrix(data *py.Object, dtype *py.Object) *py.Object +// absolute(x, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj]) +// +// Calculate the absolute value element-wise. +// +// ``np.abs`` is a shorthand for this function. +// +// Parameters +// ---------- +// x : array_like +// Input array. +// out : ndarray, None, or tuple of ndarray and None, optional +// A location into which the result is stored. If provided, it must have +// a shape that the inputs broadcast to. If not provided or None, +// a freshly-allocated array is returned. A tuple (possible only as a +// keyword argument) must have length equal to the number of outputs. +// where : array_like, optional +// This condition is broadcast over the input. At locations where the +// condition is True, the `out` array will be set to the ufunc result. +// Elsewhere, the `out` array will retain its original value. +// Note that if an uninitialized `out` array is created via the default +// ``out=None``, locations within it where the condition is False will +// remain uninitialized. +// **kwargs +// For other keyword-only arguments, see the +// :ref:`ufunc docs `. +// +// Returns +// ------- +// absolute : ndarray +// An ndarray containing the absolute value of +// each element in `x`. For complex input, ``a + ib``, the +// absolute value is :math:`\sqrt{ a^2 + b^2 }`. +// This is a scalar if `x` is a scalar. 
+// +// Examples +// -------- +// >>> x = np.array([-1.2, 1.2]) +// >>> np.absolute(x) +// array([ 1.2, 1.2]) +// >>> np.absolute(1.2 + 1j) +// 1.5620499351813308 +// +// Plot the function over ``[-10, 10]``: +// +// >>> import matplotlib.pyplot as plt +// +// >>> x = np.linspace(start=-10, stop=10, num=101) +// >>> plt.plot(x, np.absolute(x)) +// >>> plt.show() +// +// Plot the function over the complex plane: +// +// >>> xx = x + 1j * x[:, np.newaxis] +// >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray') +// >>> plt.show() +// +// The `abs` function can be used as a shorthand for ``np.absolute`` on +// ndarrays. +// +// >>> x = np.array([-1.2, 1.2]) +// >>> abs(x) +// array([1.2, 1.2]) +// +//go:linkname Abs py.abs +func Abs(__llgo_va_list ...interface{}) *py.Object diff --git a/py/tabulate/go.mod b/py/tabulate/go.mod new file mode 100644 index 00000000..adb83866 --- /dev/null +++ b/py/tabulate/go.mod @@ -0,0 +1,5 @@ +module github.com/PengPengPeng717/llpkg/py/tabulate + +go 1.24.5 + +require github.com/goplus/lib v0.3.0 diff --git a/py/tabulate/go.sum b/py/tabulate/go.sum new file mode 100644 index 00000000..54e0f00c --- /dev/null +++ b/py/tabulate/go.sum @@ -0,0 +1,2 @@ +github.com/goplus/lib v0.3.0 h1:y0ZGb5Q/RikW1oMMB4Di7XIZIpuzh/7mlrR8HNbxXCA= +github.com/goplus/lib v0.3.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/py/tabulate/llpkg.cfg b/py/tabulate/llpkg.cfg new file mode 100644 index 00000000..b5d80b88 --- /dev/null +++ b/py/tabulate/llpkg.cfg @@ -0,0 +1,17 @@ +{ + "type": "python", + "upstream": { + "installer": { + "name": "pip" + }, + "package": { + "name": "tabulate", + "version": "0.9.0" + } + }, + "llpyg": { + "output_dir": "./test", + "mod_name": "github.com/PengPengPeng717/llpkg/py/tabulate", + "mod_depth": 1 + } +} diff --git a/py/tabulate/llpyg.cfg b/py/tabulate/llpyg.cfg new file mode 100644 index 00000000..1d427184 --- /dev/null +++ b/py/tabulate/llpyg.cfg @@ -0,0 +1,7 @@ +{ + "name": "tabulate", + 
"libName": "tabulate", + "modules": [ + "tabulate" + ] +} diff --git a/py/tabulate/tabulate.go b/py/tabulate/tabulate.go new file mode 100644 index 00000000..d463d585 --- /dev/null +++ b/py/tabulate/tabulate.go @@ -0,0 +1,537 @@ +package tabulate + +import ( + "github.com/goplus/lib/py" + _ "unsafe" +) + +const LLGoPackage = "py.tabulate" +// Returns a new subclass of tuple with named fields. +// +// >>> Point = namedtuple('Point', ['x', 'y']) +// >>> Point.__doc__ # docstring for the new class +// 'Point(x, y)' +// >>> p = Point(11, y=22) # instantiate with positional args or keywords +// >>> p[0] + p[1] # indexable like a plain tuple +// 33 +// >>> x, y = p # unpack like a regular tuple +// >>> x, y +// (11, 22) +// >>> p.x + p.y # fields also accessible by name +// 33 +// >>> d = p._asdict() # convert to a dictionary +// >>> d['x'] +// 11 +// >>> Point(**d) # convert from a dictionary +// Point(x=11, y=22) +// >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields +// Point(x=100, y=22) +// +// +// +//go:linkname Namedtuple py.namedtuple +func Namedtuple(typename *py.Object, fieldNames *py.Object) *py.Object +// +// Replace special characters "&", "<" and ">" to HTML-safe sequences. +// If the optional flag quote is true (the default), the quotation mark +// characters, both double quote (") and single quote (') characters are also +// translated. +// +// +//go:linkname Htmlescape py.htmlescape +func Htmlescape(s *py.Object, quote *py.Object) *py.Object +// reduce(function, iterable[, initial]) -> value +// +// Apply a function of two arguments cumulatively to the items of an iterable, from left to right. +// +// This effectively reduces the iterable to a single value. If initial is present, +// it is placed before the items of the iterable in the calculation, and serves as +// a default when the iterable is empty. +// +// For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) +// calculates ((((1 + 2) + 3) + 4) + 5). 
+// +//go:linkname Reduce py.reduce +func Reduce(function *py.Object, iterable *py.Object, initial *py.Object) *py.Object +// Construct a simple TableFormat with columns separated by a separator. +// +// >>> tsv = simple_separated_format("\t") ; tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \t 1\nspam\t23' +// True +// +// +// +//go:linkname SimpleSeparatedFormat py.simple_separated_format +func SimpleSeparatedFormat(separator *py.Object) *py.Object +// Format a fixed width table for pretty printing. +// +// >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) +// --- --------- +// 1 2.34 +// -56 8.999 +// 2 10001 +// --- --------- +// +// The first required argument (`tabular_data`) can be a +// list-of-lists (or another iterable of iterables), a list of named +// tuples, a dictionary of iterables, an iterable of dictionaries, +// an iterable of dataclasses (Python 3.7+), a two-dimensional NumPy array, +// NumPy record array, or a Pandas' dataframe. +// +// +// Table headers +// ------------- +// +// To print nice column headers, supply the second argument (`headers`): +// +// - `headers` can be an explicit list of column headers +// - if `headers="firstrow"`, then the first row of data is used +// - if `headers="keys"`, then dictionary keys or column indices are used +// +// Otherwise a headerless table is produced. +// +// If the number of headers is less than the number of columns, they +// are supposed to be names of the last columns. This is consistent +// with the plain-text format of R and Pandas' dataframes. +// +// >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], +// ... headers="firstrow")) +// sex age +// ----- ----- ----- +// Alice F 24 +// Bob M 19 +// +// By default, pandas.DataFrame data have an additional column called +// row index. To add a similar column to all other types of data, +// use `showindex="always"` or `showindex=True`. 
To suppress row indices +// for all types of data, pass `showindex="never" or `showindex=False`. +// To add a custom row index column, pass `showindex=some_iterable`. +// +// >>> print(tabulate([["F",24],["M",19]], showindex="always")) +// - - -- +// 0 F 24 +// 1 M 19 +// - - -- +// +// +// Column alignment +// ---------------- +// +// `tabulate` tries to detect column types automatically, and aligns +// the values properly. By default it aligns decimal points of the +// numbers (or flushes integer numbers to the right), and flushes +// everything else to the left. Possible column alignments +// (`numalign`, `stralign`) are: "right", "center", "left", "decimal" +// (only for `numalign`), and None (to disable alignment). +// +// +// Table formats +// ------------- +// +// `intfmt` is a format specification used for columns which +// contain numeric data without a decimal point. This can also be +// a list or tuple of format strings, one per column. +// +// `floatfmt` is a format specification used for columns which +// contain numeric data with a decimal point. This can also be +// a list or tuple of format strings, one per column. +// +// `None` values are replaced with a `missingval` string (like +// `floatfmt`, this can also be a list of values for different +// columns): +// +// >>> print(tabulate([["spam", 1, None], +// ... ["eggs", 42, 3.14], +// ... ["other", None, 2.7]], missingval="?")) +// ----- -- ---- +// spam 1 ? +// eggs 42 3.14 +// other ? 2.7 +// ----- -- ---- +// +// Various plain-text table formats (`tablefmt`) are supported: +// 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', +// 'latex', 'latex_raw', 'latex_booktabs', 'latex_longtable' and tsv. +// Variable `tabulate_formats`contains the list of currently supported formats. +// +// "plain" format doesn't use any pseudographics to draw tables, +// it separates columns with a double space: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... 
["strings", "numbers"], "plain")) +// strings numbers +// spam 41.9999 +// eggs 451 +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) +// spam 41.9999 +// eggs 451 +// +// "simple" format is like Pandoc simple_tables: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "simple")) +// strings numbers +// --------- --------- +// spam 41.9999 +// eggs 451 +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) +// ---- -------- +// spam 41.9999 +// eggs 451 +// ---- -------- +// +// "grid" is similar to tables produced by Emacs table.el package or +// Pandoc grid_tables: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "grid")) +// +-----------+-----------+ +// | strings | numbers | +// +===========+===========+ +// | spam | 41.9999 | +// +-----------+-----------+ +// | eggs | 451 | +// +-----------+-----------+ +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +// +------+----------+ +// | spam | 41.9999 | +// +------+----------+ +// | eggs | 451 | +// +------+----------+ +// +// "simple_grid" draws a grid using single-line box-drawing +// characters: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "simple_grid")) +// ┌───────────┬───────────┐ +// │ strings │ numbers │ +// ├───────────┼───────────┤ +// │ spam │ 41.9999 │ +// ├───────────┼───────────┤ +// │ eggs │ 451 │ +// └───────────┴───────────┘ +// +// "rounded_grid" draws a grid using single-line box-drawing +// characters with rounded corners: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... 
["strings", "numbers"], "rounded_grid")) +// ╭───────────┬───────────╮ +// │ strings │ numbers │ +// ├───────────┼───────────┤ +// │ spam │ 41.9999 │ +// ├───────────┼───────────┤ +// │ eggs │ 451 │ +// ╰───────────┴───────────╯ +// +// "heavy_grid" draws a grid using bold (thick) single-line box-drawing +// characters: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "heavy_grid")) +// ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ +// ┃ strings ┃ numbers ┃ +// ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ +// ┃ spam ┃ 41.9999 ┃ +// ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ +// ┃ eggs ┃ 451 ┃ +// ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ +// +// "mixed_grid" draws a grid using a mix of light (thin) and heavy (thick) lines +// box-drawing characters: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "mixed_grid")) +// ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ +// │ strings │ numbers │ +// ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ +// │ spam │ 41.9999 │ +// ├───────────┼───────────┤ +// │ eggs │ 451 │ +// ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ +// +// "double_grid" draws a grid using double-line box-drawing +// characters: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "double_grid")) +// ╔═══════════╦═══════════╗ +// ║ strings ║ numbers ║ +// ╠═══════════╬═══════════╣ +// ║ spam ║ 41.9999 ║ +// ╠═══════════╬═══════════╣ +// ║ eggs ║ 451 ║ +// ╚═══════════╩═══════════╝ +// +// "fancy_grid" draws a grid using a mix of single and +// double-line box-drawing characters: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "fancy_grid")) +// ╒═══════════╤═══════════╕ +// │ strings │ numbers │ +// ╞═══════════╪═══════════╡ +// │ spam │ 41.9999 │ +// ├───────────┼───────────┤ +// │ eggs │ 451 │ +// ╘═══════════╧═══════════╛ +// +// "outline" is the same as the "grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... 
["strings", "numbers"], "outline")) +// +-----------+-----------+ +// | strings | numbers | +// +===========+===========+ +// | spam | 41.9999 | +// | eggs | 451 | +// +-----------+-----------+ +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="outline")) +// +------+----------+ +// | spam | 41.9999 | +// | eggs | 451 | +// +------+----------+ +// +// "simple_outline" is the same as the "simple_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "simple_outline")) +// ┌───────────┬───────────┐ +// │ strings │ numbers │ +// ├───────────┼───────────┤ +// │ spam │ 41.9999 │ +// │ eggs │ 451 │ +// └───────────┴───────────┘ +// +// "rounded_outline" is the same as the "rounded_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "rounded_outline")) +// ╭───────────┬───────────╮ +// │ strings │ numbers │ +// ├───────────┼───────────┤ +// │ spam │ 41.9999 │ +// │ eggs │ 451 │ +// ╰───────────┴───────────╯ +// +// "heavy_outline" is the same as the "heavy_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "heavy_outline")) +// ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ +// ┃ strings ┃ numbers ┃ +// ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ +// ┃ spam ┃ 41.9999 ┃ +// ┃ eggs ┃ 451 ┃ +// ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ +// +// "mixed_outline" is the same as the "mixed_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... 
["strings", "numbers"], "mixed_outline")) +// ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ +// │ strings │ numbers │ +// ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ +// │ spam │ 41.9999 │ +// │ eggs │ 451 │ +// ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ +// +// "double_outline" is the same as the "double_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "double_outline")) +// ╔═══════════╦═══════════╗ +// ║ strings ║ numbers ║ +// ╠═══════════╬═══════════╣ +// ║ spam ║ 41.9999 ║ +// ║ eggs ║ 451 ║ +// ╚═══════════╩═══════════╝ +// +// "fancy_outline" is the same as the "fancy_grid" format but doesn't draw lines between rows: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "fancy_outline")) +// ╒═══════════╤═══════════╕ +// │ strings │ numbers │ +// ╞═══════════╪═══════════╡ +// │ spam │ 41.9999 │ +// │ eggs │ 451 │ +// ╘═══════════╧═══════════╛ +// +// "pipe" is like tables in PHP Markdown Extra extension or Pandoc +// pipe_tables: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "pipe")) +// | strings | numbers | +// |:----------|----------:| +// | spam | 41.9999 | +// | eggs | 451 | +// +// "presto" is like tables produce by the Presto CLI: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "presto")) +// strings | numbers +// -----------+----------- +// spam | 41.9999 +// eggs | 451 +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) +// |:-----|---------:| +// | spam | 41.9999 | +// | eggs | 451 | +// +// "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They +// are slightly different from "pipe" format by not using colons to +// define column alignment, and using a "+" sign to indicate line +// intersections: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... 
["strings", "numbers"], "orgtbl")) +// | strings | numbers | +// |-----------+-----------| +// | spam | 41.9999 | +// | eggs | 451 | +// +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) +// | spam | 41.9999 | +// | eggs | 451 | +// +// "rst" is like a simple table format from reStructuredText; please +// note that reStructuredText accepts also "grid" tables: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], +// ... ["strings", "numbers"], "rst")) +// ========= ========= +// strings numbers +// ========= ========= +// spam 41.9999 +// eggs 451 +// ========= ========= +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) +// ==== ======== +// spam 41.9999 +// eggs 451 +// ==== ======== +// +// "mediawiki" produces a table markup used in Wikipedia and on other +// MediaWiki-based sites: +// +// >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], +// ... headers="firstrow", tablefmt="mediawiki")) +// {| class="wikitable" style="text-align: left;" +// |+ +// |- +// ! strings !! align="right"| numbers +// |- +// | spam || align="right"| 41.9999 +// |- +// | eggs || align="right"| 451 +// |} +// +// "html" produces HTML markup as an html.escape'd str +// with a ._repr_html_ method so that Jupyter Lab and Notebook display the HTML +// and a .str property so that the raw HTML remains accessible +// the unsafehtml table format can be used if an unescaped HTML format is required: +// +// >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], +// ... headers="firstrow", tablefmt="html")) +// +// +// +// +// +// +// +// +//
strings numbers
spam 41.9999
eggs 451
+// +// "latex" produces a tabular environment of LaTeX document markup: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) +// \begin{tabular}{lr} +// \hline +// spam & 41.9999 \\ +// eggs & 451 \\ +// \hline +// \end{tabular} +// +// "latex_raw" is similar to "latex", but doesn't escape special characters, +// such as backslash and underscore, so LaTeX commands may embedded into +// cells' values: +// +// >>> print(tabulate([["spam$_9$", 41.9999], ["\\emph{eggs}", "451.0"]], tablefmt="latex_raw")) +// \begin{tabular}{lr} +// \hline +// spam$_9$ & 41.9999 \\ +// \emph{eggs} & 451 \\ +// \hline +// \end{tabular} +// +// "latex_booktabs" produces a tabular environment of LaTeX document markup +// using the booktabs.sty package: +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) +// \begin{tabular}{lr} +// \toprule +// spam & 41.9999 \\ +// eggs & 451 \\ +// \bottomrule +// \end{tabular} +// +// "latex_longtable" produces a tabular environment that can stretch along +// multiple pages, using the longtable package for LaTeX. +// +// >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_longtable")) +// \begin{longtable}{lr} +// \hline +// spam & 41.9999 \\ +// eggs & 451 \\ +// \hline +// \end{longtable} +// +// +// Number parsing +// -------------- +// By default, anything which can be parsed as a number is a number. +// This ensures numbers represented as strings are aligned properly. +// This can lead to weird results for particular strings such as +// specific git SHAs e.g. "42992e1" will be parsed into the number +// 429920 and aligned as such. +// +// To completely disable number parsing (and alignment), use +// `disable_numparse=True`. For more fine grained control, a list column +// indices is used to disable number parsing only on those columns +// e.g. `disable_numparse=[0, 2]` would disable number parsing only on the +// first and third columns. 
+// +// Column Widths and Auto Line Wrapping +// ------------------------------------ +// Tabulate will, by default, set the width of each column to the length of the +// longest element in that column. However, in situations where fields are expected +// to reasonably be too long to look good as a single line, tabulate can help automate +// word wrapping long fields for you. Use the parameter `maxcolwidth` to provide a +// list of maximal column widths +// +// >>> print(tabulate( [('1', 'John Smith', 'This is a rather long description that might look better if it is wrapped a bit')], headers=("Issue Id", "Author", "Description"), maxcolwidths=[None, None, 30], tablefmt="grid" )) +// +------------+------------+-------------------------------+ +// | Issue Id | Author | Description | +// +============+============+===============================+ +// | 1 | John Smith | This is a rather long | +// | | | description that might look | +// | | | better if it is wrapped a bit | +// +------------+------------+-------------------------------+ +// +// Header column width can be specified in a similar way using `maxheadercolwidth` +// +// +// +//go:linkname Tabulate py.tabulate +func Tabulate(tabularData *py.Object, headers *py.Object, tablefmt *py.Object, floatfmt *py.Object, intfmt *py.Object, numalign *py.Object, stralign *py.Object, missingval *py.Object, showindex *py.Object, disableNumparse *py.Object, colalign *py.Object, maxcolwidths *py.Object, rowalign *py.Object, maxheadercolwidths *py.Object) *py.Object diff --git a/py/torch/go.mod b/py/torch/go.mod new file mode 100644 index 00000000..f9f98d60 --- /dev/null +++ b/py/torch/go.mod @@ -0,0 +1,5 @@ +module github.com/PengPengPeng717/llpkg/py/torch + +go 1.24.5 + +require github.com/goplus/lib v0.3.0 diff --git a/py/torch/go.sum b/py/torch/go.sum new file mode 100644 index 00000000..54e0f00c --- /dev/null +++ b/py/torch/go.sum @@ -0,0 +1,2 @@ +github.com/goplus/lib v0.3.0 
h1:y0ZGb5Q/RikW1oMMB4Di7XIZIpuzh/7mlrR8HNbxXCA= +github.com/goplus/lib v0.3.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/py/torch/llpkg.cfg b/py/torch/llpkg.cfg new file mode 100644 index 00000000..f34ef5a8 --- /dev/null +++ b/py/torch/llpkg.cfg @@ -0,0 +1,17 @@ +{ + "type": "python", + "upstream": { + "installer": { + "name": "pip" + }, + "package": { + "name": "torch", + "version": "2.2.0" + } + }, + "llpyg": { + "output_dir": "./test", + "mod_name": "github.com/PengPengPeng717/llpkg/py/torch", + "mod_depth": 1 + } +} \ No newline at end of file diff --git a/py/torch/llpyg.cfg b/py/torch/llpyg.cfg new file mode 100644 index 00000000..2356ed26 --- /dev/null +++ b/py/torch/llpyg.cfg @@ -0,0 +1,7 @@ +{ + "name": "torch", + "libName": "torch", + "modules": [ + "torch" + ] +} diff --git a/py/torch/torch.go b/py/torch/torch.go new file mode 100644 index 00000000..e110a331 --- /dev/null +++ b/py/torch/torch.go @@ -0,0 +1,17319 @@ +package torch + +import ( + "github.com/goplus/lib/py" + _ "unsafe" +) + +const LLGoPackage = "py.torch" +// None +// +//go:linkname Classproperty py.classproperty +func Classproperty(func_ *py.Object) *py.Object +// None +// +//go:linkname GetFilePath py.get_file_path +func GetFilePath(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname PrepareMultiprocessingEnvironment py.prepare_multiprocessing_environment +func PrepareMultiprocessingEnvironment(path *py.Object) *py.Object +// +// get_num_threads() -> int +// +// Returns the number of threads used for parallelizing CPU operations +// +// +//go:linkname GetNumThreads py.get_num_threads +func GetNumThreads() *py.Object +// +// set_num_threads(int) +// +// Sets the number of threads used for intraop parallelism on CPU. +// +// .. warning:: +// To ensure that the correct number of threads is used, set_num_threads +// must be called before running eager, JIT or autograd code. 
+// +// +//go:linkname SetNumThreads py.set_num_threads +func SetNumThreads(int *py.Object) *py.Object +// +// get_num_interop_threads() -> int +// +// Returns the number of threads used for inter-op parallelism on CPU +// (e.g. in JIT interpreter) +// +// +//go:linkname GetNumInteropThreads py.get_num_interop_threads +func GetNumInteropThreads() *py.Object +// +// set_num_interop_threads(int) +// +// Sets the number of threads used for interop parallelism +// (e.g. in JIT interpreter) on CPU. +// +// .. warning:: +// Can only be called once and before any inter-op parallel work +// is started (e.g. JIT execution). +// +// +//go:linkname SetNumInteropThreads py.set_num_interop_threads +func SetNumInteropThreads(int *py.Object) *py.Object +// +// set_flush_denormal(mode) -> bool +// +// Disables denormal floating numbers on CPU. +// +// Returns ``True`` if your system supports flushing denormal numbers and it +// successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal` +// is only supported on x86 architectures supporting SSE3. +// +// Args: +// mode (bool): Controls whether to enable flush denormal mode or not +// +// Example:: +// +// >>> torch.set_flush_denormal(True) +// True +// >>> torch.tensor([1e-323], dtype=torch.float64) +// tensor([ 0.], dtype=torch.float64) +// >>> torch.set_flush_denormal(False) +// True +// >>> torch.tensor([1e-323], dtype=torch.float64) +// tensor(9.88131e-324 * +// [ 1.0000], dtype=torch.float64) +// +// +//go:linkname SetFlushDenormal py.set_flush_denormal +func SetFlushDenormal(mode *py.Object) *py.Object +// +// get_default_dtype() -> torch.dtype +// +// Get the current default floating point :class:`torch.dtype`. 
+// +// Example:: +// +// >>> torch.get_default_dtype() # initial default for floating point is torch.float32 +// torch.float32 +// >>> torch.set_default_dtype(torch.float64) +// >>> torch.get_default_dtype() # default is now changed to torch.float64 +// torch.float64 +// >>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this +// >>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor +// torch.float32 +// +// +// +//go:linkname GetDefaultDtype py.get_default_dtype +func GetDefaultDtype() *py.Object +// +// is_grad_enabled() -> (bool) +// +// Returns True if grad mode is currently enabled. +// +// +//go:linkname IsGradEnabled py.is_grad_enabled +func IsGradEnabled() *py.Object +// +// is_inference_mode_enabled() -> (bool) +// +// Returns True if inference mode is currently enabled. +// +// +//go:linkname IsInferenceModeEnabled py.is_inference_mode_enabled +func IsInferenceModeEnabled() *py.Object +// None +// +//go:linkname SetAutocastEnabled py.set_autocast_enabled +func SetAutocastEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAutocastEnabled py.is_autocast_enabled +func IsAutocastEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ClearAutocastCache py.clear_autocast_cache +func ClearAutocastCache(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastCpuEnabled py.set_autocast_cpu_enabled +func SetAutocastCpuEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAutocastCpuEnabled py.is_autocast_cpu_enabled +func IsAutocastCpuEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastCpuDtype py.set_autocast_cpu_dtype +func SetAutocastCpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GetAutocastCpuDtype py.get_autocast_cpu_dtype +func GetAutocastCpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname 
SetAutocastGpuDtype py.set_autocast_gpu_dtype +func SetAutocastGpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GetAutocastGpuDtype py.get_autocast_gpu_dtype +func GetAutocastGpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastXlaEnabled py.set_autocast_xla_enabled +func SetAutocastXlaEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAutocastXlaEnabled py.is_autocast_xla_enabled +func IsAutocastXlaEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastXlaDtype py.set_autocast_xla_dtype +func SetAutocastXlaDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GetAutocastXlaDtype py.get_autocast_xla_dtype +func GetAutocastXlaDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastIpuEnabled py.set_autocast_ipu_enabled +func SetAutocastIpuEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAutocastIpuEnabled py.is_autocast_ipu_enabled +func IsAutocastIpuEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastIpuDtype py.set_autocast_ipu_dtype +func SetAutocastIpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GetAutocastIpuDtype py.get_autocast_ipu_dtype +func GetAutocastIpuDtype(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname AutocastIncrementNesting py.autocast_increment_nesting +func AutocastIncrementNesting(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname AutocastDecrementNesting py.autocast_decrement_nesting +func AutocastDecrementNesting(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAutocastCacheEnabled py.is_autocast_cache_enabled +func IsAutocastCacheEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SetAutocastCacheEnabled py.set_autocast_cache_enabled +func SetAutocastCacheEnabled(__llgo_va_list 
...interface{}) *py.Object +// None +// +//go:linkname SetAnomalyEnabled py.set_anomaly_enabled +func SetAnomalyEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAnomalyEnabled py.is_anomaly_enabled +func IsAnomalyEnabled(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsAnomalyCheckNanEnabled py.is_anomaly_check_nan_enabled +func IsAnomalyCheckNanEnabled(__llgo_va_list ...interface{}) *py.Object +// parse_ir(input: str, parse_tensor_constants: bool = False) -> torch::jit::Graph +// +// +//go:linkname ParseIr py.parse_ir +func ParseIr(input *py.Object, parseTensorConstants *py.Object) *py.Object +// parse_schema(arg0: str) -> c10::FunctionSchema +// +// +//go:linkname ParseSchema py.parse_schema +func ParseSchema(arg0 *py.Object) *py.Object +// unify_type_list(arg0: List[c10::Type]) -> c10::Type +// +// +//go:linkname UnifyTypeList py.unify_type_list +func UnifyTypeList(arg0 *py.Object) *py.Object +// fork(*args, **kwargs) -> torch._C.Future +// +// +//go:linkname Fork py.fork +func Fork(__llgo_va_list ...interface{}) *py.Object +// wait(arg0: torch._C.Future) -> object +// +// +//go:linkname Wait py.wait +func Wait(arg0 *py.Object) *py.Object +// parse_type_comment(arg0: str) -> torch._C._jit_tree_views.Decl +// +// +//go:linkname ParseTypeComment py.parse_type_comment +func ParseTypeComment(arg0 *py.Object) *py.Object +// merge_type_from_type_comment(arg0: torch._C._jit_tree_views.Decl, arg1: torch._C._jit_tree_views.Decl, arg2: bool) -> torch._C._jit_tree_views.Decl +// +// +//go:linkname MergeTypeFromTypeComment py.merge_type_from_type_comment +func MergeTypeFromTypeComment(arg0 *py.Object, arg1 *py.Object, arg2 *py.Object) *py.Object +// import_ir_module(arg0: torch._C.CompilationUnit, arg1: str, arg2: object, arg3: dict, arg4: bool) -> torch._C.ScriptModule +// +// +//go:linkname ImportIrModule py.import_ir_module +func ImportIrModule(arg0 *py.Object, arg1 *py.Object, arg2 *py.Object, arg3 *py.Object, arg4 
*py.Object) *py.Object +// import_ir_module_from_buffer(arg0: torch._C.CompilationUnit, arg1: str, arg2: object, arg3: dict, arg4: bool) -> torch._C.ScriptModule +// +// +//go:linkname ImportIrModuleFromBuffer py.import_ir_module_from_buffer +func ImportIrModuleFromBuffer(arg0 *py.Object, arg1 *py.Object, arg2 *py.Object, arg3 *py.Object, arg4 *py.Object) *py.Object +// vitals_enabled() -> bool +// +// +//go:linkname VitalsEnabled py.vitals_enabled +func VitalsEnabled() *py.Object +// set_vital(arg0: str, arg1: str, arg2: str) -> bool +// +// +//go:linkname SetVital py.set_vital +func SetVital(arg0 *py.Object, arg1 *py.Object, arg2 *py.Object) *py.Object +// read_vitals() -> str +// +// +//go:linkname ReadVitals py.read_vitals +func ReadVitals() *py.Object +// init_num_threads() -> None +// +// +// init_num_threads() +// +// Initializes the number of parallel threads used on the current thread. +// +// Call this whenever a new thread is created in order to propagate values from +// :func:`torch.set_num_threads` onto the new thread. +// +// +// +//go:linkname InitNumThreads py.init_num_threads +func InitNumThreads() *py.Object +// SymInt-aware utility for logical negation. +// +// Args: +// a (SymBool or bool): Object to negate +// +// +//go:linkname SymNot py.sym_not +func SymNot(a *py.Object) *py.Object +// SymInt-aware utility for float casting. +// +// Args: +// a (SymInt, SymFloat, or object): Object to cast +// +// +//go:linkname SymFloat py.sym_float +func SymFloat(a *py.Object) *py.Object +// SymInt-aware utility for int casting. +// +// Args: +// a (SymInt, SymFloat, or object): Object to cast +// +// +//go:linkname SymInt py.sym_int +func SymInt(a *py.Object) *py.Object +// SymInt-aware utility for max(). +// +//go:linkname SymMax py.sym_max +func SymMax(a *py.Object, b *py.Object) *py.Object +// SymInt-aware utility for max(). 
+// +//go:linkname SymMin py.sym_min +func SymMin(a *py.Object, b *py.Object) *py.Object +// None +// +//go:linkname SymSqrt py.sym_sqrt +func SymSqrt(a *py.Object) *py.Object +// None +// +//go:linkname SymIte py.sym_ite +func SymIte(b *py.Object, t *py.Object, f *py.Object) *py.Object +// +// zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor filled with the scalar value `0`, with the same size as +// :attr:`input`. ``torch.zeros_like(input)`` is equivalent to +// ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// .. warning:: +// As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, +// the old ``torch.zeros_like(input, out=output)`` is equivalent to +// ``torch.zeros(input.size(), out=output)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. 
+// +// Example:: +// +// >>> input = torch.empty(2, 3) +// >>> torch.zeros_like(input) +// tensor([[ 0., 0., 0.], +// [ 0., 0., 0.]]) +// +// +//go:linkname Obj py.obj +func Obj(__llgo_va_list ...interface{}) *py.Object +// wait(arg0: torch._C.Future) -> object +// +// +//go:linkname Candidate py.candidate +func Candidate(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Typename py.typename +func Typename(o *py.Object) *py.Object +// Returns True if `obj` is a PyTorch tensor. +// +// Note that this function is simply doing ``isinstance(obj, Tensor)``. +// Using that ``isinstance`` check is better for typechecking with mypy, +// and more explicit - so it's recommended to use that instead of +// ``is_tensor``. +// +// Args: +// obj (Object): Object to test +// Example:: +// +// >>> x = torch.tensor([1, 2, 3]) +// >>> torch.is_tensor(x) +// True +// +// +// +//go:linkname IsTensor py.is_tensor +func IsTensor(obj *py.Object) *py.Object +// Returns True if `obj` is a PyTorch storage object. +// +// Args: +// obj (Object): Object to test +// +// +//go:linkname IsStorage py.is_storage +func IsStorage(obj *py.Object) *py.Object +// Sets the default ``torch.Tensor`` to be allocated on ``device``. This +// does not affect factory function calls which are called with an explicit +// ``device`` argument. Factory calls will be performed as if they +// were passed ``device`` as an argument. +// +// To only temporarily change the default device instead of setting it +// globally, use ``with torch.device(device):`` instead. +// +// The default device is initially ``cpu``. If you set the default tensor +// device to another device (e.g., ``cuda``) without a device index, tensors +// will be allocated on whatever the current device for the device type, +// even after :func:`torch.cuda.set_device` is called. +// +// .. warning:: +// +// This function imposes a slight performance cost on every Python +// call to the torch API (not just factory functions). 
If this +// is causing problems for you, please comment on +// https://github.com/pytorch/pytorch/issues/92701 +// +// .. note:: +// +// This doesn't affect functions that create tensors that share the same memory as the input, like: +// :func:`torch.from_numpy` and :func:`torch.frombuffer` +// +// Args: +// device (device or string): the device to set as default +// +// Example:: +// +// >>> # xdoctest: +SKIP("requires cuda, changes global state") +// >>> torch.tensor([1.2, 3]).device +// device(type='cpu') +// >>> torch.set_default_device('cuda') # current device is 0 +// >>> torch.tensor([1.2, 3]).device +// device(type='cuda', index=0) +// >>> torch.set_default_device('cuda:1') +// >>> torch.tensor([1.2, 3]).device +// device(type='cuda', index=1) +// +// +// +//go:linkname SetDefaultDevice py.set_default_device +func SetDefaultDevice(device *py.Object) *py.Object +// Sets the default ``torch.Tensor`` type to floating point tensor type +// ``t``. This type will also be used as default floating point type for +// type inference in :func:`torch.tensor`. +// +// The default floating point tensor type is initially ``torch.FloatTensor``. +// +// Args: +// t (type or string): the floating point tensor type or its name +// +// Example:: +// +// >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") +// >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 +// torch.float32 +// >>> torch.set_default_tensor_type(torch.DoubleTensor) +// >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor +// torch.float64 +// +// +// +//go:linkname SetDefaultTensorType py.set_default_tensor_type +func SetDefaultTensorType(t *py.Object) *py.Object +// +// +// Sets the default floating point dtype to :attr:`d`. Supports torch.float32 +// and torch.float64 as inputs. Other dtypes may be accepted without complaint +// but are not supported and are unlikely to work as expected. 
+// +// When PyTorch is initialized its default floating point dtype is torch.float32, +// and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like +// type inference. The default floating point dtype is used to: +// +// 1. Implicitly determine the default complex dtype. When the default floating point +// type is float32 the default complex dtype is complex64, and when the default +// floating point type is float64 the default complex type is complex128. +// 2. Infer the dtype for tensors constructed using Python floats or complex Python +// numbers. See examples below. +// 3. Determine the result of type promotion between bool and integer tensors and +// Python floats and complex Python numbers. +// +// Args: +// d (:class:`torch.dtype`): the floating point dtype to make the default. +// Either torch.float32 or torch.float64. +// +// Example: +// >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") +// >>> # initial default for floating point is torch.float32 +// >>> # Python floats are interpreted as float32 +// >>> torch.tensor([1.2, 3]).dtype +// torch.float32 +// >>> # initial default for floating point is torch.complex64 +// >>> # Complex Python numbers are interpreted as complex64 +// >>> torch.tensor([1.2, 3j]).dtype +// torch.complex64 +// +// >>> torch.set_default_dtype(torch.float64) +// +// >>> # Python floats are now interpreted as float64 +// >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor +// torch.float64 +// >>> # Complex Python numbers are now interpreted as complex128 +// >>> torch.tensor([1.2, 3j]).dtype # a new complex tensor +// torch.complex128 +// +// +// +//go:linkname SetDefaultDtype py.set_default_dtype +func SetDefaultDtype(d *py.Object) *py.Object +// Sets whether PyTorch operations must use "deterministic" +// algorithms. That is, algorithms which, given the same input, and when +// run on the same software and hardware, always produce the same output. 
+// When enabled, operations will use deterministic algorithms when available, +// and if only nondeterministic algorithms are available they will throw a +// :class:`RuntimeError` when called. +// +// .. note:: This setting alone is not always enough to make an application +// reproducible. Refer to :ref:`reproducibility` for more information. +// +// .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative +// interface for this feature. +// +// The following normally-nondeterministic operations will act +// deterministically when ``mode=True``: +// +// * :class:`torch.nn.Conv1d` when called on CUDA tensor +// * :class:`torch.nn.Conv2d` when called on CUDA tensor +// * :class:`torch.nn.Conv3d` when called on CUDA tensor +// * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor +// * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor +// * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor +// * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor +// * :func:`torch.bmm` when called on sparse-dense CUDA tensors +// * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor +// and the index is a list of tensors +// * :func:`torch.Tensor.index_put` with ``accumulate=False`` +// * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU +// tensor +// * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU +// tensor +// * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor +// * :func:`torch.gather` when called on a CUDA tensor that requires grad +// * :func:`torch.index_add` when called on CUDA tensor +// * :func:`torch.index_select` when attempting to differentiate a CUDA tensor +// * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor +// * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor +// * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA 
tensor +// * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor +// +// The following normally-nondeterministic operations will throw a +// :class:`RuntimeError` when ``mode=True``: +// +// * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.MaxUnpool1d` +// * :class:`torch.nn.MaxUnpool2d` +// * :class:`torch.nn.MaxUnpool3d` +// * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor +// and one of the following modes is used: +// +// - ``linear`` +// - ``bilinear`` +// - ``bicubic`` +// - ``trilinear`` +// +// * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.NLLLoss` when called on a CUDA tensor +// * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor +// * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when +// ``mode='max'`` +// * :func:`torch.Tensor.put_` when ``accumulate=False`` +// * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA 
tensor +// * :func:`torch.histc` when called on a CUDA tensor +// * :func:`torch.bincount` when called on a CUDA tensor and ``weights`` +// tensor is given +// * :func:`torch.kthvalue` with called on a CUDA tensor +// * :func:`torch.median` with indices output when called on a CUDA tensor +// * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor +// * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex +// * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor +// * :func:`torch.Tensor.resize_` when called with a quantized tensor +// +// In addition, several operations fill uninitialized memory when this setting +// is turned on and when +// :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on. +// See the documentation for that attribute for more information. +// +// A handful of CUDA operations are nondeterministic if the CUDA version is +// 10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8`` +// or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more +// details: ``_ +// If one of these environment variable configurations is not set, a :class:`RuntimeError` +// will be raised from these operations when called with CUDA tensors: +// +// * :func:`torch.mm` +// * :func:`torch.mv` +// * :func:`torch.bmm` +// +// Note that deterministic operations tend to have worse performance than +// nondeterministic operations. +// +// .. note:: +// +// This flag does not detect or prevent nondeterministic behavior caused +// by calling an inplace operation on a tensor with an internal memory +// overlap or by giving such a tensor as the :attr:`out` argument for an +// operation. In these cases, multiple writes of different data may target +// a single memory location, and the order of writes is not guaranteed. 
+// +// Args: +// mode (:class:`bool`): If True, makes potentially nondeterministic +// operations switch to a deterministic algorithm or throw a runtime +// error. If False, allows nondeterministic operations. +// +// Keyword args: +// warn_only (:class:`bool`, optional): If True, operations that do not +// have a deterministic implementation will throw a warning instead of +// an error. Default: ``False`` +// +// Example:: +// +// >>> # xdoctest: +SKIP +// >>> torch.use_deterministic_algorithms(True) +// +// # Forward mode nondeterministic error +// >>> torch.randn(10, device='cuda').kthvalue(1) +// ... +// RuntimeError: kthvalue CUDA does not have a deterministic implementation... +// +// # Backward mode nondeterministic error +// >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward() +// ... +// RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation... +// +// +//go:linkname UseDeterministicAlgorithms py.use_deterministic_algorithms +func UseDeterministicAlgorithms(mode *py.Object) *py.Object +// Returns True if the global deterministic flag is turned on. Refer to +// :func:`torch.use_deterministic_algorithms` documentation for more details. +// +// +//go:linkname AreDeterministicAlgorithmsEnabled py.are_deterministic_algorithms_enabled +func AreDeterministicAlgorithmsEnabled() *py.Object +// Returns True if the global deterministic flag is set to warn only. +// Refer to :func:`torch.use_deterministic_algorithms` documentation for more +// details. +// +// +//go:linkname IsDeterministicAlgorithmsWarnOnlyEnabled py.is_deterministic_algorithms_warn_only_enabled +func IsDeterministicAlgorithmsWarnOnlyEnabled() *py.Object +// Sets the debug mode for deterministic operations. +// +// .. note:: This is an alternative interface for +// :func:`torch.use_deterministic_algorithms`. Refer to that function's +// documentation for details about affected operations. 
+// +// Args: +// debug_mode(str or int): If "default" or 0, don't error or warn on +// nondeterministic operations. If "warn" or 1, warn on +// nondeterministic operations. If "error" or 2, error on +// nondeterministic operations. +// +// +//go:linkname SetDeterministicDebugMode py.set_deterministic_debug_mode +func SetDeterministicDebugMode(debugMode *py.Object) *py.Object +// Returns the current value of the debug mode for deterministic +// operations. Refer to :func:`torch.set_deterministic_debug_mode` +// documentation for more details. +// +// +//go:linkname GetDeterministicDebugMode py.get_deterministic_debug_mode +func GetDeterministicDebugMode() *py.Object +// Returns the current value of float32 matrix multiplication precision. Refer to +// :func:`torch.set_float32_matmul_precision` documentation for more details. +// +// +//go:linkname GetFloat32MatmulPrecision py.get_float32_matmul_precision +func GetFloat32MatmulPrecision() *py.Object +// Sets the internal precision of float32 matrix multiplications. +// +// Running float32 matrix multiplications in lower precision may significantly increase +// performance, and in some programs the loss of precision has a negligible impact. +// +// Supports three settings: +// +// * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa +// bits) for internal computations. +// * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10 +// mantissa bits) or treat each float32 number as the sum of two bfloat16 numbers +// (approximately 16 mantissa bits), if the appropriate fast matrix multiplication +// algorithms are available. Otherwise float32 matrix multiplications are computed +// as if the precision is "highest". See below for more information on the bfloat16 +// approach. 
+// * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa +// bits) for internal computations, if a fast matrix multiplication algorithm +// using that datatype internally is available. Otherwise float32 +// matrix multiplications are computed as if the precision is "high". +// +// When using "high" precision, float32 multiplications may use a bfloat16-based algorithm +// that is more complicated than simply truncating to some smaller number mantissa bits +// (e.g. 10 for TensorFloat32, 8 for bfloat16). Refer to [Henry2019]_ for a complete +// description of this algorithm. To briefly explain here, the first step is to realize +// that we can perfectly encode a single float32 number as the sum of three bfloat16 +// numbers (because float32 has 24 mantissa bits while bfloat16 has 8, and both have the +// same number of exponent bits). This means that the product of two float32 numbers can +// be exactly given by the sum of nine products of bfloat16 numbers. We can then trade +// accuracy for speed by dropping some of these products. The "high" precision algorithm +// specifically keeps only the three most significant products, which conveniently excludes +// all of the products involving the last 8 mantissa bits of either input. This means that +// we can represent our inputs as the sum of two bfloat16 numbers rather than three. +// Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than +// float32 ones, it's faster to do three multiplications and 2 additions with bfloat16 +// precision than it is to do a single multiplication with float32 precision. +// +// .. [Henry2019] http://arxiv.org/abs/1904.06376 +// +// .. note:: +// +// This does not change the output dtype of float32 matrix multiplications, +// it controls how the internal computation of the matrix multiplication is performed. +// +// .. note:: +// +// This does not change the precision of convolution operations. 
Other flags, +// like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution +// operations. +// +// .. note:: +// +// This flag currently only affects one native device type: CUDA. +// If "high" or "medium" are set then the TensorFloat32 datatype will be used +// when computing float32 matrix multiplications, equivalent to setting +// `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default) +// is set then the float32 datatype is used for internal computations, equivalent +// to setting `torch.backends.cuda.matmul.allow_tf32 = False`. +// +// Args: +// precision(str): can be set to "highest" (default), "high", or "medium" (see above). +// +// +// +//go:linkname SetFloat32MatmulPrecision py.set_float32_matmul_precision +func SetFloat32MatmulPrecision(precision *py.Object) *py.Object +// When this flag is False (default) then some PyTorch warnings may only +// appear once per process. This helps avoid excessive warning information. +// Setting it to True causes these warnings to always appear, which may be +// helpful when debugging. +// +// Args: +// b (:class:`bool`): If True, force warnings to always be emitted +// If False, set to the default behaviour +// +// +//go:linkname SetWarnAlways py.set_warn_always +func SetWarnAlways(b *py.Object) *py.Object +// Returns True if the global warn_always flag is turned on. Refer to +// :func:`torch.set_warn_always` documentation for more details. +// +// +//go:linkname IsWarnAlwaysEnabled py.is_warn_always_enabled +func IsWarnAlwaysEnabled() *py.Object +// Sets the random number generator state. +// +// .. note: This function only works for CPU. For CUDA, please use +// torch.manual_seed(seed), which works for both CPU and CUDA. +// +// Args: +// new_state (torch.ByteTensor): The desired state +// +// +//go:linkname SetRngState py.set_rng_state +func SetRngState(newState *py.Object) *py.Object +// Returns the random number generator state as a `torch.ByteTensor`. 
+// +//go:linkname GetRngState py.get_rng_state +func GetRngState() *py.Object +// Sets the seed for generating random numbers. Returns a +// `torch.Generator` object. +// +// Args: +// seed (int): The desired seed. Value must be within the inclusive range +// `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError +// is raised. Negative inputs are remapped to positive values with the formula +// `0xffff_ffff_ffff_ffff + seed`. +// +// +//go:linkname ManualSeed py.manual_seed +func ManualSeed(seed *py.Object) *py.Object +// Returns the initial seed for generating random numbers as a +// Python `long`. +// +// +//go:linkname InitialSeed py.initial_seed +func InitialSeed() *py.Object +// Sets the seed for generating random numbers to a non-deterministic +// random number. Returns a 64 bit number used to seed the RNG. +// +// +//go:linkname Seed py.seed +func Seed() *py.Object +// save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True) +// +// Saves an object to a disk file. +// +// See also: :ref:`saving-loading-tensors` +// +// Args: +// obj: saved object +// f: a file-like object (has to implement write and flush) or a string or +// os.PathLike object containing a file name +// pickle_module: module used for pickling metadata and objects +// pickle_protocol: can be specified to override the default protocol +// +// .. note:: +// A common PyTorch convention is to save tensors using .pt file extension. +// +// .. note:: +// PyTorch preserves storage sharing across serialization. See +// :ref:`preserve-storage-sharing` for more details. +// +// .. note:: +// The 1.6 release of PyTorch switched ``torch.save`` to use a new +// zipfile-based file format. ``torch.load`` still retains the ability to +// load files in the old format. If for any reason you want ``torch.save`` +// to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``. 
+// +// Example: +// >>> # xdoctest: +SKIP("makes cwd dirty") +// >>> # Save to file +// >>> x = torch.tensor([0, 1, 2, 3, 4]) +// >>> torch.save(x, 'tensor.pt') +// >>> # Save to io.BytesIO buffer +// >>> buffer = io.BytesIO() +// >>> torch.save(x, buffer) +// +// +//go:linkname Save py.save +func Save(obj *py.Object, f *py.Object, pickleModule *py.Object, pickleProtocol *py.Object, UseNewZipfileSerialization *py.Object, DisableByteorderRecord *py.Object) *py.Object +// load(f, map_location=None, pickle_module=pickle, *, weights_only=False, mmap=None, **pickle_load_args) +// +// Loads an object saved with :func:`torch.save` from a file. +// +// :func:`torch.load` uses Python's unpickling facilities but treats storages, +// which underlie tensors, specially. They are first deserialized on the +// CPU and are then moved to the device they were saved from. If this fails +// (e.g. because the run time system doesn't have certain devices), an exception +// is raised. However, storages can be dynamically remapped to an alternative +// set of devices using the :attr:`map_location` argument. +// +// If :attr:`map_location` is a callable, it will be called once for each serialized +// storage with two arguments: storage and location. The storage argument +// will be the initial deserialization of the storage, residing on the CPU. +// Each serialized storage has a location tag associated with it which +// identifies the device it was saved from, and this tag is the second +// argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'`` +// for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors. +// :attr:`map_location` should return either ``None`` or a storage. If +// :attr:`map_location` returns a storage, it will be used as the final deserialized +// object, already moved to the right device. Otherwise, :func:`torch.load` will +// fall back to the default behavior, as if :attr:`map_location` wasn't specified. 
+// +// If :attr:`map_location` is a :class:`torch.device` object or a string containing +// a device tag, it indicates the location where all tensors should be loaded. +// +// Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags +// appearing in the file (keys), to ones that specify where to put the +// storages (values). +// +// User extensions can register their own location tags and tagging and +// deserialization methods using :func:`torch.serialization.register_package`. +// +// Args: +// f: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`), +// or a string or os.PathLike object containing a file name +// map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage +// locations +// pickle_module: module used for unpickling metadata and objects (has to +// match the :attr:`pickle_module` used to serialize file) +// weights_only: Indicates whether unpickler should be restricted to +// loading only tensors, primitive types and dictionaries +// mmap: Indicates whether the file should be mmaped rather than loading all the storages into memory. +// Typically, tensor storages in the file will first be moved from disk to CPU memory, after which they +// are moved to the location that they were tagged with when saving, or specified by ``map_location``. This +// second step is a no-op if the final location is CPU. When the ``mmap`` flag is set, instead of copying the +// tensor storages from disk to CPU memory in the first step, ``f`` is mmaped. +// pickle_load_args: (Python 3 only) optional keyword arguments passed over to +// :func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g., +// :attr:`errors=...`. +// +// .. warning:: +// :func:`torch.load()` unless `weights_only` parameter is set to `True`, +// uses ``pickle`` module implicitly, which is known to be insecure. 
+// It is possible to construct malicious pickle data which will execute arbitrary code +// during unpickling. Never load data that could have come from an untrusted +// source in an unsafe mode, or that could have been tampered with. **Only load data you trust**. +// +// .. note:: +// When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors +// will be loaded to GPU by default. You can call ``torch.load(.., map_location='cpu')`` +// and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint. +// +// .. note:: +// By default, we decode byte strings as ``utf-8``. This is to avoid a common error +// case ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...`` +// when loading files saved by Python 2 in Python 3. If this default +// is incorrect, you may use an extra :attr:`encoding` keyword argument to specify how +// these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them +// to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them +// as byte arrays which can be decoded later with ``byte_array.decode(...)``. +// +// Example: +// >>> # xdoctest: +SKIP("undefined filepaths") +// >>> torch.load('tensors.pt', weights_only=True) +// # Load all tensors onto the CPU +// >>> torch.load('tensors.pt', map_location=torch.device('cpu'), weights_only=True) +// # Load all tensors onto the CPU, using a function +// >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage, weights_only=True) +// # Load all tensors onto GPU 1 +// >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1), weights_only=True) +// # Map tensors from GPU 1 to GPU 0 +// >>> torch.load('tensors.pt', map_location={'cuda:1': 'cuda:0'}, weights_only=True) +// # Load tensor from io.BytesIO object +// # Loading from a buffer setting weights_only=False, warning this can be unsafe +// >>> with open('tensor.pt', 'rb') as f: +// ... 
buffer = io.BytesIO(f.read()) +// >>> torch.load(buffer, weights_only=False) +// # Load a module with 'ascii' encoding for unpickling +// # Loading from a module setting weights_only=False, warning this can be unsafe +// >>> torch.load('module.pt', encoding='ascii', weights_only=False) +// +// +//go:linkname Load py.load +func Load(f *py.Object, mapLocation *py.Object, pickleModule *py.Object) *py.Object +// Set options for printing. Items shamelessly taken from NumPy +// +// Args: +// precision: Number of digits of precision for floating point output +// (default = 4). +// threshold: Total number of array elements which trigger summarization +// rather than full `repr` (default = 1000). +// edgeitems: Number of array items in summary at beginning and end of +// each dimension (default = 3). +// linewidth: The number of characters per line for the purpose of +// inserting line breaks (default = 80). Thresholded matrices will +// ignore this parameter. +// profile: Sane defaults for pretty printing. Can override with any of +// the above options. (any one of `default`, `short`, `full`) +// sci_mode: Enable (True) or disable (False) scientific notation. If +// None (default) is specified, the value is defined by +// `torch._tensor_str._Formatter`. This value is automatically chosen +// by the framework. 
+// +// Example:: +// +// >>> # Limit the precision of elements +// >>> torch.set_printoptions(precision=2) +// >>> torch.tensor([1.12345]) +// tensor([1.12]) +// >>> # Limit the number of elements shown +// >>> torch.set_printoptions(threshold=5) +// >>> torch.arange(10) +// tensor([0, 1, 2, ..., 7, 8, 9]) +// >>> # Restore defaults +// >>> torch.set_printoptions(profile='default') +// >>> torch.tensor([1.12345]) +// tensor([1.1235]) +// >>> torch.arange(10) +// tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) +// +// +// +//go:linkname SetPrintoptions py.set_printoptions +func SetPrintoptions(precision *py.Object, threshold *py.Object, edgeitems *py.Object, linewidth *py.Object, profile *py.Object, sciMode *py.Object) *py.Object +// +// abs(input, *, out=None) -> Tensor +// +// Computes the absolute value of each element in :attr:`input`. +// +// .. math:: +// \text{out}_{i} = |\text{input}_{i}| +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.abs(torch.tensor([-1, -2, 3])) +// tensor([ 1, 2, 3]) +// +// +//go:linkname Abs py.abs +func Abs(input *py.Object) *py.Object +// None +// +//go:linkname Abs_ py.abs_ +func Abs_(__llgo_va_list ...interface{}) *py.Object +// +// absolute(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.abs` +// +// +//go:linkname Absolute py.absolute +func Absolute(input *py.Object) *py.Object +// +// acos(input, *, out=None) -> Tensor +// +// Computes the inverse cosine of each element in :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \cos^{-1}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) +// >>> torch.acos(a) +// tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) +// +// +//go:linkname Acos py.acos +func Acos(input *py.Object) *py.Object +// None +// +//go:linkname Acos_ py.acos_ +func Acos_(__llgo_va_list ...interface{}) *py.Object +// +// acosh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \cosh^{-1}(\text{input}_{i}) +// +// Note: +// The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range +// will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4).uniform_(1, 2) +// >>> a +// tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ]) +// >>> torch.acosh(a) +// tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ]) +// +// +//go:linkname Acosh py.acosh +func Acosh(input *py.Object) *py.Object +// None +// +//go:linkname Acosh_ py.acosh_ +func Acosh_(__llgo_va_list ...interface{}) *py.Object +// +// adaptive_avg_pool1d(input, output_size) -> Tensor +// +// Applies a 1D adaptive average pooling over an input signal composed of +// several input planes. +// +// See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape. +// +// Args: +// output_size: the target output size (single integer) +// +// +//go:linkname AdaptiveAvgPool1d py.adaptive_avg_pool1d +func AdaptiveAvgPool1d(input *py.Object, outputSize *py.Object) *py.Object +// None +// +//go:linkname AdaptiveMaxPool1d py.adaptive_max_pool1d +func AdaptiveMaxPool1d(__llgo_va_list ...interface{}) *py.Object +// +// add(input, other, *, alpha=1, out=None) -> Tensor +// +// Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. +// +// .. 
math:: +// \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i +// +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer, float, and complex inputs. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor or Number): the tensor or number to add to :attr:`input`. +// +// Keyword arguments: +// alpha (Number): the multiplier for :attr:`other`. +// out (Tensor, optional): the output tensor. +// +// Examples:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) +// >>> torch.add(a, 20) +// tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) +// +// >>> b = torch.randn(4) +// >>> b +// tensor([-0.9732, -0.3497, 0.6245, 0.4022]) +// >>> c = torch.randn(4, 1) +// >>> c +// tensor([[ 0.3743], +// [-1.7724], +// [-0.5811], +// [-0.8017]]) +// >>> torch.add(b, c, alpha=10) +// tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], +// [-18.6971, -18.0736, -17.0994, -17.3216], +// [ -6.7845, -6.1610, -5.1868, -5.4090], +// [ -8.9902, -8.3667, -7.3925, -7.6147]]) +// +// +//go:linkname Add py.add +func Add(input *py.Object, other *py.Object) *py.Object +// +// addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor +// +// Performs a batch matrix-matrix product of matrices stored +// in :attr:`batch1` and :attr:`batch2`, +// with a reduced add step (all matrix multiplications get accumulated +// along the first dimension). +// :attr:`input` is added to the final result. +// +// :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the +// same number of matrices. +// +// If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +// :math:`(b \times m \times p)` tensor, :attr:`input` must be +// :ref:`broadcastable ` with a :math:`(n \times p)` tensor +// and :attr:`out` will be a :math:`(n \times p)` tensor. +// +// .. 
math:: +// out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) +// +// If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +// it will not be propagated. +// +// For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` +// must be real numbers, otherwise they should be integers. +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. +// +// Args: +// batch1 (Tensor): the first batch of matrices to be multiplied +// batch2 (Tensor): the second batch of matrices to be multiplied +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) +// input (Tensor): matrix to be added +// alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> M = torch.randn(3, 5) +// >>> batch1 = torch.randn(10, 3, 4) +// >>> batch2 = torch.randn(10, 4, 5) +// >>> torch.addbmm(M, batch1, batch2) +// tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], +// [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], +// [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) +// +// +//go:linkname Addbmm py.addbmm +func Addbmm(input *py.Object, batch1 *py.Object, batch2 *py.Object) *py.Object +// +// addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor +// +// Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, +// multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. +// +// .. warning:: +// Integer division with addcdiv is no longer supported, and in a future +// release addcdiv will perform a true division of tensor1 and tensor2. 
+// The historic addcdiv behavior can be implemented as +// (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) +// for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. +// The future addcdiv behavior is just the latter implementation: +// (input + value * tensor1 / tensor2), for all dtypes. +// +// .. math:: +// \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} +// +// +// The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be +// :ref:`broadcastable `. +// +// For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +// a real number, otherwise an integer. +// +// Args: +// input (Tensor): the tensor to be added +// tensor1 (Tensor): the numerator tensor +// tensor2 (Tensor): the denominator tensor +// +// Keyword args: +// value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.randn(1, 3) +// >>> t1 = torch.randn(3, 1) +// >>> t2 = torch.randn(1, 3) +// >>> torch.addcdiv(t, t1, t2, value=0.1) +// tensor([[-0.2312, -3.6496, 0.1312], +// [-1.0428, 3.4292, -0.1030], +// [-0.5369, -0.9829, 0.0430]]) +// +// +//go:linkname Addcdiv py.addcdiv +func Addcdiv(input *py.Object, tensor1 *py.Object, tensor2 *py.Object) *py.Object +// +// addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor +// +// Performs the element-wise multiplication of :attr:`tensor1` +// by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` +// and adds it to :attr:`input`. +// +// .. math:: +// \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i +// +// The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be +// :ref:`broadcastable `. +// +// For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +// a real number, otherwise an integer. 
+// +// Args: +// input (Tensor): the tensor to be added +// tensor1 (Tensor): the tensor to be multiplied +// tensor2 (Tensor): the tensor to be multiplied +// +// Keyword args: +// value (Number, optional): multiplier for :math:`tensor1 .* tensor2` +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.randn(1, 3) +// >>> t1 = torch.randn(3, 1) +// >>> t2 = torch.randn(1, 3) +// >>> torch.addcmul(t, t1, t2, value=0.1) +// tensor([[-0.8635, -0.6391, 1.6174], +// [-0.7617, -0.5879, 1.7388], +// [-0.8353, -0.6249, 1.6511]]) +// +// +//go:linkname Addcmul py.addcmul +func Addcmul(input *py.Object, tensor1 *py.Object, tensor2 *py.Object) *py.Object +// +// addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor +// +// Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. +// The matrix :attr:`input` is added to the final result. +// +// If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +// :math:`(m \times p)` tensor, then :attr:`input` must be +// :ref:`broadcastable ` with a :math:`(n \times p)` tensor +// and :attr:`out` will be a :math:`(n \times p)` tensor. +// +// :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +// :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. +// +// .. math:: +// \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) +// +// If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +// it will not be propagated. +// +// For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +// :attr:`alpha` must be real numbers, otherwise they should be integers. +// +// This operation has support for arguments with :ref:`sparse layouts`. If +// :attr:`input` is sparse the result will have the same layout and if :attr:`out` +// is provided it must have the same layout as :attr:`input`. +// +// +// .. 
warning:: +// Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, +// or may not have autograd support. If you notice missing functionality please +// open a feature request. +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. +// +// Args: +// input (Tensor): matrix to be added +// mat1 (Tensor): the first matrix to be matrix multiplied +// mat2 (Tensor): the second matrix to be matrix multiplied +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) +// alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> M = torch.randn(2, 3) +// >>> mat1 = torch.randn(2, 3) +// >>> mat2 = torch.randn(3, 3) +// >>> torch.addmm(M, mat1, mat2) +// tensor([[-4.8716, 1.4671, -1.3746], +// [ 0.7573, -3.9555, -2.8681]]) +// +// +//go:linkname Addmm py.addmm +func Addmm(input *py.Object, mat1 *py.Object, mat2 *py.Object) *py.Object +// +// addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor +// +// Performs a matrix-vector product of the matrix :attr:`mat` and +// the vector :attr:`vec`. +// The vector :attr:`input` is added to the final result. +// +// If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +// size `m`, then :attr:`input` must be +// :ref:`broadcastable ` with a 1-D tensor of size `n` and +// :attr:`out` will be 1-D tensor of size `n`. +// +// :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +// :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. +// +// .. math:: +// \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) +// +// If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +// it will not be propagated. 
+// +// For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +// :attr:`alpha` must be real numbers, otherwise they should be integers. +// +// Args: +// input (Tensor): vector to be added +// mat (Tensor): matrix to be matrix multiplied +// vec (Tensor): vector to be matrix multiplied +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) +// alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> M = torch.randn(2) +// >>> mat = torch.randn(2, 3) +// >>> vec = torch.randn(3) +// >>> torch.addmv(M, mat, vec) +// tensor([-0.3768, -5.5565]) +// +// +//go:linkname Addmv py.addmv +func Addmv(input *py.Object, mat *py.Object, vec *py.Object) *py.Object +// None +// +//go:linkname Addmv_ py.addmv_ +func Addmv_(__llgo_va_list ...interface{}) *py.Object +// +// addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor +// +// Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` +// and adds it to the matrix :attr:`input`. +// +// Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the +// outer product between :attr:`vec1` and :attr:`vec2` and the added matrix +// :attr:`input` respectively. +// +// .. math:: +// \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) +// +// If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +// it will not be propagated. +// +// If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector +// of size `m`, then :attr:`input` must be +// :ref:`broadcastable ` with a matrix of size +// :math:`(n \times m)` and :attr:`out` will be a matrix of size +// :math:`(n \times m)`. 
+// +// Args: +// input (Tensor): matrix to be added +// vec1 (Tensor): the first vector of the outer product +// vec2 (Tensor): the second vector of the outer product +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) +// alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> vec1 = torch.arange(1., 4.) +// >>> vec2 = torch.arange(1., 3.) +// >>> M = torch.zeros(3, 2) +// >>> torch.addr(M, vec1, vec2) +// tensor([[ 1., 2.], +// [ 2., 4.], +// [ 3., 6.]]) +// +// +//go:linkname Addr py.addr +func Addr(input *py.Object, vec1 *py.Object, vec2 *py.Object) *py.Object +// +// adjoint(Tensor) -> Tensor +// Returns a view of the tensor conjugated and with the last two dimensions transposed. +// +// ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and +// to ``x.transpose(-2, -1)`` for real tensors. +// +// Example:: +// >>> x = torch.arange(4, dtype=torch.float) +// >>> A = torch.complex(x, x).reshape(2, 2) +// >>> A +// tensor([[0.+0.j, 1.+1.j], +// [2.+2.j, 3.+3.j]]) +// >>> A.adjoint() +// tensor([[0.-0.j, 2.-2.j], +// [1.-1.j, 3.-3.j]]) +// >>> (A.adjoint() == A.mH).all() +// tensor(True) +// +// +//go:linkname Adjoint py.adjoint +func Adjoint(Tensor *py.Object) *py.Object +// None +// +//go:linkname AffineGridGenerator py.affine_grid_generator +func AffineGridGenerator(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.alias`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname AliasCopy py.alias_copy +func AliasCopy(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname AlignTensors py.align_tensors +func AlignTensors(__llgo_va_list ...interface{}) *py.Object +// +// all(input) -> Tensor +// +// Tests if all elements in :attr:`input` evaluate to `True`. +// +// .. 
note:: This function matches the behaviour of NumPy in returning +// output of dtype `bool` for all supported dtypes except `uint8`. +// For `uint8` the dtype of output is `uint8` itself. +// +// Example:: +// +// >>> a = torch.rand(1, 2).bool() +// >>> a +// tensor([[False, True]], dtype=torch.bool) +// >>> torch.all(a) +// tensor(False, dtype=torch.bool) +// >>> a = torch.arange(0, 3) +// >>> a +// tensor([0, 1, 2]) +// >>> torch.all(a) +// tensor(False) +// +// .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor +// :noindex: +// +// For each row of :attr:`input` in the given dimension :attr:`dim`, +// returns `True` if all elements in the row evaluate to `True` and `False` otherwise. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.rand(4, 2).bool() +// >>> a +// tensor([[True, True], +// [True, False], +// [True, True], +// [True, True]], dtype=torch.bool) +// >>> torch.all(a, dim=1) +// tensor([ True, False, True, True], dtype=torch.bool) +// >>> torch.all(a, dim=0) +// tensor([ True, False], dtype=torch.bool) +// +// +//go:linkname All py.all +func All(input *py.Object) *py.Object +// +// allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool +// +// This function checks if :attr:`input` and :attr:`other` satisfy the condition: +// +// .. 
math:: +// \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +// +// elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to +// `numpy.allclose `_ +// +// Args: +// input (Tensor): first tensor to compare +// other (Tensor): second tensor to compare +// atol (float, optional): absolute tolerance. Default: 1e-08 +// rtol (float, optional): relative tolerance. Default: 1e-05 +// equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` +// +// Example:: +// +// >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])) +// False +// >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09])) +// True +// >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')])) +// False +// >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True) +// True +// +// +//go:linkname Allclose py.allclose +func Allclose(input *py.Object, other *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object +// None +// +//go:linkname AlphaDropout py.alpha_dropout +func AlphaDropout(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname AlphaDropout_ py.alpha_dropout_ +func AlphaDropout_(__llgo_va_list ...interface{}) *py.Object +// +// amax(input, dim, keepdim=False, *, out=None) -> Tensor +// +// Returns the maximum value of each slice of the :attr:`input` tensor in the given +// dimension(s) :attr:`dim`. +// +// .. 
note:: +// The difference between ``max``/``min`` and ``amax``/``amin`` is: +// - ``amax``/``amin`` supports reducing on multiple dimensions, +// - ``amax``/``amin`` does not return indices, +// - ``amax``/``amin`` evenly distributes gradient between equal values, +// while ``max(dim)``/``min(dim)`` propagates gradient only to a single +// index in the source tensor. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.8177, 1.4878, -0.2491, 0.9130], +// [-0.7158, 1.1775, 2.0992, 0.4817], +// [-0.0053, 0.0164, -1.3738, -0.0507], +// [ 1.9700, 1.1106, -1.0318, -1.0816]]) +// >>> torch.amax(a, 1) +// tensor([1.4878, 2.0992, 0.0164, 1.9700]) +// +// +//go:linkname Amax py.amax +func Amax(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// amin(input, dim, keepdim=False, *, out=None) -> Tensor +// +// Returns the minimum value of each slice of the :attr:`input` tensor in the given +// dimension(s) :attr:`dim`. +// +// .. note:: +// The difference between ``max``/``min`` and ``amax``/``amin`` is: +// - ``amax``/``amin`` supports reducing on multiple dimensions, +// - ``amax``/``amin`` does not return indices, +// - ``amax``/``amin`` evenly distributes gradient between equal values, +// while ``max(dim)``/``min(dim)`` propagates gradient only to a single +// index in the source tensor. 
+// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.6451, -0.4866, 0.2987, -1.3312], +// [-0.5744, 1.2980, 1.8397, -0.2713], +// [ 0.9128, 0.9214, -1.7268, -0.2995], +// [ 0.9023, 0.4853, 0.9075, -1.6165]]) +// >>> torch.amin(a, 1) +// tensor([-1.3312, -0.5744, -1.7268, -1.6165]) +// +// +//go:linkname Amin py.amin +func Amin(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max) +// +// Computes the minimum and maximum values of the :attr:`input` tensor. +// +// Args: +// input (Tensor): +// The input tensor +// +// Keyword Args: +// dim (Optional[int]): +// The dimension along which to compute the values. If `None`, +// computes the values over the entire :attr:`input` tensor. +// Default is `None`. +// keepdim (bool): +// If `True`, the reduced dimensions will be kept in the output +// tensor as dimensions with size 1 for broadcasting, otherwise +// they will be removed, as if calling (:func:`torch.squeeze`). +// Default is `False`. +// out (Optional[Tuple[Tensor, Tensor]]): +// Optional tensors on which to write the result. Must have the same +// shape and dtype as the expected output. +// Default is `None`. +// +// Returns: +// A named tuple `(min, max)` containing the minimum and maximum values. 
+// +// Raises: +// RuntimeError +// If any of the dimensions to compute the values over has size 0. +// +// .. note:: +// NaN values are propagated to the output if at least one value is NaN. +// +// .. seealso:: +// :func:`torch.amin` computes just the minimum value +// :func:`torch.amax` computes just the maximum value +// +// Example:: +// +// >>> torch.aminmax(torch.tensor([1, -3, 5])) +// torch.return_types.aminmax( +// min=tensor(-3), +// max=tensor(5)) +// +// >>> # aminmax propagates NaNs +// >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan])) +// torch.return_types.aminmax( +// min=tensor(nan), +// max=tensor(nan)) +// +// >>> t = torch.arange(10).view(2, 5) +// >>> t +// tensor([[0, 1, 2, 3, 4], +// [5, 6, 7, 8, 9]]) +// >>> t.aminmax(dim=0, keepdim=True) +// torch.return_types.aminmax( +// min=tensor([[0, 1, 2, 3, 4]]), +// max=tensor([[5, 6, 7, 8, 9]])) +// +// +//go:linkname Aminmax py.aminmax +func Aminmax(input *py.Object) *py.Object +// +// angle(input, *, out=None) -> Tensor +// +// Computes the element-wise angle (in radians) of the given :attr:`input` tensor. +// +// .. math:: +// \text{out}_{i} = angle(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// .. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers, +// zero for non-negative real numbers, and propagates NaNs. Previously +// the function would return zero for all real numbers and not propagate +// floating-point NaNs. +// +// Example:: +// +// >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 +// tensor([ 135., 135, -45]) +// +// +//go:linkname Angle py.angle +func Angle(input *py.Object) *py.Object +// +// any(input) -> Tensor +// +// Tests if any element in :attr:`input` evaluates to `True`. +// +// .. note:: This function matches the behaviour of NumPy in returning +// output of dtype `bool` for all supported dtypes except `uint8`. 
+// For `uint8` the dtype of output is `uint8` itself. +// +// Example:: +// +// >>> a = torch.rand(1, 2).bool() +// >>> a +// tensor([[False, True]], dtype=torch.bool) +// >>> torch.any(a) +// tensor(True, dtype=torch.bool) +// >>> a = torch.arange(0, 3) +// >>> a +// tensor([0, 1, 2]) +// >>> torch.any(a) +// tensor(True) +// +// .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor +// :noindex: +// +// For each row of :attr:`input` in the given dimension :attr:`dim`, +// returns `True` if any element in the row evaluate to `True` and `False` otherwise. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4, 2) < 0 +// >>> a +// tensor([[ True, True], +// [False, True], +// [ True, True], +// [False, False]]) +// >>> torch.any(a, 1) +// tensor([ True, True, True, False]) +// >>> torch.any(a, 0) +// tensor([True, True]) +// +// +//go:linkname Any py.any +func Any(input *py.Object) *py.Object +// +// arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` +// with values from the interval ``[start, end)`` taken with common difference +// :attr:`step` beginning from `start`. 
+// +// Note that non-integer :attr:`step` is subject to floating point rounding errors when +// comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` +// in such cases. +// +// .. math:: +// \text{out}_{{i+1}} = \text{out}_{i} + \text{step} +// +// Args: +// start (Number): the starting value for the set of points. Default: ``0``. +// end (Number): the ending value for the set of points +// step (Number): the gap between each pair of adjacent points. Default: ``1``. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). If `dtype` is not given, infer the data type from the other input +// arguments. If any of `start`, `end`, or `stop` are floating-point, the +// `dtype` is inferred to be the default dtype, see +// :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to +// be `torch.int64`. +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Example:: +// +// >>> torch.arange(5) +// tensor([ 0, 1, 2, 3, 4]) +// >>> torch.arange(1, 4) +// tensor([ 1, 2, 3]) +// >>> torch.arange(1, 2.5, 0.5) +// tensor([ 1.0000, 1.5000, 2.0000]) +// +// +//go:linkname Arange py.arange +func Arange(start *py.Object, end *py.Object, step *py.Object) *py.Object +// +// arccos(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.acos`. +// +// +//go:linkname Arccos py.arccos +func Arccos(input *py.Object) *py.Object +// None +// +//go:linkname Arccos_ py.arccos_ +func Arccos_(__llgo_va_list ...interface{}) *py.Object +// +// arccosh(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.acosh`. +// +// +//go:linkname Arccosh py.arccosh +func Arccosh(input *py.Object) *py.Object +// None +// +//go:linkname Arccosh_ py.arccosh_ +func Arccosh_(__llgo_va_list ...interface{}) *py.Object +// +// arcsin(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.asin`. +// +// +//go:linkname Arcsin py.arcsin +func Arcsin(input *py.Object) *py.Object +// None +// +//go:linkname Arcsin_ py.arcsin_ +func Arcsin_(__llgo_va_list ...interface{}) *py.Object +// +// arcsinh(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.asinh`. +// +// +//go:linkname Arcsinh py.arcsinh +func Arcsinh(input *py.Object) *py.Object +// None +// +//go:linkname Arcsinh_ py.arcsinh_ +func Arcsinh_(__llgo_va_list ...interface{}) *py.Object +// +// arctan(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.atan`. +// +// +//go:linkname Arctan py.arctan +func Arctan(input *py.Object) *py.Object +// +// arctan2(input, other, *, out=None) -> Tensor +// Alias for :func:`torch.atan2`. +// +// +//go:linkname Arctan2 py.arctan2 +func Arctan2(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Arctan_ py.arctan_ +func Arctan_(__llgo_va_list ...interface{}) *py.Object +// +// arctanh(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.atanh`. 
+// +// +//go:linkname Arctanh py.arctanh +func Arctanh(input *py.Object) *py.Object +// None +// +//go:linkname Arctanh_ py.arctanh_ +func Arctanh_(__llgo_va_list ...interface{}) *py.Object +// +// argmax(input) -> LongTensor +// +// Returns the indices of the maximum value of all elements in the :attr:`input` tensor. +// +// This is the second value returned by :meth:`torch.max`. See its +// documentation for the exact semantics of this method. +// +// .. note:: If there are multiple maximal values then the indices of the first maximal value are returned. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], +// [-0.7401, -0.8805, -0.3402, -1.1936], +// [ 0.4907, -1.3948, -1.0691, -0.3132], +// [-1.6092, 0.5419, -0.2993, 0.3195]]) +// >>> torch.argmax(a) +// tensor(0) +// +// .. function:: argmax(input, dim, keepdim=False) -> LongTensor +// :noindex: +// +// Returns the indices of the maximum values of a tensor across a dimension. +// +// This is the second value returned by :meth:`torch.max`. See its +// documentation for the exact semantics of this method. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], +// [-0.7401, -0.8805, -0.3402, -1.1936], +// [ 0.4907, -1.3948, -1.0691, -0.3132], +// [-1.6092, 0.5419, -0.2993, 0.3195]]) +// >>> torch.argmax(a, dim=1) +// tensor([ 0, 2, 0, 1]) +// +// +//go:linkname Argmax py.argmax +func Argmax(input *py.Object) *py.Object +// +// argmin(input, dim=None, keepdim=False) -> LongTensor +// +// Returns the indices of the minimum value(s) of the flattened tensor or along a dimension +// +// This is the second value returned by :meth:`torch.min`. See its +// documentation for the exact semantics of this method. +// +// .. note:: If there are multiple minimal values then the indices of the first minimal value are returned. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.1139, 0.2254, -0.1381, 0.3687], +// [ 1.0100, -1.1975, -0.0102, -0.4732], +// [-0.9240, 0.1207, -0.7506, -1.0213], +// [ 1.7809, -1.2960, 0.9384, 0.1438]]) +// >>> torch.argmin(a) +// tensor(13) +// >>> torch.argmin(a, dim=1) +// tensor([ 2, 1, 3, 1]) +// >>> torch.argmin(a, dim=1, keepdim=True) +// tensor([[2], +// [1], +// [3], +// [1]]) +// +// +//go:linkname Argmin py.argmin +func Argmin(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// argsort(input, dim=-1, descending=False, stable=False) -> Tensor +// +// Returns the indices that sort a tensor along a given dimension in ascending +// order by value. +// +// This is the second value returned by :meth:`torch.sort`. See its documentation +// for the exact semantics of this method. +// +// If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +// the order of equivalent elements. 
If ``False``, the relative order of values +// which compare equal is not guaranteed. ``True`` is slower. +// +// Args: +// input (Tensor): the input tensor. +// dim (int, optional): the dimension to sort along +// descending (bool, optional): controls the sorting order (ascending or descending) +// stable (bool, optional): controls the relative order of equivalent elements +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], +// [ 0.1598, 0.0788, -0.0745, -1.2700], +// [ 1.2208, 1.0722, -0.7064, 1.2564], +// [ 0.0669, -0.2318, -0.8229, -0.9280]]) +// +// +// >>> torch.argsort(a, dim=1) +// tensor([[2, 0, 3, 1], +// [3, 2, 1, 0], +// [2, 1, 0, 3], +// [3, 2, 1, 0]]) +// +// +//go:linkname Argsort py.argsort +func Argsort(input *py.Object, dim *py.Object, descending *py.Object, stable *py.Object) *py.Object +// +// argwhere(input) -> Tensor +// +// Returns a tensor containing the indices of all non-zero elements of +// :attr:`input`. Each row in the result contains the indices of a non-zero +// element in :attr:`input`. The result is sorted lexicographically, with +// the last index changing the fastest (C-style). +// +// If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor +// :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +// non-zero elements in the :attr:`input` tensor. +// +// .. note:: +// This function is similar to NumPy's `argwhere`. +// +// When :attr:`input` is on CUDA, this function causes host-device synchronization. 
+// +// Args: +// {input} +// +// Example:: +// +// >>> t = torch.tensor([1, 0, 1]) +// >>> torch.argwhere(t) +// tensor([[0], +// [2]]) +// >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]]) +// >>> torch.argwhere(t) +// tensor([[0, 0], +// [0, 2], +// [1, 1], +// [1, 2]]) +// +// +//go:linkname Argwhere py.argwhere +func Argwhere(input *py.Object) *py.Object +// +// as_strided(input, size, stride, storage_offset=None) -> Tensor +// +// Create a view of an existing `torch.Tensor` :attr:`input` with specified +// :attr:`size`, :attr:`stride` and :attr:`storage_offset`. +// +// .. warning:: +// Prefer using other view functions, like :meth:`torch.Tensor.expand`, +// to setting a view's strides manually with `as_strided`, as this +// function's behavior depends on the implementation of a tensor's storage. +// The constructed view of the storage must only refer to elements within +// the storage or a runtime error will be thrown, and if the view is +// "overlapped" (with multiple indices referring to the same element in +// memory) its behavior is undefined. +// +// Args: +// input (Tensor): the input tensor. +// size (tuple or ints): the shape of the output tensor +// stride (tuple or ints): the stride of the output tensor +// storage_offset (int, optional): the offset in the underlying storage of the output tensor. +// If ``None``, the storage_offset of the output tensor will match the input tensor. 
+// +// Example:: +// +// >>> x = torch.randn(3, 3) +// >>> x +// tensor([[ 0.9039, 0.6291, 1.0795], +// [ 0.1586, 2.1939, -0.4900], +// [-0.1909, -0.7503, 1.9355]]) +// >>> t = torch.as_strided(x, (2, 2), (1, 2)) +// >>> t +// tensor([[0.9039, 1.0795], +// [0.6291, 0.1586]]) +// >>> t = torch.as_strided(x, (2, 2), (1, 2), 1) +// tensor([[0.6291, 0.1586], +// [1.0795, 2.1939]]) +// +// +//go:linkname AsStrided py.as_strided +func AsStrided(input *py.Object, size *py.Object, stride *py.Object, storageOffset *py.Object) *py.Object +// None +// +//go:linkname AsStrided_ py.as_strided_ +func AsStrided_(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.as_strided`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname AsStridedCopy py.as_strided_copy +func AsStridedCopy(__llgo_va_list ...interface{}) *py.Object +// +// as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor +// +// Embeds the values of the :attr:`src` tensor into :attr:`input` along +// the elements corresponding to the result of calling +// input.as_strided(size, stride, storage_offset). +// +// This function returns a tensor with fresh storage; it does not +// return a view. +// +// Args: +// input (Tensor): the input tensor. +// size (tuple or ints): the shape of the output tensor +// stride (tuple or ints): the stride of the output tensor +// storage_offset (int, optional): the offset in the underlying storage of the output tensor +// +// .. note:: +// +// :attr:`src` must be of the proper size in order to be embedded +// into :attr:`input`. 
Specifically, it should have the same shape as +// `torch.as_strided(input, size, stride, storage_offset)` +// +// Example:: +// +// >>> a = torch.arange(4).reshape(2, 2) + 1 +// >>> a +// tensor([[1, 2], +// [3, 4]]) +// >>> b = torch.zeros(3, 3) +// >>> b +// tensor([[0., 0., 0.], +// [0., 0., 0.], +// [0., 0., 0.]]) +// >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2)) +// tensor([[1., 3., 2.], +// [4., 0., 0.], +// [0., 0., 0.]]) +// +// +// +//go:linkname AsStridedScatter py.as_strided_scatter +func AsStridedScatter(input *py.Object, src *py.Object, size *py.Object, stride *py.Object, storageOffset *py.Object) *py.Object +// +// as_tensor(data, dtype=None, device=None) -> Tensor +// +// Converts :attr:`data` into a tensor, sharing data and preserving autograd +// history if possible. +// +// If :attr:`data` is already a tensor with the requested dtype and device +// then :attr:`data` itself is returned, but if :attr:`data` is a +// tensor with a different dtype or device then it's copied as if using +// `data.to(dtype=dtype, device=device)`. +// +// If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a +// tensor is constructed using :func:`torch.from_numpy`. +// +// .. seealso:: +// +// :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`). +// +// +// Args: +// data (array_like): Initial data for the tensor. Can be a list, tuple, +// NumPy ``ndarray``, scalar, and other types. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, infers data type from :attr:`data`. +// device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor +// then the device of data is used. If None and data is not a tensor then +// the result tensor is constructed on the current device. 
+// +// +// Example:: +// +// >>> a = numpy.array([1, 2, 3]) +// >>> t = torch.as_tensor(a) +// >>> t +// tensor([ 1, 2, 3]) +// >>> t[0] = -1 +// >>> a +// array([-1, 2, 3]) +// +// >>> a = numpy.array([1, 2, 3]) +// >>> t = torch.as_tensor(a, device=torch.device('cuda')) +// >>> t +// tensor([ 1, 2, 3]) +// >>> t[0] = -1 +// >>> a +// array([1, 2, 3]) +// +// +//go:linkname AsTensor py.as_tensor +func AsTensor(data *py.Object, dtype *py.Object, device *py.Object) *py.Object +// +// asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor +// +// Converts :attr:`obj` to a tensor. +// +// :attr:`obj` can be one of: +// +// 1. a tensor +// 2. a NumPy array or a NumPy scalar +// 3. a DLPack capsule +// 4. an object that implements Python's buffer protocol +// 5. a scalar +// 6. a sequence of scalars +// +// When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will, +// by default, not require a gradient, have the same datatype as :attr:`obj`, be on the +// same device, and share memory with it. These properties can be controlled with the +// :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments. +// If the returned tensor is of a different datatype, on a different device, or a copy is +// requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad` +// is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is +// also a tensor with an autograd history then the returned tensor will have the same history. +// +// When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's +// buffer protocol then the buffer is interpreted as an array of bytes grouped according to +// the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is +// passed then the default floating point datatype is used, instead.) 
The returned tensor +// will have the specified datatype (or default floating point datatype if none is specified) +// and, by default, be on the CPU device and share memory with the buffer. +// +// When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on +// the CPU and that doesn't share its memory (i.e. ``copy=True``). By default datatype will +// be the PyTorch datatype corresponding to the NumPy's scalar's datatype. +// +// When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the +// returned tensor will, by default, infer its datatype from the scalar values, be on the +// current default device, and not share its memory. +// +// .. seealso:: +// +// :func:`torch.tensor` creates a tensor that always copies the data from the input object. +// :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays. +// :func:`torch.frombuffer` creates a tensor that always shares memory from objects that +// implement the buffer protocol. +// :func:`torch.from_dlpack` creates a tensor that always shares memory from +// DLPack capsules. +// +// Args: +// obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's +// buffer protocol, scalar, or sequence of scalars. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor. +// Default: ``None``, which causes the datatype of the returned tensor to be +// inferred from :attr:`obj`. +// copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`. +// Default: ``None``, which causes the returned tensor to share memory with :attr:`obj` +// whenever possible. If ``True`` then the returned tensor does not share its memory. +// If ``False`` then the returned tensor shares its memory with :attr:`obj` and an +// error is thrown if it cannot. +// device (:class:`torch.device`, optional): the device of the returned tensor. 
+// Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if +// :attr:`obj` is a Python sequence, the current default device will be used. +// requires_grad (bool, optional): whether the returned tensor requires grad. +// Default: ``False``, which causes the returned tensor not to require a gradient. +// If ``True``, then the returned tensor will require a gradient, and if :attr:`obj` +// is also a tensor with an autograd history then the returned tensor will have +// the same history. +// +// Example:: +// +// >>> a = torch.tensor([1, 2, 3]) +// >>> # Shares memory with tensor 'a' +// >>> b = torch.asarray(a) +// >>> a.data_ptr() == b.data_ptr() +// True +// >>> # Forces memory copy +// >>> c = torch.asarray(a, copy=True) +// >>> a.data_ptr() == c.data_ptr() +// False +// +// >>> a = torch.tensor([1., 2., 3.], requires_grad=True) +// >>> b = a + 2 +// >>> b +// tensor([3., 4., 5.], grad_fn=) +// >>> # Shares memory with tensor 'b', with no grad +// >>> c = torch.asarray(b) +// >>> c +// tensor([3., 4., 5.]) +// >>> # Shares memory with tensor 'b', retaining autograd history +// >>> d = torch.asarray(b, requires_grad=True) +// >>> d +// tensor([3., 4., 5.], grad_fn=) +// +// >>> array = numpy.array([1, 2, 3]) +// >>> # Shares memory with array 'array' +// >>> t1 = torch.asarray(array) +// >>> array.__array_interface__['data'][0] == t1.data_ptr() +// True +// >>> # Copies memory due to dtype mismatch +// >>> t2 = torch.asarray(array, dtype=torch.float32) +// >>> array.__array_interface__['data'][0] == t2.data_ptr() +// False +// +// >>> scalar = numpy.float64(0.5) +// >>> torch.asarray(scalar) +// tensor(0.5000, dtype=torch.float64) +// +// +//go:linkname Asarray py.asarray +func Asarray(obj *py.Object) *py.Object +// +// asin(input, *, out=None) -> Tensor +// +// Returns a new tensor with the arcsine of the elements of :attr:`input`. +// +// .. 
math:: +// \text{out}_{i} = \sin^{-1}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.5962, 1.4985, -0.4396, 1.4525]) +// >>> torch.asin(a) +// tensor([-0.6387, nan, -0.4552, nan]) +// +// +//go:linkname Asin py.asin +func Asin(input *py.Object) *py.Object +// None +// +//go:linkname Asin_ py.asin_ +func Asin_(__llgo_va_list ...interface{}) *py.Object +// +// asinh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \sinh^{-1}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ]) +// >>> torch.asinh(a) +// tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ]) +// +// +//go:linkname Asinh py.asinh +func Asinh(input *py.Object) *py.Object +// None +// +//go:linkname Asinh_ py.asinh_ +func Asinh_(__llgo_va_list ...interface{}) *py.Object +// +// atan(input, *, out=None) -> Tensor +// +// Returns a new tensor with the arctangent of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \tan^{-1}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) +// >>> torch.atan(a) +// tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) +// +// +//go:linkname Atan py.atan +func Atan(input *py.Object) *py.Object +// +// atan2(input, other, *, out=None) -> Tensor +// +// Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}` +// with consideration of the quadrant. 
Returns a new tensor with the signed angles +// in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})` +// and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second +// parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first +// parameter, is the y-coordinate.) +// +// The shapes of ``input`` and ``other`` must be +// :ref:`broadcastable `. +// +// Args: +// input (Tensor): the first input tensor +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) +// >>> torch.atan2(a, torch.randn(4)) +// tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) +// +// +//go:linkname Atan2 py.atan2 +func Atan2(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Atan_ py.atan_ +func Atan_(__llgo_va_list ...interface{}) *py.Object +// +// atanh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. +// +// Note: +// The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range +// will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is +// mapped to `+/-INF` respectively. +// +// .. math:: +// \text{out}_{i} = \tanh^{-1}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4).uniform_(-1, 1) +// >>> a +// tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ]) +// >>> torch.atanh(a) +// tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ]) +// +// +//go:linkname Atanh py.atanh +func Atanh(input *py.Object) *py.Object +// None +// +//go:linkname Atanh_ py.atanh_ +func Atanh_(__llgo_va_list ...interface{}) *py.Object +// +// Returns a 1-dimensional view of each input tensor with zero dimensions. 
+// Input tensors with one or more dimensions are returned as-is. +// +// Args: +// input (Tensor or list of Tensors) +// +// Returns: +// output (Tensor or tuple of Tensors) +// +// Example:: +// +// >>> x = torch.arange(2) +// >>> x +// tensor([0, 1]) +// >>> torch.atleast_1d(x) +// tensor([0, 1]) +// >>> x = torch.tensor(1.) +// >>> x +// tensor(1.) +// >>> torch.atleast_1d(x) +// tensor([1.]) +// >>> x = torch.tensor(0.5) +// >>> y = torch.tensor(1.) +// >>> torch.atleast_1d((x, y)) +// (tensor([0.5000]), tensor([1.])) +// +// +//go:linkname Atleast1d py.atleast_1d +func Atleast1d(__llgo_va_list ...interface{}) *py.Object +// +// Returns a 2-dimensional view of each input tensor with zero dimensions. +// Input tensors with two or more dimensions are returned as-is. +// +// Args: +// input (Tensor or list of Tensors) +// +// Returns: +// output (Tensor or tuple of Tensors) +// +// Example:: +// +// >>> x = torch.tensor(1.) +// >>> x +// tensor(1.) +// >>> torch.atleast_2d(x) +// tensor([[1.]]) +// >>> x = torch.arange(4).view(2, 2) +// >>> x +// tensor([[0, 1], +// [2, 3]]) +// >>> torch.atleast_2d(x) +// tensor([[0, 1], +// [2, 3]]) +// >>> x = torch.tensor(0.5) +// >>> y = torch.tensor(1.) +// >>> torch.atleast_2d((x, y)) +// (tensor([[0.5000]]), tensor([[1.]])) +// +// +//go:linkname Atleast2d py.atleast_2d +func Atleast2d(__llgo_va_list ...interface{}) *py.Object +// +// Returns a 3-dimensional view of each input tensor with zero dimensions. +// Input tensors with three or more dimensions are returned as-is. 
+// +// Args: +// input (Tensor or list of Tensors) +// +// Returns: +// output (Tensor or tuple of Tensors) +// +// Example: +// +// >>> x = torch.tensor(0.5) +// >>> x +// tensor(0.5000) +// >>> torch.atleast_3d(x) +// tensor([[[0.5000]]]) +// >>> y = torch.arange(4).view(2, 2) +// >>> y +// tensor([[0, 1], +// [2, 3]]) +// >>> torch.atleast_3d(y) +// tensor([[[0], +// [1]], +// +// [[2], +// [3]]]) +// >>> x = torch.tensor(1).view(1, 1, 1) +// >>> x +// tensor([[[1]]]) +// >>> torch.atleast_3d(x) +// tensor([[[1]]]) +// >>> x = torch.tensor(0.5) +// >>> y = torch.tensor(1.) +// >>> torch.atleast_3d((x, y)) +// (tensor([[[0.5000]]]), tensor([[[1.]]])) +// +// +//go:linkname Atleast3d py.atleast_3d +func Atleast3d(__llgo_va_list ...interface{}) *py.Object +// +// avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor +// +// Applies a 1D average pooling over an input signal composed of several +// input planes. +// +// See :class:`~torch.nn.AvgPool1d` for details and output shape. +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` +// kernel_size: the size of the window. Can be a single number or a +// tuple `(kW,)` +// stride: the stride of the window. Can be a single number or a tuple +// `(sW,)`. Default: :attr:`kernel_size` +// padding: implicit zero paddings on both sides of the input. Can be a +// single number or a tuple `(padW,)`. Default: 0 +// ceil_mode: when True, will use `ceil` instead of `floor` to compute the +// output shape. Default: ``False`` +// count_include_pad: when True, will include the zero-padding in the +// averaging calculation. 
Default: ``True`` +// +// Examples:: +// +// >>> # pool of square window of size=3, stride=2 +// >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32) +// >>> F.avg_pool1d(input, kernel_size=3, stride=2) +// tensor([[[ 2., 4., 6.]]]) +// +// +// +//go:linkname AvgPool1d py.avg_pool1d +func AvgPool1d(input *py.Object, kernelSize *py.Object, stride *py.Object, padding *py.Object, ceilMode *py.Object, countIncludePad *py.Object) *py.Object +// +// baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor +// +// Performs a batch matrix-matrix product of matrices in :attr:`batch1` +// and :attr:`batch2`. +// :attr:`input` is added to the final result. +// +// :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same +// number of matrices. +// +// If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +// :math:`(b \times m \times p)` tensor, then :attr:`input` must be +// :ref:`broadcastable ` with a +// :math:`(b \times n \times p)` tensor and :attr:`out` will be a +// :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the +// same as the scaling factors used in :meth:`torch.addbmm`. +// +// .. math:: +// \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) +// +// If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +// it will not be propagated. +// +// For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +// :attr:`alpha` must be real numbers, otherwise they should be integers. +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. 
+// +// Args: +// input (Tensor): the tensor to be added +// batch1 (Tensor): the first batch of matrices to be multiplied +// batch2 (Tensor): the second batch of matrices to be multiplied +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) +// alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> M = torch.randn(10, 3, 5) +// >>> batch1 = torch.randn(10, 3, 4) +// >>> batch2 = torch.randn(10, 4, 5) +// >>> torch.baddbmm(M, batch1, batch2).size() +// torch.Size([10, 3, 5]) +// +// +//go:linkname Baddbmm py.baddbmm +func Baddbmm(input *py.Object, batch1 *py.Object, batch2 *py.Object) *py.Object +// +// bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Bartlett window function. +// +// .. math:: +// w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} +// \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ +// 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ +// \end{cases}, +// +// where :math:`N` is the full window size. +// +// The input :attr:`window_length` is a positive integer controlling the +// returned window size. :attr:`periodic` flag determines whether the returned +// window trims off the last duplicate value from the symmetric window and is +// ready to be used as a periodic window with functions like +// :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +// above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +// ``torch.bartlett_window(L, periodic=True)`` equal to +// ``torch.bartlett_window(L + 1, periodic=False)[:-1])``. +// +// .. note:: +// If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. 
+// +// Arguments: +// window_length (int): the size of returned window +// periodic (bool, optional): If True, returns a window to be used as periodic +// function. If False, return a symmetric window. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). Only floating point types are supported. +// layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only +// ``torch.strided`` (dense layout) is supported. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Returns: +// Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window +// +// +// +//go:linkname BartlettWindow py.bartlett_window +func BartlettWindow(windowLength *py.Object, periodic *py.Object) *py.Object +// None +// +//go:linkname BatchNorm py.batch_norm +func BatchNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormBackwardElemt py.batch_norm_backward_elemt +func BatchNormBackwardElemt(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormBackwardReduce py.batch_norm_backward_reduce +func BatchNormBackwardReduce(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormElemt py.batch_norm_elemt +func BatchNormElemt(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormGatherStats py.batch_norm_gather_stats +func BatchNormGatherStats(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormGatherStatsWithCounts py.batch_norm_gather_stats_with_counts +func BatchNormGatherStatsWithCounts(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormStats py.batch_norm_stats +func BatchNormStats(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname BatchNormUpdateStats py.batch_norm_update_stats +func BatchNormUpdateStats(__llgo_va_list ...interface{}) *py.Object +// +// bernoulli(input, *, generator=None, out=None) -> Tensor +// +// Draws binary random numbers (0 or 1) from a Bernoulli distribution. +// +// The :attr:`input` tensor should be a tensor containing probabilities +// to be used for drawing the binary random number. +// Hence, all values in :attr:`input` have to be in the range: +// :math:`0 \leq \text{input}_i \leq 1`. +// +// The :math:`\text{i}^{th}` element of the output tensor will draw a +// value :math:`1` according to the :math:`\text{i}^{th}` probability value given +// in :attr:`input`. +// +// .. 
math:: +// \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) +// +// The returned :attr:`out` tensor only has values 0 or 1 and is of the same +// shape as :attr:`input`. +// +// :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating +// point ``dtype``. +// +// Args: +// input (Tensor): the input tensor of probability values for the Bernoulli distribution +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] +// >>> a +// tensor([[ 0.1737, 0.0950, 0.3609], +// [ 0.7148, 0.0289, 0.2676], +// [ 0.9456, 0.8937, 0.7202]]) +// >>> torch.bernoulli(a) +// tensor([[ 1., 0., 0.], +// [ 0., 0., 0.], +// [ 1., 1., 1.]]) +// +// >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 +// >>> torch.bernoulli(a) +// tensor([[ 1., 1., 1.], +// [ 1., 1., 1.], +// [ 1., 1., 1.]]) +// >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 +// >>> torch.bernoulli(a) +// tensor([[ 0., 0., 0.], +// [ 0., 0., 0.], +// [ 0., 0., 0.]]) +// +// +//go:linkname Bernoulli py.bernoulli +func Bernoulli(input *py.Object) *py.Object +// +// bilinear(input1, input2, weight, bias=None) -> Tensor +// +// Applies a bilinear transformation to the incoming data: +// :math:`y = x_1^T A x_2 + b` +// +// Shape: +// +// - input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` +// and :math:`*` means any number of additional dimensions. +// All but the last dimension of the inputs should be the same. 
+// - input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}` +// - weight: :math:`(\text{out\_features}, \text{in1\_features}, +// \text{in2\_features})` +// - bias: :math:`(\text{out\_features})` +// - output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}` +// and all but the last dimension are the same shape as the input. +// +// +//go:linkname Bilinear py.bilinear +func Bilinear(input1 *py.Object, input2 *py.Object, weight *py.Object, bias *py.Object) *py.Object +// None +// +//go:linkname BinaryCrossEntropyWithLogits py.binary_cross_entropy_with_logits +func BinaryCrossEntropyWithLogits(__llgo_va_list ...interface{}) *py.Object +// +// bincount(input, weights=None, minlength=0) -> Tensor +// +// Count the frequency of each value in an array of non-negative ints. +// +// The number of bins (size 1) is one larger than the largest value in +// :attr:`input` unless :attr:`input` is empty, in which case the result is a +// tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least +// :attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size +// :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``, +// ``out[n] += weights[i]`` if :attr:`weights` is specified else +// ``out[n] += 1``. +// +// Note: +// This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. +// +// Arguments: +// input (Tensor): 1-d int tensor +// weights (Tensor): optional, weight for each value in the input tensor. +// Should be of same size as input tensor. +// minlength (int): optional, minimum number of bins. Should be non-negative. 
+// +// Returns: +// output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if +// :attr:`input` is non-empty, else ``Size(0)`` +// +// Example:: +// +// >>> input = torch.randint(0, 8, (5,), dtype=torch.int64) +// >>> weights = torch.linspace(0, 1, steps=5) +// >>> input, weights +// (tensor([4, 3, 6, 3, 4]), +// tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) +// +// >>> torch.bincount(input) +// tensor([0, 0, 0, 2, 2, 0, 1]) +// +// >>> input.bincount(weights) +// tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000]) +// +// +//go:linkname Bincount py.bincount +func Bincount(input *py.Object, weights *py.Object, minlength *py.Object) *py.Object +// None +// +//go:linkname Binomial py.binomial +func Binomial(__llgo_va_list ...interface{}) *py.Object +// +// bitwise_and(input, other, *, out=None) -> Tensor +// +// Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of +// integral or Boolean types. For bool tensors, it computes the logical AND. +// +// Args: +// input: the first input tensor +// other: the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) +// tensor([1, 0, 3], dtype=torch.int8) +// >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) +// tensor([ False, True, False]) +// +// +//go:linkname BitwiseAnd py.bitwise_and +func BitwiseAnd(input *py.Object, other *py.Object) *py.Object +// +// bitwise_left_shift(input, other, *, out=None) -> Tensor +// +// Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. +// The input tensor must be of integral type. This operator supports +// :ref:`broadcasting to a common shape ` and +// :ref:`type promotion `. +// +// The operation applied is: +// +// .. 
math:: +// \text{out}_i = \text{input}_i << \text{other}_i +// +// Args: +// input (Tensor or Scalar): the first input tensor +// other (Tensor or Scalar): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) +// tensor([-2, -2, 24], dtype=torch.int8) +// +// +//go:linkname BitwiseLeftShift py.bitwise_left_shift +func BitwiseLeftShift(input *py.Object, other *py.Object) *py.Object +// +// bitwise_not(input, *, out=None) -> Tensor +// +// Computes the bitwise NOT of the given input tensor. The input tensor must be of +// integral or Boolean types. For bool tensors, it computes the logical NOT. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8)) +// tensor([ 0, 1, -4], dtype=torch.int8) +// +// +//go:linkname BitwiseNot py.bitwise_not +func BitwiseNot(input *py.Object) *py.Object +// +// bitwise_or(input, other, *, out=None) -> Tensor +// +// Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of +// integral or Boolean types. For bool tensors, it computes the logical OR. +// +// Args: +// input: the first input tensor +// other: the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) +// tensor([-1, -2, 3], dtype=torch.int8) +// >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) +// tensor([ True, True, False]) +// +// +//go:linkname BitwiseOr py.bitwise_or +func BitwiseOr(input *py.Object, other *py.Object) *py.Object +// +// bitwise_right_shift(input, other, *, out=None) -> Tensor +// +// Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. +// The input tensor must be of integral type. This operator supports +// :ref:`broadcasting to a common shape ` and +// :ref:`type promotion `. +// In any case, if the value of the right operand is negative or is greater +// or equal to the number of bits in the promoted left operand, the behavior is undefined. +// +// The operation applied is: +// +// .. math:: +// \text{out}_i = \text{input}_i >> \text{other}_i +// +// Args: +// input (Tensor or Scalar): the first input tensor +// other (Tensor or Scalar): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) +// tensor([-1, -7, 3], dtype=torch.int8) +// +// +//go:linkname BitwiseRightShift py.bitwise_right_shift +func BitwiseRightShift(input *py.Object, other *py.Object) *py.Object +// +// bitwise_xor(input, other, *, out=None) -> Tensor +// +// Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of +// integral or Boolean types. For bool tensors, it computes the logical XOR. +// +// Args: +// input: the first input tensor +// other: the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) +// tensor([-2, -2, 0], dtype=torch.int8) +// >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) +// tensor([ True, False, False]) +// +// +//go:linkname BitwiseXor py.bitwise_xor +func BitwiseXor(input *py.Object, other *py.Object) *py.Object +// +// blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Blackman window function. +// +// .. math:: +// w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) +// +// where :math:`N` is the full window size. +// +// The input :attr:`window_length` is a positive integer controlling the +// returned window size. :attr:`periodic` flag determines whether the returned +// window trims off the last duplicate value from the symmetric window and is +// ready to be used as a periodic window with functions like +// :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +// above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +// ``torch.blackman_window(L, periodic=True)`` equal to +// ``torch.blackman_window(L + 1, periodic=False)[:-1])``. +// +// .. note:: +// If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +// +// Arguments: +// window_length (int): the size of returned window +// periodic (bool, optional): If True, returns a window to be used as periodic +// function. If False, return a symmetric window. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). Only floating point types are supported. +// layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. 
Only +// ``torch.strided`` (dense layout) is supported. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Returns: +// Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window +// +// +// +//go:linkname BlackmanWindow py.blackman_window +func BlackmanWindow(windowLength *py.Object, periodic *py.Object) *py.Object +// Create a block diagonal matrix from provided tensors. +// +// Args: +// *tensors: One or more tensors with 0, 1, or 2 dimensions. +// +// Returns: +// Tensor: A 2 dimensional tensor with all the input tensors arranged in +// order such that their upper left and lower right corners are +// diagonally adjacent. All other elements are set to 0. +// +// Example:: +// +// >>> import torch +// >>> A = torch.tensor([[0, 1], [1, 0]]) +// >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]]) +// >>> C = torch.tensor(7) +// >>> D = torch.tensor([1, 2, 3]) +// >>> E = torch.tensor([[4], [5], [6]]) +// >>> torch.block_diag(A, B, C, D, E) +// tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], +// [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], +// [0, 0, 3, 4, 5, 0, 0, 0, 0, 0], +// [0, 0, 6, 7, 8, 0, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 7, 0, 0, 0, 0], +// [0, 0, 0, 0, 0, 0, 1, 2, 3, 0], +// [0, 0, 0, 0, 0, 0, 0, 0, 0, 4], +// [0, 0, 0, 0, 0, 0, 0, 0, 0, 5], +// [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]]) +// +// +//go:linkname BlockDiag py.block_diag +func BlockDiag(__llgo_va_list ...interface{}) *py.Object +// +// bmm(input, mat2, *, out=None) -> Tensor +// +// Performs a batch matrix-matrix product of matrices stored in :attr:`input` +// and :attr:`mat2`. 
+// +// :attr:`input` and :attr:`mat2` must be 3-D tensors each containing +// the same number of matrices. +// +// If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a +// :math:`(b \times m \times p)` tensor, :attr:`out` will be a +// :math:`(b \times n \times p)` tensor. +// +// .. math:: +// \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. +// +// .. note:: This function does not :ref:`broadcast `. +// For broadcasting matrix products, see :func:`torch.matmul`. +// +// Args: +// input (Tensor): the first batch of matrices to be multiplied +// mat2 (Tensor): the second batch of matrices to be multiplied +// +// Keyword Args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> input = torch.randn(10, 3, 4) +// >>> mat2 = torch.randn(10, 4, 5) +// >>> res = torch.bmm(input, mat2) +// >>> res.size() +// torch.Size([10, 3, 5]) +// +// +//go:linkname Bmm py.bmm +func Bmm(input *py.Object, mat2 *py.Object) *py.Object +// broadcast_tensors(*tensors) -> List of Tensors +// +// Broadcasts the given tensors according to :ref:`broadcasting-semantics`. +// +// Args: +// *tensors: any number of tensors of the same type +// +// .. warning:: +// +// More than one element of a broadcasted tensor may refer to a single +// memory location. As a result, in-place operations (especially ones that +// are vectorized) may result in incorrect behavior. If you need to write +// to the tensors, please clone them first. 
+// +// Example:: +// +// >>> x = torch.arange(3).view(1, 3) +// >>> y = torch.arange(2).view(2, 1) +// >>> a, b = torch.broadcast_tensors(x, y) +// >>> a.size() +// torch.Size([2, 3]) +// >>> a +// tensor([[0, 1, 2], +// [0, 1, 2]]) +// +// +//go:linkname BroadcastTensors py.broadcast_tensors +func BroadcastTensors(__llgo_va_list ...interface{}) *py.Object +// +// broadcast_to(input, shape) -> Tensor +// +// Broadcasts :attr:`input` to the shape :attr:`\shape`. +// Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details. +// +// Args: +// input (Tensor): the input tensor. +// shape (list, tuple, or :class:`torch.Size`): the new shape. +// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3]) +// >>> torch.broadcast_to(x, (3, 3)) +// tensor([[1, 2, 3], +// [1, 2, 3], +// [1, 2, 3]]) +// +// +//go:linkname BroadcastTo py.broadcast_to +func BroadcastTo(input *py.Object, shape *py.Object) *py.Object +// +// bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor +// +// Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the +// boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size +// as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that +// this behavior is opposite the behavior of +// `numpy.digitize `_. +// More formally, the returned index satisfies the following rules: +// +// .. list-table:: +// :widths: 15 85 +// :header-rows: 1 +// +// * - :attr:`right` +// - *returned index satisfies* +// * - False +// - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` +// * - True +// - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` +// +// Args: +// input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). +// boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. 
+// +// Keyword args: +// out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. +// Default value is False, i.e. default output data type is torch.int64. +// right (bool, optional): if False, return the first suitable location that is found. If True, return the +// last such index. If no suitable index found, return 0 for non-numerical value +// (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). +// In other words, if False, gets the lower bound index for each value in :attr:`input` +// from :attr:`boundaries`. If True, gets the upper bound index instead. +// Default value is False. +// out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. +// +// +// Example:: +// +// >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) +// >>> boundaries +// tensor([1, 3, 5, 7, 9]) +// >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) +// >>> v +// tensor([[3, 6, 9], +// [3, 6, 9]]) +// >>> torch.bucketize(v, boundaries) +// tensor([[1, 3, 4], +// [1, 3, 4]]) +// >>> torch.bucketize(v, boundaries, right=True) +// tensor([[2, 3, 5], +// [2, 3, 5]]) +// +// +//go:linkname Bucketize py.bucketize +func Bucketize(input *py.Object, boundaries *py.Object) *py.Object +// +// can_cast(from, to) -> bool +// +// Determines if a type conversion is allowed under PyTorch casting rules +// described in the type promotion :ref:`documentation `. +// +// Args: +// from (dtype): The original :class:`torch.dtype`. +// to (dtype): The target :class:`torch.dtype`. +// +// Example:: +// +// >>> torch.can_cast(torch.double, torch.float) +// True +// >>> torch.can_cast(torch.float, torch.int) +// False +// +// +//go:linkname CanCast py.can_cast +func CanCast(from *py.Object, to *py.Object) *py.Object +// Do cartesian product of the given sequence of tensors. The behavior is similar to +// python's `itertools.product`. +// +// Args: +// *tensors: any number of 1 dimensional tensors. 
+// +// Returns: +// Tensor: A tensor equivalent to converting all the input tensors into lists, +// do `itertools.product` on these lists, and finally convert the resulting list +// into tensor. +// +// Example:: +// +// >>> import itertools +// >>> a = [1, 2, 3] +// >>> b = [4, 5] +// >>> list(itertools.product(a, b)) +// [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] +// >>> tensor_a = torch.tensor(a) +// >>> tensor_b = torch.tensor(b) +// >>> torch.cartesian_prod(tensor_a, tensor_b) +// tensor([[1, 4], +// [1, 5], +// [2, 4], +// [2, 5], +// [3, 4], +// [3, 5]]) +// +// +//go:linkname CartesianProd py.cartesian_prod +func CartesianProd(__llgo_va_list ...interface{}) *py.Object +// +// cat(tensors, dim=0, *, out=None) -> Tensor +// +// Concatenates the given sequence of :attr:`seq` tensors in the given dimension. +// All tensors must either have the same shape (except in the concatenating +// dimension) or be empty. +// +// :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` +// and :func:`torch.chunk`. +// +// :func:`torch.cat` can be best understood via examples. +// +// .. seealso:: +// +// :func:`torch.stack` concatenates the given sequence along a new dimension. +// +// Args: +// tensors (sequence of Tensors): any python sequence of tensors of the same type. +// Non-empty tensors provided must have the same shape, except in the +// cat dimension. +// dim (int, optional): the dimension over which the tensors are concatenated +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> x = torch.randn(2, 3) +// >>> x +// tensor([[ 0.6580, -1.0969, -0.4614], +// [-0.1034, -0.5790, 0.1497]]) +// >>> torch.cat((x, x, x), 0) +// tensor([[ 0.6580, -1.0969, -0.4614], +// [-0.1034, -0.5790, 0.1497], +// [ 0.6580, -1.0969, -0.4614], +// [-0.1034, -0.5790, 0.1497], +// [ 0.6580, -1.0969, -0.4614], +// [-0.1034, -0.5790, 0.1497]]) +// >>> torch.cat((x, x, x), 1) +// tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, +// -1.0969, -0.4614], +// [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, +// -0.5790, 0.1497]]) +// +// +//go:linkname Cat py.cat +func Cat(tensors *py.Object, dim *py.Object) *py.Object +// None +// +//go:linkname CcolIndicesCopy py.ccol_indices_copy +func CcolIndicesCopy(__llgo_va_list ...interface{}) *py.Object +// Computes batched the p-norm distance between each pair of the two collections of row vectors. +// +// Args: +// x1 (Tensor): input tensor of shape :math:`B \times P \times M`. +// x2 (Tensor): input tensor of shape :math:`B \times R \times M`. +// p: p value for the p-norm distance to calculate between each vector pair +// :math:`\in [0, \infty]`. +// compute_mode: +// 'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate +// euclidean distance (p = 2) if P > 25 or R > 25 +// 'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate +// euclidean distance (p = 2) +// 'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate +// euclidean distance (p = 2) +// Default: use_mm_for_euclid_dist_if_necessary. +// +// If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the +// output will have shape :math:`B \times P \times R`. +// +// This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)` +// if :math:`p \in (0, \infty)`. 
When :math:`p = 0` it is equivalent to +// `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest +// scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`. +// +// Example: +// +// >>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]]) +// >>> a +// tensor([[ 0.9041, 0.0196], +// [-0.3108, -2.4423], +// [-0.4821, 1.0590]]) +// >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]]) +// >>> b +// tensor([[-2.1763, -0.4713], +// [-0.6986, 1.3702]]) +// >>> torch.cdist(a, b, p=2) +// tensor([[3.1193, 2.0959], +// [2.7138, 3.8322], +// [2.2830, 0.3791]]) +// +// +//go:linkname Cdist py.cdist +func Cdist(x1 *py.Object, x2 *py.Object, p *py.Object, computeMode *py.Object) *py.Object +// +// ceil(input, *, out=None) -> Tensor +// +// Returns a new tensor with the ceil of the elements of :attr:`input`, +// the smallest integer greater than or equal to each element. +// +// For integer inputs, follows the array-api convention of returning a +// copy of the input tensor. +// +// .. math:: +// \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.6341, -1.4208, -1.0900, 0.5826]) +// >>> torch.ceil(a) +// tensor([-0., -1., -1., 1.]) +// +// +//go:linkname Ceil py.ceil +func Ceil(input *py.Object) *py.Object +// None +// +//go:linkname Ceil_ py.ceil_ +func Ceil_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Celu py.celu +func Celu(__llgo_va_list ...interface{}) *py.Object +// +// celu_(input, alpha=1.) -> Tensor +// +// In-place version of :func:`~celu`. +// +// +//go:linkname Celu_ py.celu_ +func Celu_(input *py.Object, alpha *py.Object) *py.Object +// Returns the matrix product of the :math:`N` 2-D tensors. 
This product is efficiently computed +// using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms +// of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N` +// needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. +// If :math:`N` is 1, then this is a no-op - the original matrix is returned as is. +// +// .. warning:: +// +// :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release. +// Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors +// rather than multiple arguments. +// +// Args: +// matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined. +// out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``. +// +// Returns: +// Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product +// would be of dimensions :math:`p_{1} \times p_{N + 1}`. +// +// Example:: +// +// >>> # xdoctest: +SKIP +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> a = torch.randn(3, 4) +// >>> b = torch.randn(4, 5) +// >>> c = torch.randn(5, 6) +// >>> d = torch.randn(6, 7) +// >>> # will raise a deprecation warning +// >>> torch.chain_matmul(a, b, c, d) +// tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614], +// [ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163], +// [ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]]) +// +// .. 
_`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition +// +// +//go:linkname ChainMatmul py.chain_matmul +func ChainMatmul(__llgo_va_list ...interface{}) *py.Object +// +// channel_shuffle(input, groups) -> Tensor +// +// Divide the channels in a tensor of shape :math:`(*, C , H, W)` +// into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`, +// while keeping the original tensor shape. +// +// See :class:`~torch.nn.ChannelShuffle` for details. +// +// Args: +// input (Tensor): the input tensor +// groups (int): number of groups to divide channels in and rearrange. +// +// Examples:: +// +// >>> input = torch.randn(1, 4, 2, 2) +// >>> print(input) +// [[[[1, 2], +// [3, 4]], +// [[5, 6], +// [7, 8]], +// [[9, 10], +// [11, 12]], +// [[13, 14], +// [15, 16]], +// ]] +// >>> output = torch.nn.functional.channel_shuffle(input, 2) +// >>> print(output) +// [[[[1, 2], +// [3, 4]], +// [[9, 10], +// [11, 12]], +// [[5, 6], +// [7, 8]], +// [[13, 14], +// [15, 16]], +// ]] +// +// +//go:linkname ChannelShuffle py.channel_shuffle +func ChannelShuffle(input *py.Object, groups *py.Object) *py.Object +// +// cholesky(input, upper=False, *, out=None) -> Tensor +// +// Computes the Cholesky decomposition of a symmetric positive-definite +// matrix :math:`A` or for batches of symmetric positive-definite matrices. +// +// If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and +// the decomposition has the form: +// +// .. math:: +// +// A = U^TU +// +// If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and +// the decomposition has the form: +// +// .. math:: +// +// A = LL^T +// +// If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite +// matrices, then the returned tensor will be composed of upper-triangular Cholesky factors +// of each of the individual matrices. 
Similarly, when :attr:`upper` is ``False``, the returned +// tensor will be composed of lower-triangular Cholesky factors of each of the individual +// matrices. +// +// .. warning:: +// +// :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky` +// and will be removed in a future PyTorch release. +// +// ``L = torch.cholesky(A)`` should be replaced with +// +// .. code:: python +// +// L = torch.linalg.cholesky(A) +// +// ``U = torch.cholesky(A, upper=True)`` should be replaced with +// +// .. code:: python +// +// U = torch.linalg.cholesky(A).mH +// +// This transform will produce equivalent results for all valid (symmetric positive definite) inputs. +// +// Args: +// input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more +// batch dimensions consisting of symmetric positive-definite matrices. +// upper (bool, optional): flag that indicates whether to return a +// upper or lower triangular matrix. Default: ``False`` +// +// Keyword args: +// out (Tensor, optional): the output matrix +// +// Example:: +// +// >>> a = torch.randn(3, 3) +// >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite +// >>> l = torch.cholesky(a) +// >>> a +// tensor([[ 2.4112, -0.7486, 1.4551], +// [-0.7486, 1.3544, 0.1294], +// [ 1.4551, 0.1294, 1.6724]]) +// >>> l +// tensor([[ 1.5528, 0.0000, 0.0000], +// [-0.4821, 1.0592, 0.0000], +// [ 0.9371, 0.5487, 0.7023]]) +// >>> l @ l.mT +// tensor([[ 2.4112, -0.7486, 1.4551], +// [-0.7486, 1.3544, 0.1294], +// [ 1.4551, 0.1294, 1.6724]]) +// >>> a = torch.randn(3, 2, 2) # Example for batched input +// >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite +// >>> l = torch.cholesky(a) +// >>> z = l @ l.mT +// >>> torch.dist(z, a) +// tensor(2.3842e-07) +// +// +//go:linkname Cholesky py.cholesky +func Cholesky(input *py.Object, upper *py.Object) *py.Object +// +// cholesky_inverse(L, upper=False, *, out=None) -> Tensor +// +// Computes the inverse of a complex Hermitian 
or real symmetric +// positive-definite matrix given its Cholesky decomposition. +// +// Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +// and :math:`L` its Cholesky decomposition such that: +// +// .. math:: +// +// A = LL^{\text{H}} +// +// where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +// and the transpose when :math:`L` is real-valued. +// +// Computes the inverse matrix :math:`A^{-1}`. +// +// Supports input of float, double, cfloat and cdouble dtypes. +// Also supports batches of matrices, and if :math:`A` is a batch of matrices +// then the output has the same batch dimensions. +// +// Args: +// L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions +// consisting of lower or upper triangular Cholesky decompositions of +// symmetric or Hermitian positive-definite matrices. +// upper (bool, optional): flag that indicates whether :math:`L` is lower triangular +// or upper triangular. Default: ``False`` +// +// Keyword args: +// out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. 
+// +// Example:: +// +// >>> A = torch.randn(3, 3) +// >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix +// >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition +// >>> torch.cholesky_inverse(L) +// tensor([[ 1.9314, 1.2251, -0.0889], +// [ 1.2251, 2.4439, 0.2122], +// [-0.0889, 0.2122, 0.1412]]) +// >>> A.inverse() +// tensor([[ 1.9314, 1.2251, -0.0889], +// [ 1.2251, 2.4439, 0.2122], +// [-0.0889, 0.2122, 0.1412]]) +// +// >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) +// >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices +// >>> L = torch.linalg.cholesky(A) +// >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L)) +// tensor(5.6358e-7) +// +// +//go:linkname CholeskyInverse py.cholesky_inverse +func CholeskyInverse(L *py.Object, upper *py.Object) *py.Object +// +// cholesky_solve(B, L, upper=False, *, out=None) -> Tensor +// +// Computes the solution of a system of linear equations with complex Hermitian +// or real symmetric positive-definite lhs given its Cholesky decomposition. +// +// Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +// and :math:`L` its Cholesky decomposition such that: +// +// .. math:: +// +// A = LL^{\text{H}} +// +// where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +// and the transpose when :math:`L` is real-valued. +// +// Returns the solution :math:`X` of the following linear system: +// +// .. math:: +// +// AX = B +// +// Supports inputs of float, double, cfloat and cdouble dtypes. +// Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices +// then the output has the same batch dimensions. 
+// +// Args: +// B (Tensor): right-hand side tensor of shape `(*, n, k)` +// where :math:`*` is zero or more batch dimensions +// L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions +// consisting of lower or upper triangular Cholesky decompositions of +// symmetric or Hermitian positive-definite matrices. +// upper (bool, optional): flag that indicates whether :math:`L` is lower triangular +// or upper triangular. Default: ``False``. +// +// Keyword args: +// out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. +// +// Example:: +// +// >>> A = torch.randn(3, 3) +// >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix +// >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition +// >>> B = torch.randn(3, 2) +// >>> torch.cholesky_solve(B, L) +// tensor([[ -8.1625, 19.6097], +// [ -5.8398, 14.2387], +// [ -4.3771, 10.4173]]) +// >>> A.inverse() @ B +// tensor([[ -8.1626, 19.6097], +// [ -5.8398, 14.2387], +// [ -4.3771, 10.4173]]) +// +// >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) +// >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices +// >>> L = torch.linalg.cholesky(A) +// >>> B = torch.randn(2, 1, dtype=torch.complex64) +// >>> X = torch.cholesky_solve(B, L) +// >>> torch.dist(X, A.inverse() @ B) +// tensor(1.6881e-5) +// +// +//go:linkname CholeskySolve py.cholesky_solve +func CholeskySolve(B *py.Object, L *py.Object, upper *py.Object) *py.Object +// None +// +//go:linkname ChooseQparamsOptimized py.choose_qparams_optimized +func ChooseQparamsOptimized(__llgo_va_list ...interface{}) *py.Object +// +// chunk(input, chunks, dim=0) -> List of Tensors +// +// Attempts to split a tensor into the specified number of chunks. Each chunk is a view of +// the input tensor. +// +// +// .. note:: +// +// This function may return fewer than the specified number of chunks! +// +// .. 
seealso:: +// +// :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks +// +// If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`, +// all returned chunks will be the same size. +// If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`, +// all returned chunks will be the same size, except the last one. +// If such division is not possible, this function may return fewer +// than the specified number of chunks. +// +// Arguments: +// input (Tensor): the tensor to split +// chunks (int): number of chunks to return +// dim (int): dimension along which to split the tensor +// +// Example: +// >>> torch.arange(11).chunk(6) +// (tensor([0, 1]), +// tensor([2, 3]), +// tensor([4, 5]), +// tensor([6, 7]), +// tensor([8, 9]), +// tensor([10])) +// >>> torch.arange(12).chunk(6) +// (tensor([0, 1]), +// tensor([2, 3]), +// tensor([4, 5]), +// tensor([6, 7]), +// tensor([8, 9]), +// tensor([10, 11])) +// >>> torch.arange(13).chunk(6) +// (tensor([0, 1, 2]), +// tensor([3, 4, 5]), +// tensor([6, 7, 8]), +// tensor([ 9, 10, 11]), +// tensor([12])) +// +// +//go:linkname Chunk py.chunk +func Chunk(input *py.Object, chunks *py.Object, dim *py.Object) *py.Object +// +// clamp(input, min=None, max=None, *, out=None) -> Tensor +// +// Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. +// Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: +// +// .. math:: +// y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) +// +// If :attr:`min` is ``None``, there is no lower bound. +// Or, if :attr:`max` is ``None`` there is no upper bound. +// +// +// .. note:: +// If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` +// sets all elements in :attr:`input` to the value of :attr:`max`. +// +// Args: +// input (Tensor): the input tensor. 
+// min (Number or Tensor, optional): lower-bound of the range to be clamped to +// max (Number or Tensor, optional): upper-bound of the range to be clamped to +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-1.7120, 0.1734, -0.0478, -0.0922]) +// >>> torch.clamp(a, min=-0.5, max=0.5) +// tensor([-0.5000, 0.1734, -0.0478, -0.0922]) +// +// >>> min = torch.linspace(-1, 1, steps=4) +// >>> torch.clamp(a, min=min) +// tensor([-1.0000, 0.1734, 0.3333, 1.0000]) +// +// +// +//go:linkname Clamp py.clamp +func Clamp(input *py.Object, min *py.Object, max *py.Object) *py.Object +// None +// +//go:linkname Clamp_ py.clamp_ +func Clamp_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ClampMax py.clamp_max +func ClampMax(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ClampMax_ py.clamp_max_ +func ClampMax_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ClampMin py.clamp_min +func ClampMin(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ClampMin_ py.clamp_min_ +func ClampMin_(__llgo_va_list ...interface{}) *py.Object +// +// clip(input, min=None, max=None, *, out=None) -> Tensor +// +// Alias for :func:`torch.clamp`. +// +// +//go:linkname Clip py.clip +func Clip(input *py.Object, min *py.Object, max *py.Object) *py.Object +// None +// +//go:linkname Clip_ py.clip_ +func Clip_(__llgo_va_list ...interface{}) *py.Object +// +// clone(input, *, memory_format=torch.preserve_format) -> Tensor +// +// Returns a copy of :attr:`input`. +// +// .. note:: +// +// This function is differentiable, so gradients will flow back from the +// result of this operation to :attr:`input`. To create a tensor without an +// autograd relationship to :attr:`input` see :meth:`~Tensor.detach`. +// +// Args: +// input (Tensor): the input tensor. 
+// +// Keyword args: +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned tensor. Default: ``torch.preserve_format``. +// +// +//go:linkname Clone py.clone +func Clone(input *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.col_indices`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname ColIndicesCopy py.col_indices_copy +func ColIndicesCopy(__llgo_va_list ...interface{}) *py.Object +// +// column_stack(tensors, *, out=None) -> Tensor +// +// Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`. +// +// Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t`` +// in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally. +// +// Args: +// tensors (sequence of Tensors): sequence of tensors to concatenate +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([1, 2, 3]) +// >>> b = torch.tensor([4, 5, 6]) +// >>> torch.column_stack((a, b)) +// tensor([[1, 4], +// [2, 5], +// [3, 6]]) +// >>> a = torch.arange(5) +// >>> b = torch.arange(10).reshape(5, 2) +// >>> torch.column_stack((a, b, b)) +// tensor([[0, 0, 1, 0, 1], +// [1, 2, 3, 2, 3], +// [2, 4, 5, 4, 5], +// [3, 6, 7, 6, 7], +// [4, 8, 9, 8, 9]]) +// +// +// +//go:linkname ColumnStack py.column_stack +func ColumnStack(tensors *py.Object) *py.Object +// +// combinations(input, r=2, with_replacement=False) -> seq +// +// Compute combinations of length :math:`r` of the given tensor. The behavior is similar to +// python's `itertools.combinations` when `with_replacement` is set to `False`, and +// `itertools.combinations_with_replacement` when `with_replacement` is set to `True`. +// +// Arguments: +// input (Tensor): 1D vector. 
+// r (int, optional): number of elements to combine +// with_replacement (bool, optional): whether to allow duplication in combination +// +// Returns: +// Tensor: A tensor equivalent to converting all the input tensors into lists, do +// `itertools.combinations` or `itertools.combinations_with_replacement` on these +// lists, and finally convert the resulting list into tensor. +// +// Example:: +// +// >>> a = [1, 2, 3] +// >>> list(itertools.combinations(a, r=2)) +// [(1, 2), (1, 3), (2, 3)] +// >>> list(itertools.combinations(a, r=3)) +// [(1, 2, 3)] +// >>> list(itertools.combinations_with_replacement(a, r=2)) +// [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] +// >>> tensor_a = torch.tensor(a) +// >>> torch.combinations(tensor_a) +// tensor([[1, 2], +// [1, 3], +// [2, 3]]) +// >>> torch.combinations(tensor_a, r=3) +// tensor([[1, 2, 3]]) +// >>> torch.combinations(tensor_a, with_replacement=True) +// tensor([[1, 1], +// [1, 2], +// [1, 3], +// [2, 2], +// [2, 3], +// [3, 3]]) +// +// +// +//go:linkname Combinations py.combinations +func Combinations(input *py.Object, r *py.Object, withReplacement *py.Object) *py.Object +// +// complex(real, imag, *, out=None) -> Tensor +// +// Constructs a complex tensor with its real part equal to :attr:`real` and its +// imaginary part equal to :attr:`imag`. +// +// Args: +// real (Tensor): The real part of the complex tensor. Must be half, float or double. +// imag (Tensor): The imaginary part of the complex tensor. Must be same dtype +// as :attr:`real`. +// +// Keyword args: +// out (Tensor): If the inputs are ``torch.float32``, must be +// ``torch.complex64``. If the inputs are ``torch.float64``, must be +// ``torch.complex128``. 
+// +// Example:: +// +// >>> real = torch.tensor([1, 2], dtype=torch.float32) +// >>> imag = torch.tensor([3, 4], dtype=torch.float32) +// >>> z = torch.complex(real, imag) +// >>> z +// tensor([(1.+3.j), (2.+4.j)]) +// >>> z.dtype +// torch.complex64 +// +// +// +//go:linkname Complex py.complex +func Complex(real *py.Object, imag *py.Object) *py.Object +// +// concat(tensors, dim=0, *, out=None) -> Tensor +// +// Alias of :func:`torch.cat`. +// +// +//go:linkname Concat py.concat +func Concat(tensors *py.Object, dim *py.Object) *py.Object +// +// concatenate(tensors, axis=0, out=None) -> Tensor +// +// Alias of :func:`torch.cat`. +// +// +//go:linkname Concatenate py.concatenate +func Concatenate(tensors *py.Object, axis *py.Object, out *py.Object) *py.Object +// +// conj(input) -> Tensor +// +// Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype, +// this function just returns :attr:`input`. +// +// .. note:: +// :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized +// at any time using :func:`torch.resolve_conj`. +// +// .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of +// non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` +// when :attr:`input` is of non-complex dtype to be compatible with this change. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) +// >>> x.is_conj() +// False +// >>> y = torch.conj(x) +// >>> y.is_conj() +// True +// +// +//go:linkname Conj py.conj +func Conj(input *py.Object) *py.Object +// +// conj_physical(input, *, out=None) -> Tensor +// +// Computes the element-wise conjugate of the given :attr:`input` tensor. +// If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`. +// +// .. 
note:: +// This performs the conjugate operation regardless of the fact conjugate bit is set or not. +// +// .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of +// non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` +// when :attr:`input` is of non-complex dtype to be compatible with this change. +// +// .. math:: +// \text{out}_{i} = conj(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) +// tensor([-1 - 1j, -2 - 2j, 3 + 3j]) +// +// +//go:linkname ConjPhysical py.conj_physical +func ConjPhysical(input *py.Object) *py.Object +// None +// +//go:linkname ConjPhysical_ py.conj_physical_ +func ConjPhysical_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ConstantPadNd py.constant_pad_nd +func ConstantPadNd(__llgo_va_list ...interface{}) *py.Object +// +// conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor +// +// Applies a 1D convolution over an input signal composed of several input +// planes. +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.Conv1d` for details and output shape. +// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// Note: +// This operator supports complex data types i.e. ``complex32, complex64, complex128``. 
+// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` +// weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)` +// bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None`` +// stride: the stride of the convolving kernel. Can be a single number or +// a one-element tuple `(sW,)`. Default: 1 +// padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, +// single number or a one-element tuple `(padW,)`. Default: 0 +// ``padding='valid'`` is the same as no padding. ``padding='same'`` pads +// the input so the output has the same shape as the input. However, this mode +// doesn't support any stride values other than 1. +// +// .. warning:: +// For ``padding='same'``, if the ``weight`` is even-length and +// ``dilation`` is odd in any dimension, a full :func:`pad` operation +// may be needed internally. Lowering performance. +// dilation: the spacing between kernel elements. Can be a single number or +// a one-element tuple `(dW,)`. Default: 1 +// groups: split input into groups, :math:`\text{in\_channels}` should be divisible by +// the number of groups. Default: 1 +// +// Examples:: +// +// >>> inputs = torch.randn(33, 16, 30) +// >>> filters = torch.randn(20, 16, 5) +// >>> F.conv1d(inputs, filters) +// +// +//go:linkname Conv1d py.conv1d +func Conv1d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, groups *py.Object) *py.Object +// +// conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor +// +// Applies a 2D convolution over an input image composed of several input +// planes. +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.Conv2d` for details and output shape. 
+// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// Note: +// This operator supports complex data types i.e. ``complex32, complex64, complex128``. +// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` +// weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)` +// bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None`` +// stride: the stride of the convolving kernel. Can be a single number or a +// tuple `(sH, sW)`. Default: 1 +// padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, +// single number or a tuple `(padH, padW)`. Default: 0 +// ``padding='valid'`` is the same as no padding. ``padding='same'`` pads +// the input so the output has the same shape as the input. However, this mode +// doesn't support any stride values other than 1. +// +// .. warning:: +// For ``padding='same'``, if the ``weight`` is even-length and +// ``dilation`` is odd in any dimension, a full :func:`pad` operation +// may be needed internally. Lowering performance. +// +// dilation: the spacing between kernel elements. Can be a single number or +// a tuple `(dH, dW)`. Default: 1 +// groups: split input into groups, both :math:`\text{in\_channels}` and :math:`\text{out\_channels}` +// should be divisible by the number of groups. 
Default: 1 +// +// Examples:: +// +// >>> # With square kernels and equal stride +// >>> filters = torch.randn(8, 4, 3, 3) +// >>> inputs = torch.randn(1, 4, 5, 5) +// >>> F.conv2d(inputs, filters, padding=1) +// +// +//go:linkname Conv2d py.conv2d +func Conv2d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, groups *py.Object) *py.Object +// +// conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor +// +// Applies a 3D convolution over an input image composed of several input +// planes. +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.Conv3d` for details and output shape. +// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// Note: +// This operator supports complex data types i.e. ``complex32, complex64, complex128``. +// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)` +// weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)` +// bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None +// stride: the stride of the convolving kernel. Can be a single number or a +// tuple `(sT, sH, sW)`. Default: 1 +// padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, +// single number or a tuple `(padT, padH, padW)`. Default: 0 +// ``padding='valid'`` is the same as no padding. ``padding='same'`` pads +// the input so the output has the same shape as the input. 
However, this mode +// doesn't support any stride values other than 1. +// +// .. warning:: +// For ``padding='same'``, if the ``weight`` is even-length and +// ``dilation`` is odd in any dimension, a full :func:`pad` operation +// may be needed internally. Lowering performance. +// +// dilation: the spacing between kernel elements. Can be a single number or +// a tuple `(dT, dH, dW)`. Default: 1 +// groups: split input into groups, :math:`\text{in\_channels}` should be divisible by +// the number of groups. Default: 1 +// +// Examples:: +// +// >>> filters = torch.randn(33, 16, 3, 3, 3) +// >>> inputs = torch.randn(20, 16, 50, 10, 20) +// >>> F.conv3d(inputs, filters) +// +// +//go:linkname Conv3d py.conv3d +func Conv3d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, groups *py.Object) *py.Object +// +// Applies a 1-dimensional sequence convolution over an input sequence. +// Input and output dimensions are (Time, Batch, Channels) - hence TBC. +// +// Args: +// input: input tensor of shape :math:`(\text{sequence length} \times batch \times \text{in\_channels})` +// weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`) +// bias: bias of shape (:math:`\text{out\_channels}`) +// pad: number of timesteps to pad. Default: 0 +// +// +//go:linkname ConvTbc py.conv_tbc +func ConvTbc(__llgo_va_list ...interface{}) *py.Object +// +// conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor +// +// Applies a 1D transposed convolution operator over an input signal +// composed of several input planes, sometimes also called "deconvolution". +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.ConvTranspose1d` for details and output shape. 
+// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` +// weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)` +// bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None +// stride: the stride of the convolving kernel. Can be a single number or a +// tuple ``(sW,)``. Default: 1 +// padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both +// sides of each dimension in the input. Can be a single number or a tuple +// ``(padW,)``. Default: 0 +// output_padding: additional size added to one side of each dimension in the +// output shape. Can be a single number or a tuple ``(out_padW)``. Default: 0 +// groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the +// number of groups. Default: 1 +// dilation: the spacing between kernel elements. Can be a single number or +// a tuple ``(dW,)``. 
Default: 1 +// +// Examples:: +// +// >>> inputs = torch.randn(20, 16, 50) +// >>> weights = torch.randn(16, 33, 5) +// >>> F.conv_transpose1d(inputs, weights) +// +// +//go:linkname ConvTranspose1d py.conv_transpose1d +func ConvTranspose1d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, outputPadding *py.Object, groups *py.Object, dilation *py.Object) *py.Object +// +// conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor +// +// Applies a 2D transposed convolution operator over an input image +// composed of several input planes, sometimes also called "deconvolution". +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.ConvTranspose2d` for details and output shape. +// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` +// weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)` +// bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None +// stride: the stride of the convolving kernel. Can be a single number or a +// tuple ``(sH, sW)``. Default: 1 +// padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both +// sides of each dimension in the input. Can be a single number or a tuple +// ``(padH, padW)``. Default: 0 +// output_padding: additional size added to one side of each dimension in the +// output shape. Can be a single number or a tuple ``(out_padH, out_padW)``. 
+// Default: 0 +// groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the +// number of groups. Default: 1 +// dilation: the spacing between kernel elements. Can be a single number or +// a tuple ``(dH, dW)``. Default: 1 +// +// Examples:: +// +// >>> # With square kernels and equal stride +// >>> inputs = torch.randn(1, 4, 5, 5) +// >>> weights = torch.randn(4, 8, 3, 3) +// >>> F.conv_transpose2d(inputs, weights, padding=1) +// +// +//go:linkname ConvTranspose2d py.conv_transpose2d +func ConvTranspose2d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, outputPadding *py.Object, groups *py.Object, dilation *py.Object) *py.Object +// +// conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor +// +// Applies a 3D transposed convolution operator over an input image +// composed of several input planes, sometimes also called "deconvolution" +// +// This operator supports :ref:`TensorFloat32`. +// +// See :class:`~torch.nn.ConvTranspose3d` for details and output shape. +// +// Note: +// In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information. +// +// +// Args: +// input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)` +// weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)` +// bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None +// stride: the stride of the convolving kernel. Can be a single number or a +// tuple ``(sT, sH, sW)``. 
Default: 1 +// padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both +// sides of each dimension in the input. Can be a single number or a tuple +// ``(padT, padH, padW)``. Default: 0 +// output_padding: additional size added to one side of each dimension in the +// output shape. Can be a single number or a tuple +// ``(out_padT, out_padH, out_padW)``. Default: 0 +// groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the +// number of groups. Default: 1 +// dilation: the spacing between kernel elements. Can be a single number or +// a tuple `(dT, dH, dW)`. Default: 1 +// +// Examples:: +// +// >>> inputs = torch.randn(20, 16, 50, 10, 20) +// >>> weights = torch.randn(16, 33, 3, 3, 3) +// >>> F.conv_transpose3d(inputs, weights) +// +// +//go:linkname ConvTranspose3d py.conv_transpose3d +func ConvTranspose3d(input *py.Object, weight *py.Object, bias *py.Object, stride *py.Object, padding *py.Object, outputPadding *py.Object, groups *py.Object, dilation *py.Object) *py.Object +// None +// +//go:linkname Convolution py.convolution +func Convolution(__llgo_va_list ...interface{}) *py.Object +// +// copysign(input, other, *, out=None) -> Tensor +// +// Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. +// +// .. math:: +// \text{out}_{i} = \begin{cases} +// -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ +// |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ +// \end{cases} +// +// +// Supports :ref:`broadcasting to a common shape `, +// and integer and float inputs. +// +// Args: +// input (Tensor): magnitudes. +// other (Tensor or Number): contains value(s) whose signbit(s) are +// applied to the magnitudes in :attr:`input`. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(5) +// >>> a +// tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) +// >>> torch.copysign(a, 1) +// tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], +// [-0.0059, -0.2600, -0.4475, -1.3948], +// [ 0.3667, -0.9567, -2.5757, -0.1751], +// [ 0.2046, -0.0742, 0.2998, -0.1054]]) +// >>> b = torch.randn(4) +// tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) +// >>> torch.copysign(a, b) +// tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], +// [ 0.0059, 0.2600, 0.4475, -1.3948], +// [ 0.3667, 0.9567, 2.5757, -0.1751], +// [ 0.2046, 0.0742, 0.2998, -0.1054]]) +// >>> a = torch.tensor([1.]) +// >>> b = torch.tensor([-0.]) +// >>> torch.copysign(a, b) +// tensor([-1.]) +// +// .. note:: +// copysign handles signed zeros. If the other argument has a negative zero (-0), +// the corresponding output value will be negative. +// +// +// +//go:linkname Copysign py.copysign +func Copysign(input *py.Object, other *py.Object) *py.Object +// +// corrcoef(input) -> Tensor +// +// Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix, +// where rows are the variables and columns are the observations. +// +// .. note:: +// +// The correlation coefficient matrix R is computed using the covariance matrix C as given by +// :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` +// +// .. note:: +// +// Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1. +// The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation. +// +// Args: +// input (Tensor): A 2D matrix containing multiple variables and observations, or a +// Scalar or 1D vector representing a single variable. +// +// Returns: +// (Tensor) The correlation coefficient matrix of the variables. +// +// .. 
seealso:: +// +// :func:`torch.cov` covariance matrix. +// +// Example:: +// +// >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]]) +// >>> torch.corrcoef(x) +// tensor([[ 1., -1.], +// [-1., 1.]]) +// >>> x = torch.randn(2, 4) +// >>> x +// tensor([[-0.2678, -0.0908, -0.3766, 0.2780], +// [-0.5812, 0.1535, 0.2387, 0.2350]]) +// >>> torch.corrcoef(x) +// tensor([[1.0000, 0.3582], +// [0.3582, 1.0000]]) +// >>> torch.corrcoef(x[0]) +// tensor(1.) +// +// +//go:linkname Corrcoef py.corrcoef +func Corrcoef(input *py.Object) *py.Object +// +// cos(input, *, out=None) -> Tensor +// +// Returns a new tensor with the cosine of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \cos(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 1.4309, 1.2706, -0.8562, 0.9796]) +// >>> torch.cos(a) +// tensor([ 0.1395, 0.2957, 0.6553, 0.5574]) +// +// +//go:linkname Cos py.cos +func Cos(input *py.Object) *py.Object +// None +// +//go:linkname Cos_ py.cos_ +func Cos_(__llgo_va_list ...interface{}) *py.Object +// +// cosh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the hyperbolic cosine of the elements of +// :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \cosh(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.1632, 1.1835, -0.6979, -0.7325]) +// >>> torch.cosh(a) +// tensor([ 1.0133, 1.7860, 1.2536, 1.2805]) +// +// .. note:: +// When :attr:`input` is on the CPU, the implementation of torch.cosh may use +// the Sleef library, which rounds very large results to infinity or negative +// infinity. See `here `_ for details. 
+// +// +//go:linkname Cosh py.cosh +func Cosh(input *py.Object) *py.Object +// None +// +//go:linkname Cosh_ py.cosh_ +func Cosh_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CosineEmbeddingLoss py.cosine_embedding_loss +func CosineEmbeddingLoss(__llgo_va_list ...interface{}) *py.Object +// +// cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor +// +// Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable +// to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is +// squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 fewer dimension. +// +// .. math :: +// \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2, \epsilon) \cdot \max(\Vert x_2 \Vert _2, \epsilon)} +// +// Supports :ref:`type promotion `. +// +// Args: +// x1 (Tensor): First input. +// x2 (Tensor): Second input. +// dim (int, optional): Dimension along which cosine similarity is computed. Default: 1 +// eps (float, optional): Small value to avoid division by zero. +// Default: 1e-8 +// +// Example:: +// +// >>> input1 = torch.randn(100, 128) +// >>> input2 = torch.randn(100, 128) +// >>> output = F.cosine_similarity(input1, input2) +// >>> print(output) +// +// +//go:linkname CosineSimilarity py.cosine_similarity +func CosineSimilarity(x1 *py.Object, x2 *py.Object, dim *py.Object, eps *py.Object) *py.Object +// +// count_nonzero(input, dim=None) -> Tensor +// +// Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. +// If no dim is specified then all non-zeros in the tensor are counted. +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. 
+// +// Example:: +// +// >>> x = torch.zeros(3,3) +// >>> x[torch.randn(3,3) > 0.5] = 1 +// >>> x +// tensor([[0., 1., 1.], +// [0., 0., 0.], +// [0., 0., 1.]]) +// >>> torch.count_nonzero(x) +// tensor(3) +// >>> torch.count_nonzero(x, dim=0) +// tensor([0, 1, 2]) +// +// +//go:linkname CountNonzero py.count_nonzero +func CountNonzero(input *py.Object, dim *py.Object) *py.Object +// +// cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor +// +// Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are +// the variables and columns are the observations. +// +// A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains +// the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents +// a single variable (Scalar or 1D) then its variance is returned. +// +// The sample covariance of the variables :math:`x` and :math:`y` is given by: +// +// .. math:: +// \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)} +// +// where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and +// :math:`\delta N` is the :attr:`correction`. +// +// If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance +// is calculated, which is given by: +// +// .. math:: +// \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)} +// {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)} +// +// where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is +// provided, or :math:`w = f \times a` if both are provided, and +// :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. 
If not +// provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size. +// +// Args: +// input (Tensor): A 2D matrix containing multiple variables and observations, or a +// Scalar or 1D vector representing a single variable. +// +// Keyword Args: +// correction (int, optional): difference between the sample size and sample degrees of freedom. +// Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate, +// even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0`` +// will return the simple average. Defaults to ``1``. +// fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of +// times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`. +// Must have integral dtype. Ignored if ``None``. Defaults to ``None``. +// aweights (tensor, optional): A Scalar or 1D array of observation vector weights. +// These relative weights are typically large for observations considered “important” and smaller for +// observations considered less “important”. Its numel must equal the number of columns of :attr:`input`. +// Must have floating point dtype. Ignored if ``None``. Defaults to ``None``. +// +// Returns: +// (Tensor) The covariance matrix of the variables. +// +// .. seealso:: +// +// :func:`torch.corrcoef` normalized covariance matrix. 
+// +// Example:: +// >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T +// >>> x +// tensor([[0, 1, 2], +// [2, 1, 0]]) +// >>> torch.cov(x) +// tensor([[ 1., -1.], +// [-1., 1.]]) +// >>> torch.cov(x, correction=0) +// tensor([[ 0.6667, -0.6667], +// [-0.6667, 0.6667]]) +// >>> fw = torch.randint(1, 10, (3,)) +// >>> fw +// tensor([1, 6, 9]) +// >>> aw = torch.rand(3) +// >>> aw +// tensor([0.4282, 0.0255, 0.4144]) +// >>> torch.cov(x, fweights=fw, aweights=aw) +// tensor([[ 0.4169, -0.4169], +// [-0.4169, 0.4169]]) +// +// +//go:linkname Cov py.cov +func Cov(input *py.Object) *py.Object +// +// cross(input, other, dim=None, *, out=None) -> Tensor +// +// +// Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input` +// and :attr:`other`. +// +// Supports input of float, double, cfloat and cdouble dtypes. Also supports batches +// of vectors, for which it computes the product along the dimension :attr:`dim`. +// In this case, the output has the same batch dimensions as the inputs. +// +// .. warning:: +// If :attr:`dim` is not given, it defaults to the first dimension found +// with the size 3. Note that this might be unexpected. +// +// This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross` +// in a future release. +// +// .. seealso:: +// :func:`torch.linalg.cross` which has dim=-1 as default. +// +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// dim (int, optional): the dimension to take the cross-product in. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4, 3) +// >>> a +// tensor([[-0.3956, 1.1455, 1.6895], +// [-0.5849, 1.3672, 0.3599], +// [-1.1626, 0.7180, -0.0521], +// [-0.1339, 0.9902, -2.0225]]) +// >>> b = torch.randn(4, 3) +// >>> b +// tensor([[-0.0257, -1.4725, -1.2251], +// [-1.1479, -0.7005, -1.9757], +// [-1.3904, 0.3726, -1.1836], +// [-0.9688, -0.7153, 0.2159]]) +// >>> torch.cross(a, b, dim=1) +// tensor([[ 1.0844, -0.5281, 0.6120], +// [-2.4490, -1.5687, 1.9792], +// [-0.8304, -1.3037, 0.5650], +// [-1.2329, 1.9883, 1.0551]]) +// >>> torch.cross(a, b) +// tensor([[ 1.0844, -0.5281, 0.6120], +// [-2.4490, -1.5687, 1.9792], +// [-0.8304, -1.3037, 0.5650], +// [-1.2329, 1.9883, 1.0551]]) +// +// +//go:linkname Cross py.cross +func Cross(input *py.Object, other *py.Object, dim *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.crow_indices`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname CrowIndicesCopy py.crow_indices_copy +func CrowIndicesCopy(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CtcLoss py.ctc_loss +func CtcLoss(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnAffineGridGenerator py.cudnn_affine_grid_generator +func CudnnAffineGridGenerator(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnBatchNorm py.cudnn_batch_norm +func CudnnBatchNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnConvolution py.cudnn_convolution +func CudnnConvolution(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnConvolutionAddRelu py.cudnn_convolution_add_relu +func CudnnConvolutionAddRelu(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnConvolutionRelu py.cudnn_convolution_relu +func CudnnConvolutionRelu(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnConvolutionTranspose py.cudnn_convolution_transpose +func 
CudnnConvolutionTranspose(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnGridSampler py.cudnn_grid_sampler +func CudnnGridSampler(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname CudnnIsAcceptable py.cudnn_is_acceptable +func CudnnIsAcceptable(__llgo_va_list ...interface{}) *py.Object +// +// cummax(input, dim, *, out=None) -> (Tensor, LongTensor) +// Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of +// elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index +// location of each maximum value found in the dimension :attr:`dim`. +// +// .. math:: +// y_i = max(x_1, x_2, x_3, \dots, x_i) +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to do the operation over +// +// Keyword args: +// out (tuple, optional): the result tuple of two output tensors (values, indices) +// +// Example:: +// +// >>> a = torch.randn(10) +// >>> a +// tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284, +// 1.9946, -0.8209]) +// >>> torch.cummax(a, dim=0) +// torch.return_types.cummax( +// values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696, +// 1.9946, 1.9946]), +// indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8])) +// +// +//go:linkname Cummax py.cummax +func Cummax(input *py.Object, dim *py.Object) *py.Object +// +// cummin(input, dim, *, out=None) -> (Tensor, LongTensor) +// Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of +// elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index +// location of each maximum value found in the dimension :attr:`dim`. +// +// .. math:: +// y_i = min(x_1, x_2, x_3, \dots, x_i) +// +// Args: +// input (Tensor): the input tensor. 
+// dim (int): the dimension to do the operation over +// +// Keyword args: +// out (tuple, optional): the result tuple of two output tensors (values, indices) +// +// Example:: +// +// >>> a = torch.randn(10) +// >>> a +// tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762, +// 0.9165, 1.6684]) +// >>> torch.cummin(a, dim=0) +// torch.return_types.cummin( +// values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298, +// -1.3298, -1.3298]), +// indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4])) +// +// +//go:linkname Cummin py.cummin +func Cummin(input *py.Object, dim *py.Object) *py.Object +// +// cumprod(input, dim, *, dtype=None, out=None) -> Tensor +// +// Returns the cumulative product of elements of :attr:`input` in the dimension +// :attr:`dim`. +// +// For example, if :attr:`input` is a vector of size N, the result will also be +// a vector of size N, with elements. +// +// .. math:: +// y_i = x_1 \times x_2\times x_3\times \dots \times x_i +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to do the operation over +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(10) +// >>> a +// tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, +// -0.2129, -0.4206, 0.1968]) +// >>> torch.cumprod(a, dim=0) +// tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, +// 0.0014, -0.0006, -0.0001]) +// +// >>> a[5] = 0.0 +// >>> torch.cumprod(a, dim=0) +// tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, +// 0.0000, -0.0000, -0.0000]) +// +// +//go:linkname Cumprod py.cumprod +func Cumprod(input *py.Object, dim *py.Object) *py.Object +// +// cumsum(input, dim, *, dtype=None, out=None) -> Tensor +// +// Returns the cumulative sum of elements of :attr:`input` in the dimension +// :attr:`dim`. +// +// For example, if :attr:`input` is a vector of size N, the result will also be +// a vector of size N, with elements. +// +// .. math:: +// y_i = x_1 + x_2 + x_3 + \dots + x_i +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to do the operation over +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(10) +// >>> a +// tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595, +// 0.1850, -1.1571, -0.4243]) +// >>> torch.cumsum(a, dim=0) +// tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058, +// -1.8209, -2.9780, -3.4022]) +// +// +//go:linkname Cumsum py.cumsum +func Cumsum(input *py.Object, dim *py.Object) *py.Object +// +// cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor +// +// Cumulatively computes the `trapezoidal rule `_ +// along :attr:`dim`. 
By default the spacing between elements is assumed to be 1, but +// :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be +// used to specify arbitrary spacing along :attr:`dim`. +// +// For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid` +// and this function is that, :func:`torch.trapezoid` returns a value for each integration, +// where as this function returns a cumulative value for every spacing within the integration. This +// is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. +// +// Arguments: +// y (Tensor): Values to use when computing the trapezoidal rule. +// x (Tensor): If specified, defines spacing between values as specified above. +// +// Keyword arguments: +// dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` +// are specified then this defaults to 1. Effectively multiplies the result by its value. +// dim (int): The dimension along which to compute the trapezoidal rule. +// The last (inner-most) dimension by default. +// +// Examples:: +// +// >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. 
+// >>> y = torch.tensor([1, 5, 10]) +// >>> torch.cumulative_trapezoid(y) +// tensor([3., 10.5]) +// +// >>> # Computes the same trapezoidal rule directly up to each element to verify +// >>> (1 + 5) / 2 +// 3.0 +// >>> (1 + 10 + 10) / 2 +// 10.5 +// +// >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 +// >>> # NOTE: the result is the same as before, but multiplied by 2 +// >>> torch.cumulative_trapezoid(y, dx=2) +// tensor([6., 21.]) +// +// >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing +// >>> x = torch.tensor([1, 3, 6]) +// >>> torch.cumulative_trapezoid(y, x) +// tensor([6., 28.5]) +// +// >>> # Computes the same trapezoidal rule directly up to each element to verify +// >>> ((3 - 1) * (1 + 5)) / 2 +// 6.0 +// >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 +// 28.5 +// +// >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix +// >>> y = torch.arange(9).reshape(3, 3) +// tensor([[0, 1, 2], +// [3, 4, 5], +// [6, 7, 8]]) +// >>> torch.cumulative_trapezoid(y) +// tensor([[ 0.5, 2.], +// [ 3.5, 8.], +// [ 6.5, 14.]]) +// +// >>> # Cumulatively computes the trapezoidal rule for each column of the matrix +// >>> torch.cumulative_trapezoid(y, dim=0) +// tensor([[ 1.5, 2.5, 3.5], +// [ 6.0, 8.0, 10.0]]) +// +// >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix +// >>> # with the same arbitrary spacing +// >>> y = torch.ones(3, 3) +// >>> x = torch.tensor([1, 3, 6]) +// >>> torch.cumulative_trapezoid(y, x) +// tensor([[2., 5.], +// [2., 5.], +// [2., 5.]]) +// +// >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix +// >>> # with different arbitrary spacing per row +// >>> y = torch.ones(3, 3) +// >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) +// >>> torch.cumulative_trapezoid(y, x) +// tensor([[1., 2.], +// [2., 4.], +// [3., 6.]]) +// +// +//go:linkname CumulativeTrapezoid py.cumulative_trapezoid +func 
CumulativeTrapezoid(y *py.Object, x *py.Object) *py.Object +// +// deg2rad(input, *, out=None) -> Tensor +// +// Returns a new tensor with each of the elements of :attr:`input` +// converted from angles in degrees to radians. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) +// >>> torch.deg2rad(a) +// tensor([[ 3.1416, -3.1416], +// [ 6.2832, -6.2832], +// [ 1.5708, -1.5708]]) +// +// +// +//go:linkname Deg2rad py.deg2rad +func Deg2rad(input *py.Object) *py.Object +// None +// +//go:linkname Deg2rad_ py.deg2rad_ +func Deg2rad_(__llgo_va_list ...interface{}) *py.Object +// +// dequantize(tensor) -> Tensor +// +// Returns an fp32 Tensor by dequantizing a quantized Tensor +// +// Args: +// tensor (Tensor): A quantized Tensor +// +// .. function:: dequantize(tensors) -> sequence of Tensors +// :noindex: +// +// Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors +// +// Args: +// tensors (sequence of Tensors): A list of quantized Tensors +// +// +//go:linkname Dequantize py.dequantize +func Dequantize(tensor *py.Object) *py.Object +// +// det(input) -> Tensor +// +// Alias for :func:`torch.linalg.det` +// +// +//go:linkname Det py.det +func Det(input *py.Object) *py.Object +// None +// +//go:linkname Detach py.detach +func Detach(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Detach_ py.detach_ +func Detach_(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.detach`, but all output tensors +// are freshly created instead of aliasing the input. 
+// +// +//go:linkname DetachCopy py.detach_copy +func DetachCopy(__llgo_va_list ...interface{}) *py.Object +// +// diag(input, diagonal=0, *, out=None) -> Tensor +// +// - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor +// with the elements of :attr:`input` as the diagonal. +// - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with +// the diagonal elements of :attr:`input`. +// +// The argument :attr:`diagonal` controls which diagonal to consider: +// +// - If :attr:`diagonal` = 0, it is the main diagonal. +// - If :attr:`diagonal` > 0, it is above the main diagonal. +// - If :attr:`diagonal` < 0, it is below the main diagonal. +// +// Args: +// input (Tensor): the input tensor. +// diagonal (int, optional): the diagonal to consider +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// .. seealso:: +// +// :func:`torch.diagonal` always returns the diagonal of its input. +// +// :func:`torch.diagflat` always constructs a tensor with diagonal elements +// specified by the input. 
+// +// Examples: +// +// Get the square matrix where the input vector is the diagonal:: +// +// >>> a = torch.randn(3) +// >>> a +// tensor([ 0.5950,-0.0872, 2.3298]) +// >>> torch.diag(a) +// tensor([[ 0.5950, 0.0000, 0.0000], +// [ 0.0000,-0.0872, 0.0000], +// [ 0.0000, 0.0000, 2.3298]]) +// >>> torch.diag(a, 1) +// tensor([[ 0.0000, 0.5950, 0.0000, 0.0000], +// [ 0.0000, 0.0000,-0.0872, 0.0000], +// [ 0.0000, 0.0000, 0.0000, 2.3298], +// [ 0.0000, 0.0000, 0.0000, 0.0000]]) +// +// Get the k-th diagonal of a given matrix:: +// +// >>> a = torch.randn(3, 3) +// >>> a +// tensor([[-0.4264, 0.0255,-0.1064], +// [ 0.8795,-0.2429, 0.1374], +// [ 0.1029,-0.6482,-1.6300]]) +// >>> torch.diag(a, 0) +// tensor([-0.4264,-0.2429,-1.6300]) +// >>> torch.diag(a, 1) +// tensor([ 0.0255, 0.1374]) +// +// +//go:linkname Diag py.diag +func Diag(input *py.Object, diagonal *py.Object) *py.Object +// +// diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor +// +// Creates a tensor whose diagonals of certain 2D planes (specified by +// :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`. +// To facilitate creating batched diagonal matrices, the 2D planes formed by +// the last two dimensions of the returned tensor are chosen by default. +// +// The argument :attr:`offset` controls which diagonal to consider: +// +// - If :attr:`offset` = 0, it is the main diagonal. +// - If :attr:`offset` > 0, it is above the main diagonal. +// - If :attr:`offset` < 0, it is below the main diagonal. +// +// The size of the new matrix will be calculated to make the specified diagonal +// of the size of the last input dimension. +// Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1` +// and :attr:`dim2` matters. Exchanging them is equivalent to changing the +// sign of :attr:`offset`. +// +// Applying :meth:`torch.diagonal` to the output of this function with +// the same arguments yields a matrix identical to input. 
However, +// :meth:`torch.diagonal` has different default dimensions, so those +// need to be explicitly specified. +// +// Args: +// input (Tensor): the input tensor. Must be at least 1-dimensional. +// offset (int, optional): which diagonal to consider. Default: 0 +// (main diagonal). +// dim1 (int, optional): first dimension with respect to which to +// take diagonal. Default: -2. +// dim2 (int, optional): second dimension with respect to which to +// take diagonal. Default: -1. +// +// Example:: +// +// >>> a = torch.randn(2, 3) +// >>> torch.diag_embed(a) +// tensor([[[ 1.5410, 0.0000, 0.0000], +// [ 0.0000, -0.2934, 0.0000], +// [ 0.0000, 0.0000, -2.1788]], +// +// [[ 0.5684, 0.0000, 0.0000], +// [ 0.0000, -1.0845, 0.0000], +// [ 0.0000, 0.0000, -1.3986]]]) +// +// >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2) +// tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000], +// [ 0.0000, 0.5684, 0.0000, 0.0000]], +// +// [[ 0.0000, 0.0000, -0.2934, 0.0000], +// [ 0.0000, 0.0000, -1.0845, 0.0000]], +// +// [[ 0.0000, 0.0000, 0.0000, -2.1788], +// [ 0.0000, 0.0000, 0.0000, -1.3986]], +// +// [[ 0.0000, 0.0000, 0.0000, 0.0000], +// [ 0.0000, 0.0000, 0.0000, 0.0000]]]) +// +// +//go:linkname DiagEmbed py.diag_embed +func DiagEmbed(input *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object +// +// diagflat(input, offset=0) -> Tensor +// +// - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor +// with the elements of :attr:`input` as the diagonal. +// - If :attr:`input` is a tensor with more than one dimension, then returns a +// 2-D tensor with diagonal elements equal to a flattened :attr:`input`. +// +// The argument :attr:`offset` controls which diagonal to consider: +// +// - If :attr:`offset` = 0, it is the main diagonal. +// - If :attr:`offset` > 0, it is above the main diagonal. +// - If :attr:`offset` < 0, it is below the main diagonal. +// +// Args: +// input (Tensor): the input tensor. 
+// offset (int, optional): the diagonal to consider. Default: 0 (main +// diagonal). +// +// Examples:: +// +// >>> a = torch.randn(3) +// >>> a +// tensor([-0.2956, -0.9068, 0.1695]) +// >>> torch.diagflat(a) +// tensor([[-0.2956, 0.0000, 0.0000], +// [ 0.0000, -0.9068, 0.0000], +// [ 0.0000, 0.0000, 0.1695]]) +// >>> torch.diagflat(a, 1) +// tensor([[ 0.0000, -0.2956, 0.0000, 0.0000], +// [ 0.0000, 0.0000, -0.9068, 0.0000], +// [ 0.0000, 0.0000, 0.0000, 0.1695], +// [ 0.0000, 0.0000, 0.0000, 0.0000]]) +// +// >>> a = torch.randn(2, 2) +// >>> a +// tensor([[ 0.2094, -0.3018], +// [-0.1516, 1.9342]]) +// >>> torch.diagflat(a) +// tensor([[ 0.2094, 0.0000, 0.0000, 0.0000], +// [ 0.0000, -0.3018, 0.0000, 0.0000], +// [ 0.0000, 0.0000, -0.1516, 0.0000], +// [ 0.0000, 0.0000, 0.0000, 1.9342]]) +// +// +//go:linkname Diagflat py.diagflat +func Diagflat(input *py.Object, offset *py.Object) *py.Object +// +// diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor +// +// Returns a partial view of :attr:`input` with the its diagonal elements +// with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension +// at the end of the shape. +// +// The argument :attr:`offset` controls which diagonal to consider: +// +// - If :attr:`offset` = 0, it is the main diagonal. +// - If :attr:`offset` > 0, it is above the main diagonal. +// - If :attr:`offset` < 0, it is below the main diagonal. +// +// Applying :meth:`torch.diag_embed` to the output of this function with +// the same arguments yields a diagonal matrix with the diagonal entries +// of the input. However, :meth:`torch.diag_embed` has different default +// dimensions, so those need to be explicitly specified. +// +// Args: +// input (Tensor): the input tensor. Must be at least 2-dimensional. +// offset (int, optional): which diagonal to consider. Default: 0 +// (main diagonal). +// dim1 (int, optional): first dimension with respect to which to +// take diagonal. Default: 0. 
+// dim2 (int, optional): second dimension with respect to which to +// take diagonal. Default: 1. +// +// .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. +// +// Examples:: +// +// >>> a = torch.randn(3, 3) +// >>> a +// tensor([[-1.0854, 1.1431, -0.1752], +// [ 0.8536, -0.0905, 0.0360], +// [ 0.6927, -0.3735, -0.4945]]) +// +// +// >>> torch.diagonal(a, 0) +// tensor([-1.0854, -0.0905, -0.4945]) +// +// +// >>> torch.diagonal(a, 1) +// tensor([ 1.1431, 0.0360]) +// +// +// >>> x = torch.randn(2, 5, 4, 2) +// >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) +// tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], +// [-1.1065, 1.0401, -0.2235, -0.7938]], +// +// [[-1.7325, -0.3081, 0.6166, 0.2335], +// [ 1.0500, 0.7336, -0.3836, -1.1015]]]) +// +// +//go:linkname Diagonal py.diagonal +func Diagonal(input *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.diagonal`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname DiagonalCopy py.diagonal_copy +func DiagonalCopy(__llgo_va_list ...interface{}) *py.Object +// +// diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor +// +// Embeds the values of the :attr:`src` tensor into :attr:`input` along +// the diagonal elements of :attr:`input`, with respect to :attr:`dim1` +// and :attr:`dim2`. +// +// This function returns a tensor with fresh storage; it does not +// return a view. +// +// The argument :attr:`offset` controls which diagonal to consider: +// +// - If :attr:`offset` = 0, it is the main diagonal. +// - If :attr:`offset` > 0, it is above the main diagonal. +// - If :attr:`offset` < 0, it is below the main diagonal. +// +// Args: +// input (Tensor): the input tensor. Must be at least 2-dimensional. +// src (Tensor): the tensor to embed into :attr:`input`. +// offset (int, optional): which diagonal to consider. Default: 0 +// (main diagonal). 
+// dim1 (int, optional): first dimension with respect to which to +// take diagonal. Default: 0. +// dim2 (int, optional): second dimension with respect to which to +// take diagonal. Default: 1. +// +// .. note:: +// +// :attr:`src` must be of the proper size in order to be embedded +// into :attr:`input`. Specifically, it should have the same shape as +// ``torch.diagonal(input, offset, dim1, dim2)`` +// +// Examples:: +// +// >>> a = torch.zeros(3, 3) +// >>> a +// tensor([[0., 0., 0.], +// [0., 0., 0.], +// [0., 0., 0.]]) +// +// >>> torch.diagonal_scatter(a, torch.ones(3), 0) +// tensor([[1., 0., 0.], +// [0., 1., 0.], +// [0., 0., 1.]]) +// +// >>> torch.diagonal_scatter(a, torch.ones(2), 1) +// tensor([[0., 1., 0.], +// [0., 0., 1.], +// [0., 0., 0.]]) +// +// +//go:linkname DiagonalScatter py.diagonal_scatter +func DiagonalScatter(input *py.Object, src *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object +// +// diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor +// +// Computes the n-th forward difference along the given dimension. +// +// The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order +// differences are calculated by using :func:`torch.diff` recursively. +// +// Args: +// input (Tensor): the tensor to compute the differences on +// n (int, optional): the number of times to recursively compute the difference +// dim (int, optional): the dimension to compute the difference along. +// Default is the last dimension. +// prepend, append (Tensor, optional): values to prepend or append to +// :attr:`input` along :attr:`dim` before computing the difference. +// Their dimensions must be equivalent to that of input, and their shapes +// must match input's shape except on :attr:`dim`. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([1, 3, 2]) +// >>> torch.diff(a) +// tensor([ 2, -1]) +// >>> b = torch.tensor([4, 5]) +// >>> torch.diff(a, append=b) +// tensor([ 2, -1, 2, 1]) +// >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]]) +// >>> torch.diff(c, dim=0) +// tensor([[2, 2, 2]]) +// >>> torch.diff(c, dim=1) +// tensor([[1, 1], +// [1, 1]]) +// +// +//go:linkname Diff py.diff +func Diff(input *py.Object, n *py.Object, dim *py.Object, prepend *py.Object, append *py.Object) *py.Object +// +// digamma(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.digamma`. +// +// +//go:linkname Digamma py.digamma +func Digamma(input *py.Object) *py.Object +// +// dist(input, other, p=2) -> Tensor +// +// Returns the p-norm of (:attr:`input` - :attr:`other`) +// +// The shapes of :attr:`input` and :attr:`other` must be +// :ref:`broadcastable `. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the Right-hand-side input tensor +// p (float, optional): the norm to be computed +// +// Example:: +// +// >>> x = torch.randn(4) +// >>> x +// tensor([-1.5393, -0.8675, 0.5916, 1.6321]) +// >>> y = torch.randn(4) +// >>> y +// tensor([ 0.0967, -1.0511, 0.6295, 0.8360]) +// >>> torch.dist(x, y, 3.5) +// tensor(1.6727) +// >>> torch.dist(x, y, 3) +// tensor(1.6973) +// >>> torch.dist(x, y, 0) +// tensor(4.) +// >>> torch.dist(x, y, 1) +// tensor(2.6537) +// +// +//go:linkname Dist py.dist +func Dist(input *py.Object, other *py.Object, p *py.Object) *py.Object +// +// div(input, other, *, rounding_mode=None, out=None) -> Tensor +// +// Divides each element of the input ``input`` by the corresponding element of +// :attr:`other`. +// +// .. math:: +// \text{out}_i = \frac{\text{input}_i}{\text{other}_i} +// +// .. note:: +// By default, this performs a "true" division like Python 3. +// See the :attr:`rounding_mode` argument for floor division. 
+// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer, float, and complex inputs. +// Always promotes integer types to the default scalar type. +// +// Args: +// input (Tensor): the dividend +// other (Tensor or Number): the divisor +// +// Keyword args: +// rounding_mode (str, optional): Type of rounding applied to the result: +// +// * None - default behavior. Performs no rounding and, if both :attr:`input` and +// :attr:`other` are integer types, promotes the inputs to the default scalar type. +// Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``. +// * ``"trunc"`` - rounds the results of the division towards zero. +// Equivalent to C-style integer division. +// * ``"floor"`` - rounds the results of the division down. +// Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``. +// +// out (Tensor, optional): the output tensor. +// +// Examples:: +// +// >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) +// >>> torch.div(x, 0.5) +// tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274]) +// +// >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917], +// ... [ 0.1815, -1.0111, 0.9805, -1.5923], +// ... [ 0.1062, 1.4581, 0.7759, -1.2344], +// ... 
[-0.1830, -0.0313, 1.1908, -1.4757]]) +// >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) +// >>> torch.div(a, b) +// tensor([[-0.4620, -6.6051, 0.5676, 1.2639], +// [ 0.2260, -3.4509, -1.2086, 6.8990], +// [ 0.1322, 4.9764, -0.9564, 5.3484], +// [-0.2278, -0.1068, -1.4678, 6.3938]]) +// +// >>> torch.div(a, b, rounding_mode='trunc') +// tensor([[-0., -6., 0., 1.], +// [ 0., -3., -1., 6.], +// [ 0., 4., -0., 5.], +// [-0., -0., -1., 6.]]) +// +// >>> torch.div(a, b, rounding_mode='floor') +// tensor([[-1., -7., 0., 1.], +// [ 0., -4., -2., 6.], +// [ 0., 4., -1., 5.], +// [-1., -1., -2., 6.]]) +// +// +// +//go:linkname Div py.div +func Div(input *py.Object, other *py.Object) *py.Object +// +// divide(input, other, *, rounding_mode=None, out=None) -> Tensor +// +// Alias for :func:`torch.div`. +// +// +//go:linkname Divide py.divide +func Divide(input *py.Object, other *py.Object) *py.Object +// +// dot(input, other, *, out=None) -> Tensor +// +// Computes the dot product of two 1D tensors. +// +// .. note:: +// +// Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product +// of two 1D tensors with the same number of elements. +// +// Args: +// input (Tensor): first tensor in the dot product, must be 1D. +// other (Tensor): second tensor in the dot product, must be 1D. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) +// tensor(7) +// +// +//go:linkname Dot py.dot +func Dot(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Dropout py.dropout +func Dropout(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Dropout_ py.dropout_ +func Dropout_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Dsmm py.dsmm +func Dsmm(__llgo_va_list ...interface{}) *py.Object +// +// dsplit(input, indices_or_sections) -> List of Tensors +// +// Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors +// depthwise according to :attr:`indices_or_sections`. Each split is a view of +// :attr:`input`. +// +// This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) +// (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer +// it must evenly divide the split dimension or a runtime error will be thrown. +// +// This function is based on NumPy's :func:`numpy.dsplit`. +// +// Args: +// input (Tensor): tensor to split. +// indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+// +// Example:: +// >>> t = torch.arange(16.0).reshape(2, 2, 4) +// >>> t +// tensor([[[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.]], +// [[ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]]) +// >>> torch.dsplit(t, 2) +// (tensor([[[ 0., 1.], +// [ 4., 5.]], +// [[ 8., 9.], +// [12., 13.]]]), +// tensor([[[ 2., 3.], +// [ 6., 7.]], +// [[10., 11.], +// [14., 15.]]])) +// +// >>> torch.dsplit(t, [3, 6]) +// (tensor([[[ 0., 1., 2.], +// [ 4., 5., 6.]], +// [[ 8., 9., 10.], +// [12., 13., 14.]]]), +// tensor([[[ 3.], +// [ 7.]], +// [[11.], +// [15.]]]), +// tensor([], size=(2, 2, 0))) +// +// +// +//go:linkname Dsplit py.dsplit +func Dsplit(input *py.Object, indicesOrSections *py.Object) *py.Object +// +// dstack(tensors, *, out=None) -> Tensor +// +// Stack tensors in sequence depthwise (along third axis). +// +// This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`. +// +// Args: +// tensors (sequence of Tensors): sequence of tensors to concatenate +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([1, 2, 3]) +// >>> b = torch.tensor([4, 5, 6]) +// >>> torch.dstack((a,b)) +// tensor([[[1, 4], +// [2, 5], +// [3, 6]]]) +// >>> a = torch.tensor([[1],[2],[3]]) +// >>> b = torch.tensor([[4],[5],[6]]) +// >>> torch.dstack((a,b)) +// tensor([[[1, 4]], +// [[2, 5]], +// [[3, 6]]]) +// +// +// +// +//go:linkname Dstack py.dstack +func Dstack(tensors *py.Object) *py.Object +// einsum(equation, *operands) -> Tensor +// +// Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation +// based on the Einstein summation convention. +// +// Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them +// in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. 
The details of +// this format are described below, but the general idea is to label every dimension of the input :attr:`operands` +// with some subscript and define which subscripts are part of the output. The output is then computed by summing +// the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the +// output. For example, matrix multiplication can be computed using einsum as `torch.einsum("ij,jk->ik", A, B)`. +// Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why). +// +// Equation: +// +// The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of +// the input :attr:`operands` in the same order as the dimensions, separating subscripts for each operand by a +// comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript +// must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is +// repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand +// must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that +// appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order. +// The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based +// on the subscripts, and then summing out the dimensions whose subscripts are not part of the output. +// +// Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation +// followed by the subscripts for the output. For instance, the following equation computes the transpose of a +// matrix multiplication: 'ij,jk->ki'. 
The output subscripts must appear at least once for some input operand and +// at most once for the output. +// +// Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis. +// Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts, +// e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` cover the third and fourth +// dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the +// 'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not +// explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions), +// before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements +// batch matrix multiplication `'...ij,...jk'`. +// +// A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis, +// arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands. +// +// .. note:: +// +// ``torch.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions +// covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output. +// +// .. note:: +// +// This function uses opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/) to speed up computation or to +// consume less memory by optimizing contraction order. This optimization occurs when there are at least three +// inputs, since the order does not matter otherwise. Note that finding _the_ optimal path is an NP-hard problem, +// thus, opt_einsum relies on different heuristics to achieve near-optimal results. If opt_einsum is not available, +// the default order is to contract from left to right. 
+// +// To bypass this default behavior, add the following line to disable the usage of opt_einsum and skip path +// calculation: `torch.backends.opt_einsum.enabled = False` +// +// To specify which strategy you'd like for opt_einsum to compute the contraction path, add the following line: +// `torch.backends.opt_einsum.strategy = 'auto'`. The default strategy is 'auto', and we also support 'greedy' and +// 'optimal'. Disclaimer that the runtime of 'optimal' is factorial in the number of inputs! See more details in +// the opt_einsum documentation (https://optimized-einsum.readthedocs.io/en/stable/path_finding.html). +// +// .. note:: +// +// As of PyTorch 1.10 :func:`torch.einsum` also supports the sublist format (see examples below). In this format, +// subscripts for each operand are specified by sublists, list of integers in the range [0, 52). These sublists +// follow their operands, and an extra sublist can appear at the end of the input to specify the output's +// subscripts., e.g. `torch.einsum(op1, sublist1, op2, sublist2, ..., [subslist_out])`. Python's `Ellipsis` object +// may be provided in a sublist to enable broadcasting as described in the Equation section above. +// +// Args: +// equation (str): The subscripts for the Einstein summation. +// operands (List[Tensor]): The tensors to compute the Einstein summation of. 
+// +// Examples:: +// +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> # trace +// >>> torch.einsum('ii', torch.randn(4, 4)) +// tensor(-1.2104) +// +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> # diagonal +// >>> torch.einsum('ii->i', torch.randn(4, 4)) +// tensor([-0.1034, 0.7952, -0.2433, 0.4545]) +// +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> # outer product +// >>> x = torch.randn(5) +// >>> y = torch.randn(4) +// >>> torch.einsum('i,j->ij', x, y) +// tensor([[ 0.1156, -0.2897, -0.3918, 0.4963], +// [-0.3744, 0.9381, 1.2685, -1.6070], +// [ 0.7208, -1.8058, -2.4419, 3.0936], +// [ 0.1713, -0.4291, -0.5802, 0.7350], +// [ 0.5704, -1.4290, -1.9323, 2.4480]]) +// +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> # batch matrix multiplication +// >>> As = torch.randn(3, 2, 5) +// >>> Bs = torch.randn(3, 5, 4) +// >>> torch.einsum('bij,bjk->bik', As, Bs) +// tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], +// [-1.6706, -0.8097, -0.8025, -2.1183]], +// +// [[ 4.2239, 0.3107, -0.5756, -0.2354], +// [-1.4558, -0.3460, 1.5087, -0.8530]], +// +// [[ 2.8153, 1.8787, -4.3839, -1.2112], +// [ 0.3728, -2.1131, 0.0921, 0.8305]]]) +// +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> # with sublist format and ellipsis +// >>> torch.einsum(As, [..., 0, 1], Bs, [..., 1, 2], [..., 0, 2]) +// tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], +// [-1.6706, -0.8097, -0.8025, -2.1183]], +// +// [[ 4.2239, 0.3107, -0.5756, -0.2354], +// [-1.4558, -0.3460, 1.5087, -0.8530]], +// +// [[ 2.8153, 1.8787, -4.3839, -1.2112], +// [ 0.3728, -2.1131, 0.0921, 0.8305]]]) +// +// >>> # batch permute +// >>> A = torch.randn(2, 3, 4, 5) +// >>> torch.einsum('...ij->...ji', A).shape +// torch.Size([2, 3, 5, 4]) +// +// >>> # equivalent to torch.nn.functional.bilinear +// >>> A = torch.randn(3, 5, 4) +// >>> l = torch.randn(2, 5) +// >>> r = torch.randn(2, 4) +// >>> torch.einsum('bn,anm,bm->ba', l, A, r) +// tensor([[-0.3430, -5.2405, 
0.4494], +// [ 0.3311, 5.5201, -3.0356]]) +// +// +//go:linkname Einsum py.einsum +func Einsum(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Embedding py.embedding +func Embedding(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname EmbeddingBag py.embedding_bag +func EmbeddingBag(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname EmbeddingRenorm_ py.embedding_renorm_ +func EmbeddingRenorm_(__llgo_va_list ...interface{}) *py.Object +// +// empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor +// +// Returns a tensor filled with uninitialized data. The shape of the tensor is +// defined by the variable argument :attr:`size`. +// +// .. note:: +// If :func:`torch.use_deterministic_algorithms()` and +// :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to +// ``True``, the output tensor is initialized to prevent any possible +// nondeterministic behavior from using the data as an input to an operation. +// Floating point and complex tensors are filled with NaN, and integer tensors +// are filled with the maximum value. +// +// Args: +// size (int...): a sequence of integers defining the shape of the output tensor. +// Can be a variable number of arguments or a collection like a list or tuple. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). 
:attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.contiguous_format``. +// +// Example:: +// +// >>> torch.empty((2,3), dtype=torch.int64) +// tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], +// [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) +// +// +//go:linkname Empty py.empty +func Empty(__llgo_va_list ...interface{}) *py.Object +// +// empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns an uninitialized tensor with the same size as :attr:`input`. +// ``torch.empty_like(input)`` is equivalent to +// ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// .. note:: +// If :func:`torch.use_deterministic_algorithms()` and +// :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to +// ``True``, the output tensor is initialized to prevent any possible +// nondeterministic behavior from using the data as an input to an operation. +// Floating point and complex tensors are filled with NaN, and integer tensors +// are filled with the maximum value. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. 
+// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. +// +// Example:: +// +// >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda') +// >>> torch.empty_like(a) +// tensor([[0, 0, 0], +// [0, 0, 0]], device='cuda:0', dtype=torch.int32) +// +// +//go:linkname EmptyLike py.empty_like +func EmptyLike(input *py.Object) *py.Object +// +// empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// Creates an uninitialized, non-overlapping and dense tensor with the +// specified :attr:`size`, with :attr:`physical_layout` specifying how the +// dimensions are physically laid out in memory (each logical dimension is listed +// from outermost to innermost). :attr:`physical_layout` is a generalization +// of NCHW/NHWC notation: if each dimension is assigned a number according to +// what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)`` +// while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output +// tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]`` +// (notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``). +// +// Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense +// tensor with no overlaps. If possible, prefer using this function over +// :func:`torch.empty_strided` or manual use of :func:`torch.as_strided`. +// +// .. 
note:: +// If :func:`torch.use_deterministic_algorithms()` and +// :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to +// ``True``, the output tensor is initialized to prevent any possible +// nondeterministic behavior from using the data as an input to an operation. +// Floating point and complex tensors are filled with NaN, and integer tensors +// are filled with the maximum value. +// +// Args: +// size (tuple of int): the shape of the output tensor +// physical_layout (tuple of int): the ordering of dimensions physically in memory +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. 
+// +// Examples: +// +// >>> torch.empty((2, 3, 5, 7)).stride() +// (105, 35, 7, 1) +// >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride() +// (105, 35, 7, 1) +// >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride() +// (105, 1, 21, 3) +// >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride() +// (105, 1, 21, 3) +// >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order() +// (0, 2, 3, 1) +// +// +//go:linkname EmptyPermuted py.empty_permuted +func EmptyPermuted(size *py.Object, physicalLayout *py.Object) *py.Object +// None +// +//go:linkname EmptyQuantized py.empty_quantized +func EmptyQuantized(__llgo_va_list ...interface{}) *py.Object +// +// empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data. +// +// .. warning:: +// If the constructed tensor is "overlapped" (with multiple indices referring to the same element +// in memory) its behavior is undefined. +// +// .. note:: +// If :func:`torch.use_deterministic_algorithms()` and +// :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to +// ``True``, the output tensor is initialized to prevent any possible +// nondeterministic behavior from using the data as an input to an operation. +// Floating point and complex tensors are filled with NaN, and integer tensors +// are filled with the maximum value. +// +// Args: +// size (tuple of int): the shape of the output tensor +// stride (tuple of int): the strides of the output tensor +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. 
+// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. +// +// Example:: +// +// >>> a = torch.empty_strided((2, 3), (1, 2)) +// >>> a +// tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07], +// [0.0000e+00, 0.0000e+00, 3.0705e-41]]) +// >>> a.stride() +// (1, 2) +// >>> a.size() +// torch.Size([2, 3]) +// +// +//go:linkname EmptyStrided py.empty_strided +func EmptyStrided(size *py.Object, stride *py.Object) *py.Object +// +// eq(input, other, *, out=None) -> Tensor +// +// Computes element-wise equality +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or float): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[ True, False], +// [False, True]]) +// +// +//go:linkname Eq py.eq +func Eq(input *py.Object, other *py.Object) *py.Object +// +// equal(input, other) -> bool +// +// ``True`` if two tensors have the same size and elements, ``False`` otherwise. 
+// +// Example:: +// +// >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2])) +// True +// +// +//go:linkname Equal py.equal +func Equal(input *py.Object, other *py.Object) *py.Object +// +// erf(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.erf`. +// +// +//go:linkname Erf py.erf +func Erf(input *py.Object) *py.Object +// None +// +//go:linkname Erf_ py.erf_ +func Erf_(__llgo_va_list ...interface{}) *py.Object +// +// erfc(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.erfc`. +// +// +//go:linkname Erfc py.erfc +func Erfc(input *py.Object) *py.Object +// None +// +//go:linkname Erfc_ py.erfc_ +func Erfc_(__llgo_va_list ...interface{}) *py.Object +// +// erfinv(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.erfinv`. +// +// +//go:linkname Erfinv py.erfinv +func Erfinv(input *py.Object) *py.Object +// +// exp(input, *, out=None) -> Tensor +// +// Returns a new tensor with the exponential of the elements +// of the input tensor :attr:`input`. +// +// .. math:: +// y_{i} = e^{x_{i}} +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.exp(torch.tensor([0, math.log(2.)])) +// tensor([ 1., 2.]) +// +// +//go:linkname Exp py.exp +func Exp(input *py.Object) *py.Object +// +// exp2(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.exp2`. +// +// +//go:linkname Exp2 py.exp2 +func Exp2(input *py.Object) *py.Object +// None +// +//go:linkname Exp2_ py.exp2_ +func Exp2_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Exp_ py.exp_ +func Exp_(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.expand`, but all output tensors +// are freshly created instead of aliasing the input. 
+// +// +//go:linkname ExpandCopy py.expand_copy +func ExpandCopy(__llgo_va_list ...interface{}) *py.Object +// +// expm1(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.expm1`. +// +// +//go:linkname Expm1 py.expm1 +func Expm1(input *py.Object) *py.Object +// None +// +//go:linkname Expm1_ py.expm1_ +func Expm1_(__llgo_va_list ...interface{}) *py.Object +// +// eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. +// +// Args: +// n (int): the number of rows +// m (int, optional): the number of columns with default being :attr:`n` +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Returns: +// Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere +// +// Example:: +// +// >>> torch.eye(3) +// tensor([[ 1., 0., 0.], +// [ 0., 1., 0.], +// [ 0., 0., 1.]]) +// +// +//go:linkname Eye py.eye +func Eye(n *py.Object, m *py.Object) *py.Object +// +// fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor +// +// Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`, +// :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`. +// +// .. math:: +// \text{output} = ( +// min( +// \text{quant\_max}, +// max( +// \text{quant\_min}, +// \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} +// ) +// ) - \text{zero\_point} +// ) \times \text{scale} +// +// Args: +// input (Tensor): the input value(s), in ``torch.float32`` +// scale (Tensor): quantization scale, per channel in ``torch.float32`` +// zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32`` +// axis (int32): channel axis +// quant_min (int64): lower bound of the quantized domain +// quant_max (int64): upper bound of the quantized domain +// +// Returns: +// Tensor: A newly fake_quantized per channel ``torch.float32`` tensor +// +// Example:: +// +// >>> x = torch.randn(2, 2, 2) +// >>> x +// tensor([[[-0.2525, -0.0466], +// [ 0.3491, -0.2168]], +// +// [[-0.5906, 1.6258], +// [ 0.6444, -0.0542]]]) +// >>> scales = (torch.randn(2) + 1) * 0.05 +// >>> scales +// tensor([0.0475, 0.0486]) +// >>> zero_points = torch.zeros(2).to(torch.int32) +// >>> zero_points +// tensor([0, 0]) +// >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255) +// tensor([[[0.0000, 0.0000], +// [0.3405, 0.0000]], +// +// [[0.0000, 1.6134], +// [0.6323, 0.0000]]]) +// +// +//go:linkname FakeQuantizePerChannelAffine py.fake_quantize_per_channel_affine +func 
FakeQuantizePerChannelAffine(input *py.Object, scale *py.Object, zeroPoint *py.Object, axis *py.Object, quantMin *py.Object, quantMax *py.Object) *py.Object +// +// fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor +// +// Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, +// :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. +// +// .. math:: +// \text{output} = ( +// min( +// \text{quant\_max}, +// max( +// \text{quant\_min}, +// \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} +// ) +// ) - \text{zero\_point} +// ) \times \text{scale} +// +// Args: +// input (Tensor): the input value(s), ``torch.float32`` tensor +// scale (double scalar or ``float32`` Tensor): quantization scale +// zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point +// quant_min (int64): lower bound of the quantized domain +// quant_max (int64): upper bound of the quantized domain +// +// Returns: +// Tensor: A newly fake_quantized ``torch.float32`` tensor +// +// Example:: +// +// >>> x = torch.randn(4) +// >>> x +// tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) +// >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) +// tensor([0.1000, 1.0000, 0.4000, 0.0000]) +// >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) +// tensor([0.1000, 1.0000, 0.4000, 0.0000]) +// +// +//go:linkname FakeQuantizePerTensorAffine py.fake_quantize_per_tensor_affine +func FakeQuantizePerTensorAffine(input *py.Object, scale *py.Object, zeroPoint *py.Object, quantMin *py.Object, quantMax *py.Object) *py.Object +// None +// +//go:linkname FbgemmLinearFp16Weight py.fbgemm_linear_fp16_weight +func FbgemmLinearFp16Weight(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FbgemmLinearFp16WeightFp32Activation py.fbgemm_linear_fp16_weight_fp32_activation +func FbgemmLinearFp16WeightFp32Activation(__llgo_va_list ...interface{}) 
*py.Object +// None +// +//go:linkname FbgemmLinearInt8Weight py.fbgemm_linear_int8_weight +func FbgemmLinearInt8Weight(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FbgemmLinearInt8WeightFp32Activation py.fbgemm_linear_int8_weight_fp32_activation +func FbgemmLinearInt8WeightFp32Activation(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FbgemmLinearQuantizeWeight py.fbgemm_linear_quantize_weight +func FbgemmLinearQuantizeWeight(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FbgemmPackGemmMatrixFp16 py.fbgemm_pack_gemm_matrix_fp16 +func FbgemmPackGemmMatrixFp16(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FbgemmPackQuantizedMatrix py.fbgemm_pack_quantized_matrix +func FbgemmPackQuantizedMatrix(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FeatureAlphaDropout py.feature_alpha_dropout +func FeatureAlphaDropout(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FeatureAlphaDropout_ py.feature_alpha_dropout_ +func FeatureAlphaDropout_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FeatureDropout py.feature_dropout +func FeatureDropout(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname FeatureDropout_ py.feature_dropout_ +func FeatureDropout_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Fill py.fill +func Fill(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Fill_ py.fill_ +func Fill_(__llgo_va_list ...interface{}) *py.Object +// +// fix(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.trunc` +// +// +//go:linkname Fix py.fix +func Fix(input *py.Object) *py.Object +// None +// +//go:linkname Fix_ py.fix_ +func Fix_(__llgo_va_list ...interface{}) *py.Object +// +// flatten(input, start_dim=0, end_dim=-1) -> Tensor +// +// Flattens :attr:`input` by reshaping it into a one-dimensional tensor. 
If :attr:`start_dim` or :attr:`end_dim` +// are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. +// The order of elements in :attr:`input` is unchanged. +// +// Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, +// or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can +// be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the +// flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. +// +// .. note:: +// Flattening a zero-dimensional tensor will return a one-dimensional view. +// +// Args: +// input (Tensor): the input tensor. +// start_dim (int): the first dim to flatten +// end_dim (int): the last dim to flatten +// +// Example:: +// +// >>> t = torch.tensor([[[1, 2], +// ... [3, 4]], +// ... [[5, 6], +// ... [7, 8]]]) +// >>> torch.flatten(t) +// tensor([1, 2, 3, 4, 5, 6, 7, 8]) +// >>> torch.flatten(t, start_dim=1) +// tensor([[1, 2, 3, 4], +// [5, 6, 7, 8]]) +// +// +//go:linkname Flatten py.flatten +func Flatten(input *py.Object, startDim *py.Object, endDim *py.Object) *py.Object +// +// flip(input, dims) -> Tensor +// +// Reverse the order of an n-D tensor along given axis in dims. +// +// .. note:: +// `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`, +// which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, +// `torch.flip` is expected to be slower than `np.flip`. +// +// Args: +// input (Tensor): the input tensor. 
+// dims (a list or tuple): axis to flip on +// +// Example:: +// +// >>> x = torch.arange(8).view(2, 2, 2) +// >>> x +// tensor([[[ 0, 1], +// [ 2, 3]], +// +// [[ 4, 5], +// [ 6, 7]]]) +// >>> torch.flip(x, [0, 1]) +// tensor([[[ 6, 7], +// [ 4, 5]], +// +// [[ 2, 3], +// [ 0, 1]]]) +// +// +//go:linkname Flip py.flip +func Flip(input *py.Object, dims *py.Object) *py.Object +// +// fliplr(input) -> Tensor +// +// Flip tensor in the left/right direction, returning a new tensor. +// +// Flip the entries in each row in the left/right direction. +// Columns are preserved, but appear in a different order than before. +// +// Note: +// Requires the tensor to be at least 2-D. +// +// .. note:: +// `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`, +// which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, +// `torch.fliplr` is expected to be slower than `np.fliplr`. +// +// Args: +// input (Tensor): Must be at least 2-dimensional. +// +// Example:: +// +// >>> x = torch.arange(4).view(2, 2) +// >>> x +// tensor([[0, 1], +// [2, 3]]) +// >>> torch.fliplr(x) +// tensor([[1, 0], +// [3, 2]]) +// +// +//go:linkname Fliplr py.fliplr +func Fliplr(input *py.Object) *py.Object +// +// flipud(input) -> Tensor +// +// Flip tensor in the up/down direction, returning a new tensor. +// +// Flip the entries in each column in the up/down direction. +// Rows are preserved, but appear in a different order than before. +// +// Note: +// Requires the tensor to be at least 1-D. +// +// .. note:: +// `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`, +// which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, +// `torch.flipud` is expected to be slower than `np.flipud`. +// +// Args: +// input (Tensor): Must be at least 1-dimensional. 
+// +// Example:: +// +// >>> x = torch.arange(4).view(2, 2) +// >>> x +// tensor([[0, 1], +// [2, 3]]) +// >>> torch.flipud(x) +// tensor([[2, 3], +// [0, 1]]) +// +// +//go:linkname Flipud py.flipud +func Flipud(input *py.Object) *py.Object +// +// float_power(input, exponent, *, out=None) -> Tensor +// +// Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. +// If neither input is complex returns a ``torch.float64`` tensor, +// and if one or more inputs is complex returns a ``torch.complex128`` tensor. +// +// .. note:: +// This function always computes in double precision, unlike :func:`torch.pow`, +// which implements more typical :ref:`type promotion `. +// This is useful when the computation needs to be performed in a wider or more precise dtype, +// or the results of the computation may contain fractional values not representable in the input dtypes, +// like when an integer base is raised to a negative integer exponent. +// +// Args: +// input (Tensor or Number): the base value(s) +// exponent (Tensor or Number): the exponent value(s) +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randint(10, (4,)) +// >>> a +// tensor([6, 4, 7, 1]) +// >>> torch.float_power(a, 2) +// tensor([36., 16., 49., 1.], dtype=torch.float64) +// +// >>> a = torch.arange(1, 5) +// >>> a +// tensor([ 1, 2, 3, 4]) +// >>> exp = torch.tensor([2, -3, 4, -5]) +// >>> exp +// tensor([ 2, -3, 4, -5]) +// >>> torch.float_power(a, exp) +// tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) +// +// +//go:linkname FloatPower py.float_power +func FloatPower(input *py.Object, exponent *py.Object) *py.Object +// +// floor(input, *, out=None) -> Tensor +// +// Returns a new tensor with the floor of the elements of :attr:`input`, +// the largest integer less than or equal to each element. 
+// +// For integer inputs, follows the array-api convention of returning a +// copy of the input tensor. +// +// .. math:: +// \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.8166, 1.5308, -0.2530, -0.2091]) +// >>> torch.floor(a) +// tensor([-1., 1., -1., -1.]) +// +// +//go:linkname Floor py.floor +func Floor(input *py.Object) *py.Object +// None +// +//go:linkname Floor_ py.floor_ +func Floor_(__llgo_va_list ...interface{}) *py.Object +// +// floor_divide(input, other, *, out=None) -> Tensor +// +// .. note:: +// +// Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed +// truncation division. To restore the previous behavior use +// :func:`torch.div` with ``rounding_mode='trunc'``. +// +// Computes :attr:`input` divided by :attr:`other`, elementwise, and floors +// the result. +// +// .. math:: +// \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right) +// +// +// +// Supports broadcasting to a common shape, type promotion, and integer and float inputs. +// +// Args: +// input (Tensor or Number): the dividend +// other (Tensor or Number): the divisor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([4.0, 3.0]) +// >>> b = torch.tensor([2.0, 2.0]) +// >>> torch.floor_divide(a, b) +// tensor([2.0, 1.0]) +// >>> torch.floor_divide(a, 1.4) +// tensor([2.0, 2.0]) +// +// +//go:linkname FloorDivide py.floor_divide +func FloorDivide(input *py.Object, other *py.Object) *py.Object +// +// fmax(input, other, *, out=None) -> Tensor +// +// Computes the element-wise maximum of :attr:`input` and :attr:`other`. 
+// +// This is like :func:`torch.maximum` except it handles NaNs differently: +// if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum. +// Only if both elements are NaN is NaN propagated. +// +// This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function. +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer and floating-point inputs. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')]) +// >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')]) +// >>> torch.fmax(a, b) +// tensor([9.7000, 0.5000, 3.1000, nan]) +// +// +//go:linkname Fmax py.fmax +func Fmax(input *py.Object, other *py.Object) *py.Object +// +// fmin(input, other, *, out=None) -> Tensor +// +// Computes the element-wise minimum of :attr:`input` and :attr:`other`. +// +// This is like :func:`torch.minimum` except it handles NaNs differently: +// if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum. +// Only if both elements are NaN is NaN propagated. +// +// This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function. +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer and floating-point inputs. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')]) +// >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')]) +// >>> torch.fmin(a, b) +// tensor([-9.3000, 0.1000, 2.1000, nan]) +// +// +//go:linkname Fmin py.fmin +func Fmin(input *py.Object, other *py.Object) *py.Object +// +// fmod(input, other, *, out=None) -> Tensor +// +// Applies C++'s `std::fmod `_ entrywise. +// The result has the same sign as the dividend :attr:`input` and its absolute value +// is less than that of :attr:`other`. +// +// This function may be defined in terms of :func:`torch.div` as +// +// .. code:: python +// +// torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer and float inputs. +// +// .. note:: +// +// When the divisor is zero, returns ``NaN`` for floating point dtypes +// on both CPU and GPU; raises ``RuntimeError`` for integer division by +// zero on CPU; Integer division by zero on GPU may return any value. +// +// .. note:: +// +// Complex inputs are not supported. In some cases, it is not mathematically +// possible to satisfy the definition of a modulo operation with complex numbers. +// +// .. seealso:: +// +// :func:`torch.remainder` which implements Python's modulus operator. +// This one is defined using division rounding down the result. +// +// Args: +// input (Tensor): the dividend +// other (Tensor or Scalar): the divisor +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) +// tensor([-1., -0., -1., 1., 0., 1.]) +// >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) +// tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) +// +// +// +//go:linkname Fmod py.fmod +func Fmod(input *py.Object, other *py.Object) *py.Object +// +// frac(input, *, out=None) -> Tensor +// +// Computes the fractional portion of each element in :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) +// +// Example:: +// +// >>> torch.frac(torch.tensor([1, 2.5, -3.2])) +// tensor([ 0.0000, 0.5000, -0.2000]) +// +// +//go:linkname Frac py.frac +func Frac(input *py.Object) *py.Object +// None +// +//go:linkname Frac_ py.frac_ +func Frac_(__llgo_va_list ...interface{}) *py.Object +// +// frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent) +// +// Decomposes :attr:`input` into mantissa and exponent tensors +// such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`. +// +// The range of mantissa is the open interval (-1, 1). +// +// Supports float inputs. +// +// Args: +// input (Tensor): the input tensor +// +// +// Keyword args: +// out (tuple, optional): the output tensors +// +// Example:: +// +// >>> x = torch.arange(9.) 
+// >>> mantissa, exponent = torch.frexp(x) +// >>> mantissa +// tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000]) +// >>> exponent +// tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32) +// >>> torch.ldexp(mantissa, exponent) +// tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.]) +// +// +//go:linkname Frexp py.frexp +func Frexp(input *py.Object) *py.Object +// None +// +//go:linkname FrobeniusNorm py.frobenius_norm +func FrobeniusNorm(__llgo_va_list ...interface{}) *py.Object +// +// from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False) +// +// Creates a CPU tensor with a storage backed by a memory-mapped file. +// +// If ``shared`` is True, then memory is shared between processes. All changes are written to the file. +// If ``shared`` is False, then changes to the tensor do not affect the file. +// +// ``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain +// at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed. +// +// .. note:: +// Only CPU tensors can be mapped to files. +// +// .. note:: +// For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory. +// +// +// Args: +// filename (str): file name to map +// shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the +// underlying `mmap(2) call `_) +// size (int): number of elements in the tensor +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. 
+// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. +// +// Example:: +// >>> t = torch.randn(2, 5, dtype=torch.float64) +// >>> t.numpy().tofile('storage.pt') +// >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64) +// +// +//go:linkname FromFile py.from_file +func FromFile(filename *py.Object, shared *py.Object, size *py.Object) *py.Object +// +// from_numpy(ndarray) -> Tensor +// +// Creates a :class:`Tensor` from a :class:`numpy.ndarray`. +// +// The returned tensor and :attr:`ndarray` share the same memory. Modifications to +// the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned +// tensor is not resizable. +// +// It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``, +// ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``, +// ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``, +// and ``bool``. +// +// .. warning:: +// Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior. +// +// Example:: +// +// >>> a = numpy.array([1, 2, 3]) +// >>> t = torch.from_numpy(a) +// >>> t +// tensor([ 1, 2, 3]) +// >>> t[0] = -1 +// >>> a +// array([-1, 2, 3]) +// +// +//go:linkname FromNumpy py.from_numpy +func FromNumpy(ndarray *py.Object) *py.Object +// +// frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor +// +// Creates a 1-dimensional :class:`Tensor` from an object that implements +// the Python buffer protocol. 
+// +// Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of +// the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count` +// elements. +// +// Note that either of the following must be true: +// +// 1. :attr:`count` is a positive non-zero number, and the total number of bytes +// in the buffer is less than :attr:`offset` plus :attr:`count` times the size +// (in bytes) of :attr:`dtype`. +// +// 2. :attr:`count` is negative, and the length (number of bytes) of the buffer +// subtracted by the :attr:`offset` is a multiple of the size (in bytes) of +// :attr:`dtype`. +// +// The returned tensor and buffer share the same memory. Modifications to +// the tensor will be reflected in the buffer and vice versa. The returned +// tensor is not resizable. +// +// .. note:: +// This function increments the reference count for the object that +// owns the shared memory. Therefore, such memory will not be deallocated +// before the returned tensor goes out of scope. +// +// .. warning:: +// This function's behavior is undefined when passed an object implementing +// the buffer protocol whose data is not on the CPU. Doing so is likely to +// cause a segmentation fault. +// +// .. warning:: +// This function does not try to infer the :attr:`dtype` (hence, it is not +// optional). Passing a different :attr:`dtype` than its source may result +// in unexpected behavior. +// +// Args: +// buffer (object): a Python object that exposes the buffer interface. +// +// Keyword args: +// dtype (:class:`torch.dtype`): the desired data type of returned tensor. +// count (int, optional): the number of desired elements to be read. +// If negative, all the elements (until the end of the buffer) will be +// read. Default: -1. +// offset (int, optional): the number of bytes to skip at the start of +// the buffer. Default: 0. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Example:: +// +// >>> import array +// >>> a = array.array('i', [1, 2, 3]) +// >>> t = torch.frombuffer(a, dtype=torch.int32) +// >>> t +// tensor([ 1, 2, 3]) +// >>> t[0] = -1 +// >>> a +// array([-1, 2, 3]) +// +// >>> # Interprets the signed char bytes as 32-bit integers. +// >>> # Each 4 signed char elements will be interpreted as +// >>> # 1 signed 32-bit integer. +// >>> import array +// >>> a = array.array('b', [-1, 0, 0, 0]) +// >>> torch.frombuffer(a, dtype=torch.int32) +// tensor([255], dtype=torch.int32) +// +// +//go:linkname Frombuffer py.frombuffer +func Frombuffer(buffer *py.Object) *py.Object +// +// full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The +// tensor's dtype is inferred from :attr:`fill_value`. +// +// Args: +// size (int...): a list, tuple, or :class:`torch.Size` of integers defining the +// shape of the output tensor. +// fill_value (Scalar): the value to fill the output tensor with. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Example:: +// +// >>> torch.full((2, 3), 3.141592) +// tensor([[ 3.1416, 3.1416, 3.1416], +// [ 3.1416, 3.1416, 3.1416]]) +// +// +//go:linkname Full py.full +func Full(size *py.Object, fillValue *py.Object) *py.Object +// +// full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`. +// ``torch.full_like(input, fill_value)`` is equivalent to +// ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// fill_value: the number to fill the output tensor with. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. +// +// +//go:linkname FullLike py.full_like +func FullLike(input *py.Object, fillValue *py.Object) *py.Object +// None +// +//go:linkname FusedMovingAvgObsFakeQuant py.fused_moving_avg_obs_fake_quant +func FusedMovingAvgObsFakeQuant(__llgo_va_list ...interface{}) *py.Object +// +// gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor +// +// Gathers values along an axis specified by `dim`. 
+// +// For a 3-D tensor the output is specified by:: +// +// out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 +// out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 +// out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 +// +// :attr:`input` and :attr:`index` must have the same number of dimensions. +// It is also required that ``index.size(d) <= input.size(d)`` for all +// dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. +// Note that ``input`` and ``index`` do not broadcast against each other. +// +// Args: +// input (Tensor): the source tensor +// dim (int): the axis along which to index +// index (LongTensor): the indices of elements to gather +// +// Keyword arguments: +// sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. +// out (Tensor, optional): the destination tensor +// +// Example:: +// +// >>> t = torch.tensor([[1, 2], [3, 4]]) +// >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) +// tensor([[ 1, 1], +// [ 4, 3]]) +// +// +//go:linkname Gather py.gather +func Gather(input *py.Object, dim *py.Object, index *py.Object) *py.Object +// +// gcd(input, other, *, out=None) -> Tensor +// +// Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`. +// +// Both :attr:`input` and :attr:`other` must have integer types. +// +// .. note:: +// This defines :math:`gcd(0, 0) = 0`. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([5, 10, 15]) +// >>> b = torch.tensor([3, 4, 5]) +// >>> torch.gcd(a, b) +// tensor([1, 2, 5]) +// >>> c = torch.tensor([3]) +// >>> torch.gcd(a, c) +// tensor([1, 1, 3]) +// +// +//go:linkname Gcd py.gcd +func Gcd(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Gcd_ py.gcd_ +func Gcd_(__llgo_va_list ...interface{}) *py.Object +// +// ge(input, other, *, out=None) -> Tensor +// +// Computes :math:`\text{input} \geq \text{other}` element-wise. +// +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or float): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[True, True], [False, True]]) +// +// +//go:linkname Ge py.ge +func Ge(input *py.Object, other *py.Object) *py.Object +// +// geqrf(input, *, out=None) -> (Tensor, Tensor) +// +// This is a low-level function for calling LAPACK's geqrf directly. This function +// returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ . +// +// Computes a QR decomposition of :attr:`input`. +// Both `Q` and `R` matrices are stored in the same output tensor `a`. +// The elements of `R` are stored on and above the diagonal. +// Elementary reflectors (or Householder vectors) implicitly defining matrix `Q` +// are stored below the diagonal. +// The results of this function can be used together with :func:`torch.linalg.householder_product` +// to obtain the `Q` matrix or +// with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix, +// for an efficient matrix-matrix multiplication. 
+// +// See `LAPACK documentation for geqrf`_ for further details. +// +// .. note:: +// See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq` +// with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition. +// +// Args: +// input (Tensor): the input matrix +// +// Keyword args: +// out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`. +// +// .. _LAPACK documentation for geqrf: +// http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html +// +// +// +//go:linkname Geqrf py.geqrf +func Geqrf(input *py.Object) *py.Object +// +// ger(input, vec2, *, out=None) -> Tensor +// +// Alias of :func:`torch.outer`. +// +// .. warning:: +// This function is deprecated and will be removed in a future PyTorch release. +// Use :func:`torch.outer` instead. +// +// +//go:linkname Ger py.ger +func Ger(input *py.Object, vec2 *py.Object) *py.Object +// None +// +//go:linkname GetDevice py.get_device +func GetDevice(__llgo_va_list ...interface{}) *py.Object +// +// gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors +// +// Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in +// one or more dimensions using the `second-order accurate central differences method +// `_ and +// either first or second order estimates at the boundaries. +// +// The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not +// specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates +// to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional +// :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and +// :math:`g(1, 2, 3)\ == input[1, 2, 3]`. 
+// +// When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. +// This is detailed in the "Keyword Arguments" section below. +// +// The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is +// accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be +// improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative +// is estimated using `Taylor’s theorem with remainder `_. +// Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring +// it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: +// +// .. math:: +// \begin{aligned} +// f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ +// f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ +// \end{aligned} +// +// Using the fact that :math:`f \in C^3` and solving the linear system, we derive: +// +// .. math:: +// f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) +// + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } +// +// .. note:: +// We estimate the gradient of functions in complex domain +// :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. +// +// The value of each partial derivative at the boundary points is computed differently. See edge_order below. +// +// Args: +// input (``Tensor``): the tensor that represents the values of the function +// +// Keyword args: +// spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify +// how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then +// the indices are multiplied by the scalar to produce the coordinates. 
For example, if :attr:`spacing=2` the +// indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding +// indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). +// Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for +// the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then +// the coordinates are (t0[1], t1[2], t2[3]) +// +// dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default +// the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of +// the :attr:`spacing` argument must correspond with the specified dims." +// +// edge_order (``int``, optional): 1 or 2, for `first-order +// `_ or +// `second-order `_ +// estimation of the boundary ("edge") values, respectively. +// +// Examples:: +// +// >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] +// >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) +// >>> values = torch.tensor([4., 1., 1., 16.], ) +// >>> torch.gradient(values, spacing = coordinates) +// (tensor([-3., -2., 2., 5.]),) +// +// >>> # Estimates the gradient of the R^2 -> R function whose samples are +// >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost +// >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates +// >>> # partial derivative for both dimensions. 
+// >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) +// >>> torch.gradient(t) +// (tensor([[ 9., 18., 36., 72.], +// [ 9., 18., 36., 72.]]), +// tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], +// [10.0000, 15.0000, 30.0000, 40.0000]])) +// +// >>> # A scalar value for spacing modifies the relationship between tensor indices +// >>> # and input coordinates by multiplying the indices to find the +// >>> # coordinates. For example, below the indices of the innermost +// >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of +// >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. +// >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) +// (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], +// [ 4.5000, 9.0000, 18.0000, 36.0000]]), +// tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], +// [ 5.0000, 7.5000, 15.0000, 20.0000]])) +// >>> # doubling the spacing between samples halves the estimated partial gradients. +// +// >>> +// >>> # Estimates only the partial derivative for dimension 1 +// >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) +// (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], +// [10.0000, 15.0000, 30.0000, 40.0000]]),) +// +// >>> # When spacing is a list of scalars, the relationship between the tensor +// >>> # indices and input coordinates changes based on dimension. +// >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate +// >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension +// >>> # 0, 1 translate to coordinates of [0, 2]. +// >>> torch.gradient(t, spacing = [3., 2.]) +// (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], +// [ 4.5000, 9.0000, 18.0000, 36.0000]]), +// tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], +// [ 3.3333, 5.0000, 10.0000, 13.3333]])) +// +// >>> # The following example is a replication of the previous one with explicit +// >>> # coordinates. 
+// >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) +// >>> torch.gradient(t, spacing = coords) +// (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], +// [ 4.5000, 9.0000, 18.0000, 36.0000]]), +// tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], +// [ 3.3333, 5.0000, 10.0000, 13.3333]])) +// +// +// +//go:linkname Gradient py.gradient +func Gradient(input *py.Object) *py.Object +// +// greater(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.gt`. +// +// +//go:linkname Greater py.greater +func Greater(input *py.Object, other *py.Object) *py.Object +// +// greater_equal(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.ge`. +// +// +//go:linkname GreaterEqual py.greater_equal +func GreaterEqual(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname GridSampler py.grid_sampler +func GridSampler(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GridSampler2d py.grid_sampler_2d +func GridSampler2d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GridSampler3d py.grid_sampler_3d +func GridSampler3d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GroupNorm py.group_norm +func GroupNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Gru py.gru +func Gru(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname GruCell py.gru_cell +func GruCell(__llgo_va_list ...interface{}) *py.Object +// +// gt(input, other, *, out=None) -> Tensor +// +// Computes :math:`\text{input} > \text{other}` element-wise. +// +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or float): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Returns: +// A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[False, True], [False, False]]) +// +// +//go:linkname Gt py.gt +func Gt(input *py.Object, other *py.Object) *py.Object +// +// hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Hamming window function. +// +// .. math:: +// w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), +// +// where :math:`N` is the full window size. +// +// The input :attr:`window_length` is a positive integer controlling the +// returned window size. :attr:`periodic` flag determines whether the returned +// window trims off the last duplicate value from the symmetric window and is +// ready to be used as a periodic window with functions like +// :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +// above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +// ``torch.hamming_window(L, periodic=True)`` equal to +// ``torch.hamming_window(L + 1, periodic=False)[:-1])``. +// +// .. note:: +// If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +// +// .. note:: +// This is a generalized version of :meth:`torch.hann_window`. +// +// Arguments: +// window_length (int): the size of returned window +// periodic (bool, optional): If True, returns a window to be used as periodic +// function. If False, return a symmetric window. +// alpha (float, optional): The coefficient :math:`\alpha` in the equation above +// beta (float, optional): The coefficient :math:`\beta` in the equation above +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). Only floating point types are supported. +// layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only +// ``torch.strided`` (dense layout) is supported. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Returns: +// Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. +// +// +// +//go:linkname HammingWindow py.hamming_window +func HammingWindow(windowLength *py.Object, periodic *py.Object, alpha *py.Object, beta *py.Object) *py.Object +// +// hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Hann window function. +// +// .. math:: +// w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = +// \sin^2 \left( \frac{\pi n}{N - 1} \right), +// +// where :math:`N` is the full window size. +// +// The input :attr:`window_length` is a positive integer controlling the +// returned window size. :attr:`periodic` flag determines whether the returned +// window trims off the last duplicate value from the symmetric window and is +// ready to be used as a periodic window with functions like +// :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +// above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +// ``torch.hann_window(L, periodic=True)`` equal to +// ``torch.hann_window(L + 1, periodic=False)[:-1])``. +// +// .. 
note:: +// If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +// +// Arguments: +// window_length (int): the size of returned window +// periodic (bool, optional): If True, returns a window to be used as periodic +// function. If False, return a symmetric window. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). Only floating point types are supported. +// layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only +// ``torch.strided`` (dense layout) is supported. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Returns: +// Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window +// +// +// +//go:linkname HannWindow py.hann_window +func HannWindow(windowLength *py.Object, periodic *py.Object) *py.Object +// +// hardshrink(input, lambd=0.5) -> Tensor +// +// Applies the hard shrinkage function element-wise +// +// See :class:`~torch.nn.Hardshrink` for more details. +// +// +//go:linkname Hardshrink py.hardshrink +func Hardshrink(input *py.Object, lambd *py.Object) *py.Object +// +// heaviside(input, values, *, out=None) -> Tensor +// +// Computes the Heaviside step function for each element in :attr:`input`. +// The Heaviside step function is defined as: +// +// .. 
math:: +// \text{{heaviside}}(input, values) = \begin{cases} +// 0, & \text{if input < 0}\\ +// values, & \text{if input == 0}\\ +// 1, & \text{if input > 0} +// \end{cases} +// +// +// Args: +// input (Tensor): the input tensor. +// values (Tensor): The values to use where :attr:`input` is zero. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> input = torch.tensor([-1.5, 0, 2.0]) +// >>> values = torch.tensor([0.5]) +// >>> torch.heaviside(input, values) +// tensor([0.0000, 0.5000, 1.0000]) +// >>> values = torch.tensor([1.2, -2.0, 3.5]) +// >>> torch.heaviside(input, values) +// tensor([0., -2., 1.]) +// +// +// +//go:linkname Heaviside py.heaviside +func Heaviside(input *py.Object, values *py.Object) *py.Object +// None +// +//go:linkname HingeEmbeddingLoss py.hinge_embedding_loss +func HingeEmbeddingLoss(__llgo_va_list ...interface{}) *py.Object +// +// histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor +// +// Computes the histogram of a tensor. +// +// The elements are sorted into equal width bins between :attr:`min` and +// :attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and +// maximum values of the data are used. +// +// Elements lower than min and higher than max and ``NaN`` elements are ignored. +// +// Args: +// input (Tensor): the input tensor. +// bins (int): number of histogram bins +// min (Scalar): lower end of the range (inclusive) +// max (Scalar): upper end of the range (inclusive) +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Returns: +// Tensor: Histogram represented as a tensor +// +// Example:: +// +// >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3) +// tensor([ 0., 2., 1., 0.]) +// +// +//go:linkname Histc py.histc +func Histc(input *py.Object, bins *py.Object, min *py.Object, max *py.Object) *py.Object +// +// histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) +// +// Computes a histogram of the values in a tensor. +// +// :attr:`bins` can be an integer or a 1D tensor. +// +// If :attr:`bins` is an int, it specifies the number of equal-width bins. +// By default, the lower and upper range of the bins is determined by the +// minimum and maximum elements of the input tensor. The :attr:`range` +// argument can be provided to specify a range for the bins. +// +// If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges +// including the rightmost edge. It should contain at least 2 elements +// and its elements should be increasing. +// +// Args: +// input (Tensor): the input tensor. +// bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, +// defines the sequence of bin edges including the rightmost edge. +// +// Keyword args: +// range (tuple of float): Defines the range of the bins. +// weight (Tensor): If provided, weight should have the same shape as input. Each value in +// input contributes its associated weight towards its bin's result. +// density (bool): If False, the result will contain the count (or total weight) in each bin. +// If True, the result is the value of the probability density function over the bins, +// normalized such that the integral over the range of the bins is 1. +// out (Tensor, optional): the output tensor. (tuple, optional): The result tuple of two output tensors (hist, bin_edges). +// +// Returns: +// hist (Tensor): 1D Tensor containing the values of the histogram. +// bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. 
+// +// Example:: +// +// >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) +// (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) +// >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) +// (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) +// +// +//go:linkname Histogram py.histogram +func Histogram(input *py.Object, bins *py.Object) *py.Object +// +// histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) +// +// Computes a multi-dimensional histogram of the values in a tensor. +// +// Interprets the elements of an input tensor whose innermost dimension has size N +// as a collection of N-dimensional points. Maps each of the points into a set of +// N-dimensional bins and returns the number of points (or total weight) in each bin. +// +// :attr:`input` must be a tensor with at least 2 dimensions. +// If input has shape (M, N), each of its M rows defines a point in N-dimensional space. +// If input has three or more dimensions, all but the last dimension are flattened. +// +// Each dimension is independently associated with its own strictly increasing sequence +// of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D +// tensors. Alternatively, bin edges may be constructed automatically by passing a +// sequence of integers specifying the number of equal-width bins in each dimension. 
+// +// For each N-dimensional point in input: +// - Each of its coordinates is binned independently among the bin edges +// corresponding to its dimension +// - Binning results are combined to identify the N-dimensional bin (if any) +// into which the point falls +// - If the point falls into a bin, the bin's count (or total weight) is incremented +// - Points which do not fall into any bin do not contribute to the output +// +// :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. +// +// If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences +// of bin edges. Each 1D tensor should contain a strictly increasing sequence with at +// least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying +// the left and right edges of all bins. Every bin is exclusive of its left edge. Only +// the rightmost bin is inclusive of its right edge. +// +// If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins +// in each dimension. By default, the leftmost and rightmost bin edges in each dimension +// are determined by the minimum and maximum elements of the input tensor in the +// corresponding dimension. The :attr:`range` argument can be provided to manually +// specify the leftmost and rightmost bin edges in each dimension. +// +// If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. +// +// .. note:: +// See also :func:`torch.histogram`, which specifically computes 1D histograms. +// While :func:`torch.histogramdd` infers the dimensionality of its bins and +// binned values from the shape of :attr:`input`, :func:`torch.histogram` +// accepts and flattens :attr:`input` of any shape. +// +// Args: +// input (Tensor): the input tensor. +// bins: Tensor[], int[], or int. +// If Tensor[], defines the sequences of bin edges. +// If int[], defines the number of equal-width bins in each dimension. 
+// If int, defines the number of equal-width bins for all dimensions. +// Keyword args: +// range (sequence of float): Defines the leftmost and rightmost bin edges +// in each dimension. +// weight (Tensor): By default, each value in the input has weight 1. If a weight +// tensor is passed, each N-dimensional coordinate in input +// contributes its associated weight towards its bin's result. +// The weight tensor should have the same shape as the :attr:`input` +// tensor excluding its innermost dimension N. +// density (bool): If False (default), the result will contain the count (or total weight) +// in each bin. If True, each count (weight) is divided by the total count +// (total weight), then divided by the volume of its associated bin. +// Returns: +// hist (Tensor): N-dimensional Tensor containing the values of the histogram. +// bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. +// +// Example:: +// >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], +// ... weight=torch.tensor([1., 2., 4., 8.])) +// torch.return_types.histogramdd( +// hist=tensor([[0., 1., 0.], +// [2., 0., 0.], +// [4., 0., 8.]]), +// bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), +// tensor([0.0000, 0.6667, 1.3333, 2.0000]))) +// +// >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], +// ... 
range=[0., 1., 0., 1.], density=True) +// torch.return_types.histogramdd( +// hist=tensor([[2., 0.], +// [0., 2.]]), +// bin_edges=(tensor([0.0000, 0.5000, 1.0000]), +// tensor([0.0000, 0.5000, 1.0000]))) +// +// +// +//go:linkname Histogramdd py.histogramdd +func Histogramdd(input *py.Object, bins *py.Object) *py.Object +// None +// +//go:linkname Hsmm py.hsmm +func Hsmm(__llgo_va_list ...interface{}) *py.Object +// +// hsplit(input, indices_or_sections) -> List of Tensors +// +// Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors +// horizontally according to :attr:`indices_or_sections`. Each split is a view of +// :attr:`input`. +// +// If :attr:`input` is one dimensional this is equivalent to calling +// torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is +// zero), and if :attr:`input` has two or more dimensions it's equivalent to calling +// torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), +// except that if :attr:`indices_or_sections` is an integer it must evenly divide +// the split dimension or a runtime error will be thrown. +// +// This function is based on NumPy's :func:`numpy.hsplit`. +// +// Args: +// input (Tensor): tensor to split. +// indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+// +// Example:: +// >>> t = torch.arange(16.0).reshape(4,4) +// >>> t +// tensor([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]) +// >>> torch.hsplit(t, 2) +// (tensor([[ 0., 1.], +// [ 4., 5.], +// [ 8., 9.], +// [12., 13.]]), +// tensor([[ 2., 3.], +// [ 6., 7.], +// [10., 11.], +// [14., 15.]])) +// >>> torch.hsplit(t, [3, 6]) +// (tensor([[ 0., 1., 2.], +// [ 4., 5., 6.], +// [ 8., 9., 10.], +// [12., 13., 14.]]), +// tensor([[ 3.], +// [ 7.], +// [11.], +// [15.]]), +// tensor([], size=(4, 0))) +// +// +// +//go:linkname Hsplit py.hsplit +func Hsplit(input *py.Object, indicesOrSections *py.Object) *py.Object +// +// hspmm(mat1, mat2, *, out=None) -> Tensor +// +// Performs a matrix multiplication of a :ref:`sparse COO matrix +// ` :attr:`mat1` and a strided matrix :attr:`mat2`. The +// result is a (1 + 1)-dimensional :ref:`hybrid COO matrix +// `. +// +// Args: +// mat1 (Tensor): the first sparse matrix to be matrix multiplied +// mat2 (Tensor): the second strided matrix to be matrix multiplied +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// +//go:linkname Hspmm py.hspmm +func Hspmm(mat1 *py.Object, mat2 *py.Object) *py.Object +// +// hstack(tensors, *, out=None) -> Tensor +// +// Stack tensors in sequence horizontally (column wise). +// +// This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors. +// +// Args: +// tensors (sequence of Tensors): sequence of tensors to concatenate +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([1, 2, 3]) +// >>> b = torch.tensor([4, 5, 6]) +// >>> torch.hstack((a,b)) +// tensor([1, 2, 3, 4, 5, 6]) +// >>> a = torch.tensor([[1],[2],[3]]) +// >>> b = torch.tensor([[4],[5],[6]]) +// >>> torch.hstack((a,b)) +// tensor([[1, 4], +// [2, 5], +// [3, 6]]) +// +// +// +//go:linkname Hstack py.hstack +func Hstack(tensors *py.Object) *py.Object +// +// hypot(input, other, *, out=None) -> Tensor +// +// Given the legs of a right triangle, return its hypotenuse. +// +// .. math:: +// \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}} +// +// The shapes of ``input`` and ``other`` must be +// :ref:`broadcastable `. +// +// Args: +// input (Tensor): the first input tensor +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])) +// tensor([5.0000, 5.6569, 6.4031]) +// +// +// +//go:linkname Hypot py.hypot +func Hypot(input *py.Object, other *py.Object) *py.Object +// +// i0(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.i0`. +// +// +//go:linkname I0 py.i0 +func I0(input *py.Object) *py.Object +// None +// +//go:linkname I0_ py.i0_ +func I0_(__llgo_va_list ...interface{}) *py.Object +// +// igamma(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.gammainc`. +// +// +//go:linkname Igamma py.igamma +func Igamma(input *py.Object, other *py.Object) *py.Object +// +// igammac(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.gammaincc`. +// +// +//go:linkname Igammac py.igammac +func Igammac(input *py.Object, other *py.Object) *py.Object +// +// imag(input) -> Tensor +// +// Returns a new tensor containing imaginary values of the :attr:`self` tensor. +// The returned tensor and :attr:`self` share the same underlying storage. +// +// .. 
warning:: +// :func:`imag` is only supported for tensors with complex dtypes. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x=torch.randn(4, dtype=torch.cfloat) +// >>> x +// tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) +// >>> x.imag +// tensor([ 0.3553, -0.7896, -0.0633, -0.8119]) +// +// +// +//go:linkname Imag py.imag +func Imag(input *py.Object) *py.Object +// +// index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor +// +// See :meth:`~Tensor.index_add_` for function description. +// +// +//go:linkname IndexAdd py.index_add +func IndexAdd(input *py.Object, dim *py.Object, index *py.Object, source *py.Object) *py.Object +// +// index_copy(input, dim, index, source, *, out=None) -> Tensor +// +// See :meth:`~Tensor.index_copy_` for function description. +// +// +//go:linkname IndexCopy py.index_copy +func IndexCopy(input *py.Object, dim *py.Object, index *py.Object, source *py.Object) *py.Object +// None +// +//go:linkname IndexFill py.index_fill +func IndexFill(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IndexPut py.index_put +func IndexPut(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IndexPut_ py.index_put_ +func IndexPut_(__llgo_va_list ...interface{}) *py.Object +// +// index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor +// +// See :meth:`~Tensor.index_reduce_` for function description. +// +// +//go:linkname IndexReduce py.index_reduce +func IndexReduce(input *py.Object, dim *py.Object, index *py.Object, source *py.Object, reduce *py.Object) *py.Object +// +// index_select(input, dim, index, *, out=None) -> Tensor +// +// Returns a new tensor which indexes the :attr:`input` tensor along dimension +// :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. +// +// The returned tensor has the same number of dimensions as the original tensor +// (:attr:`input`). 
The :attr:`dim`\ th dimension has the same size as the length +// of :attr:`index`; other dimensions have the same size as in the original tensor. +// +// .. note:: The returned tensor does **not** use the same storage as the original +// tensor. If :attr:`out` has a different shape than expected, we +// silently change it to the correct shape, reallocating the underlying +// storage if necessary. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension in which we index +// index (IntTensor or LongTensor): the 1-D tensor containing the indices to index +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> x = torch.randn(3, 4) +// >>> x +// tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], +// [-0.4664, 0.2647, -0.1228, -1.1068], +// [-1.1734, -0.6571, 0.7230, -0.6004]]) +// >>> indices = torch.tensor([0, 2]) +// >>> torch.index_select(x, 0, indices) +// tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], +// [-1.1734, -0.6571, 0.7230, -0.6004]]) +// >>> torch.index_select(x, 1, indices) +// tensor([[ 0.1427, -0.5414], +// [-0.4664, -0.1228], +// [-1.1734, 0.7230]]) +// +// +//go:linkname IndexSelect py.index_select +func IndexSelect(input *py.Object, dim *py.Object, index *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.indices`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname IndicesCopy py.indices_copy +func IndicesCopy(__llgo_va_list ...interface{}) *py.Object +// +// inner(input, other, *, out=None) -> Tensor +// +// Computes the dot product for 1D tensors. For higher dimensions, sums the product +// of elements from :attr:`input` and :attr:`other` along their last dimension. +// +// .. note:: +// +// If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent +// to `torch.mul(input, other)`. 
+// +// If both :attr:`input` and :attr:`other` are non-scalars, the size of their last +// dimension must match and the result is equivalent to `torch.tensordot(input, +// other, dims=([-1], [-1]))` +// +// Args: +// input (Tensor): First input tensor +// other (Tensor): Second input tensor +// +// Keyword args: +// out (Tensor, optional): Optional output tensor to write result into. The output +// shape is `input.shape[:-1] + other.shape[:-1]`. +// +// Example:: +// +// # Dot product +// >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1])) +// tensor(7) +// +// # Multidimensional input tensors +// >>> a = torch.randn(2, 3) +// >>> a +// tensor([[0.8173, 1.0874, 1.1784], +// [0.3279, 0.1234, 2.7894]]) +// >>> b = torch.randn(2, 4, 3) +// >>> b +// tensor([[[-0.4682, -0.7159, 0.1506], +// [ 0.4034, -0.3657, 1.0387], +// [ 0.9892, -0.6684, 0.1774], +// [ 0.9482, 1.3261, 0.3917]], +// +// [[ 0.4537, 0.7493, 1.1724], +// [ 0.2291, 0.5749, -0.2267], +// [-0.7920, 0.3607, -0.3701], +// [ 1.3666, -0.5850, -1.7242]]]) +// >>> torch.inner(a, b) +// tensor([[[-0.9837, 1.1560, 0.2907, 2.6785], +// [ 2.5671, 0.5452, -0.6912, -1.5509]], +// +// [[ 0.1782, 2.9843, 0.7366, 1.5672], +// [ 3.5115, -0.4864, -1.2476, -4.4337]]]) +// +// # Scalar input +// >>> torch.inner(a, torch.tensor(2)) +// tensor([[1.6347, 2.1748, 2.3567], +// [0.6558, 0.2469, 5.5787]]) +// +// +//go:linkname Inner py.inner +func Inner(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname InstanceNorm py.instance_norm +func InstanceNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IntRepr py.int_repr +func IntRepr(__llgo_va_list ...interface{}) *py.Object +// +// inverse(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.linalg.inv` +// +// +//go:linkname Inverse py.inverse +func Inverse(input *py.Object) *py.Object +// +// is_complex(input) -> (bool) +// +// Returns True if the data type of :attr:`input` is a complex data type i.e., +// one 
of ``torch.complex64``, and ``torch.complex128``. +// +// Args: +// input (Tensor): the input tensor. +// +// +//go:linkname IsComplex py.is_complex +func IsComplex(input *py.Object) *py.Object +// +// is_conj(input) -> (bool) +// +// Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`. +// +// Args: +// input (Tensor): the input tensor. +// +// +//go:linkname IsConj py.is_conj +func IsConj(input *py.Object) *py.Object +// None +// +//go:linkname IsDistributed py.is_distributed +func IsDistributed(__llgo_va_list ...interface{}) *py.Object +// +// is_floating_point(input) -> (bool) +// +// Returns True if the data type of :attr:`input` is a floating point data type i.e., +// one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``. +// +// Args: +// input (Tensor): the input tensor. +// +// +//go:linkname IsFloatingPoint py.is_floating_point +func IsFloatingPoint(input *py.Object) *py.Object +// +// is_inference(input) -> (bool) +// +// Returns True if :attr:`input` is an inference tensor. +// +// A non-view tensor is an inference tensor if and only if it was +// allocated during inference mode. A view tensor is an inference +// tensor if and only if the tensor it is a view of is an inference tensor. +// +// For details on inference mode please see +// `Inference Mode `_. +// +// Args: +// input (Tensor): the input tensor. +// +// +//go:linkname IsInference py.is_inference +func IsInference(input *py.Object) *py.Object +// None +// +//go:linkname IsNeg py.is_neg +func IsNeg(__llgo_va_list ...interface{}) *py.Object +// +// is_nonzero(input) -> (bool) +// +// Returns True if the :attr:`input` is a single element tensor which is not equal to zero +// after type conversions. +// i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or +// ``torch.tensor([False])``. +// Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case +// of sparse tensors). 
+// +// Args: +// input (Tensor): the input tensor. +// +// Examples:: +// +// >>> torch.is_nonzero(torch.tensor([0.])) +// False +// >>> torch.is_nonzero(torch.tensor([1.5])) +// True +// >>> torch.is_nonzero(torch.tensor([False])) +// False +// >>> torch.is_nonzero(torch.tensor([3])) +// True +// >>> torch.is_nonzero(torch.tensor([1, 3, 5])) +// Traceback (most recent call last): +// ... +// RuntimeError: bool value of Tensor with more than one value is ambiguous +// >>> torch.is_nonzero(torch.tensor([])) +// Traceback (most recent call last): +// ... +// RuntimeError: bool value of Tensor with no values is ambiguous +// +// +//go:linkname IsNonzero py.is_nonzero +func IsNonzero(input *py.Object) *py.Object +// None +// +//go:linkname IsSameSize py.is_same_size +func IsSameSize(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsSigned py.is_signed +func IsSigned(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname IsVulkanAvailable py.is_vulkan_available +func IsVulkanAvailable(__llgo_va_list ...interface{}) *py.Object +// +// isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor +// +// Returns a new tensor with boolean elements representing if each element of +// :attr:`input` is "close" to the corresponding element of :attr:`other`. +// Closeness is defined as: +// +// .. math:: +// \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +// +// +// where :attr:`input` and :attr:`other` are finite. Where :attr:`input` +// and/or :attr:`other` are nonfinite they are close if and only if +// they are equal, with NaNs being considered equal to each other when +// :attr:`equal_nan` is True. +// +// Args: +// input (Tensor): first tensor to compare +// other (Tensor): second tensor to compare +// atol (float, optional): absolute tolerance. Default: 1e-08 +// rtol (float, optional): relative tolerance. 
Default: 1e-05 +// equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` +// +// Examples:: +// +// >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4))) +// tensor([ True, False, False]) +// >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5) +// tensor([True, True]) +// +// +//go:linkname Isclose py.isclose +func Isclose(input *py.Object, other *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object +// +// isfinite(input) -> Tensor +// +// Returns a new tensor with boolean elements representing if each element is `finite` or not. +// +// Real values are finite when they are not NaN, negative infinity, or infinity. +// Complex values are finite when both their real and imaginary parts are finite. +// +// Args: +// input (Tensor): the input tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is finite and False elsewhere +// +// Example:: +// +// >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) +// tensor([True, False, True, False, False]) +// +// +//go:linkname Isfinite py.isfinite +func Isfinite(input *py.Object) *py.Object +// +// isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor +// +// Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns +// a boolean tensor of the same shape as :attr:`elements` that is True for elements +// in :attr:`test_elements` and False otherwise. +// +// .. note:: +// One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. +// +// Args: +// elements (Tensor or Scalar): Input elements +// test_elements (Tensor or Scalar): Values against which to test for each input element +// assume_unique (bool, optional): If True, assumes both :attr:`elements` and +// :attr:`test_elements` contain unique elements, which can speed up the +// calculation. 
Default: False +// invert (bool, optional): If True, inverts the boolean return tensor, resulting in True +// values for elements *not* in :attr:`test_elements`. Default: False +// +// Returns: +// A boolean tensor of the same shape as :attr:`elements` that is True for elements in +// :attr:`test_elements` and False otherwise +// +// Example: +// >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) +// tensor([[False, True], +// [ True, False]]) +// +// +//go:linkname Isin py.isin +func Isin(elements *py.Object, testElements *py.Object) *py.Object +// +// isinf(input) -> Tensor +// +// Tests if each element of :attr:`input` is infinite +// (positive or negative infinity) or not. +// +// .. note:: +// Complex values are infinite when their real or imaginary part is +// infinite. +// +// Args: +// input (Tensor): the input tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is infinite and False elsewhere +// +// Example:: +// +// >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) +// tensor([False, True, False, True, False]) +// +// +//go:linkname Isinf py.isinf +func Isinf(input *py.Object) *py.Object +// +// isnan(input) -> Tensor +// +// Returns a new tensor with boolean elements representing if each element of :attr:`input` +// is NaN or not. Complex values are considered NaN when either their real +// and/or imaginary part is NaN. +// +// Arguments: +// input (Tensor): the input tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is NaN and False elsewhere +// +// Example:: +// +// >>> torch.isnan(torch.tensor([1, float('nan'), 2])) +// tensor([False, True, False]) +// +// +//go:linkname Isnan py.isnan +func Isnan(input *py.Object) *py.Object +// +// isneginf(input, *, out=None) -> Tensor +// Tests if each element of :attr:`input` is negative infinity or not. +// +// Args: +// input (Tensor): the input tensor. 
+// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) +// >>> torch.isneginf(a) +// tensor([ True, False, False]) +// +// +//go:linkname Isneginf py.isneginf +func Isneginf(input *py.Object) *py.Object +// +// isposinf(input, *, out=None) -> Tensor +// Tests if each element of :attr:`input` is positive infinity or not. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) +// >>> torch.isposinf(a) +// tensor([False, True, False]) +// +// +//go:linkname Isposinf py.isposinf +func Isposinf(input *py.Object) *py.Object +// +// isreal(input) -> Tensor +// +// Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not. +// All real-valued types are considered real. Complex values are considered real when their imaginary part is 0. +// +// Arguments: +// input (Tensor): the input tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is real and False elsewhere +// +// Example:: +// +// >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j])) +// tensor([True, False, True]) +// +// +//go:linkname Isreal py.isreal +func Isreal(input *py.Object) *py.Object +// istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, normalized=False, onesided=None, length=None, return_complex=False) -> Tensor: +// +// Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`. +// +// .. warning:: +// From version 2.1, a warning will be provided if a :attr:`window` is +// not specified. In a future release, this attribute will be required. +// Please provide the same window used in the stft call. 
+// +// It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the +// least squares estimation of the original signal. The algorithm will check using the NOLA condition ( +// nonzero overlap). +// +// Important consideration in the parameters :attr:`window` and :attr:`center` so that the envelop +// created by the summation of all the windows is never zero at certain point in time. Specifically, +// :math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`. +// +// Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame, +// ``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False +// since the signal isn't padded). If `length` is given in the arguments and is longer than expected, +// ``istft`` will pad zeros to the end of the returned signal. +// +// If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc. +// Left padding can be trimmed off exactly because they can be calculated but right padding cannot be +// calculated without additional information. +// +// Example: Suppose the last window is: +// ``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]`` +// +// The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation +// of right padding. These additional values could be zeros or a reflection of the signal so providing +// :attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed +// (some loss of signal). +// +// [1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform," +// IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984. +// +// Args: +// input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`, +// output. 
That is a complex tensor of shape `(B?, N, T)` where +// +// - `B?` is an optional batch dimension +// - `N` is the number of frequency samples, `(n_fft // 2) + 1` +// for onesided input, or otherwise `n_fft`. +// - `T` is the number of frames, `1 + length // hop_length` for centered stft, +// or `1 + (length - n_fft) // hop_length` otherwise. +// +// .. versionchanged:: 2.0 +// Real datatype inputs are no longer supported. Input must now have a +// complex datatype, as returned by ``stft(..., return_complex=True)``. +// n_fft (int): Size of Fourier transform +// hop_length (Optional[int]): The distance between neighboring sliding window frames. +// (Default: ``n_fft // 4``) +// win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``) +// window (Optional[torch.Tensor]): The optional window function. +// Shape must be 1d and `<= n_fft` +// (Default: ``torch.ones(win_length)``) +// center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is +// centered at time :math:`t \times \text{hop\_length}`. +// (Default: ``True``) +// normalized (bool): Whether the STFT was normalized. (Default: ``False``) +// onesided (Optional[bool]): Whether the STFT was onesided. +// (Default: ``True`` if `n_fft != fft_size` in the input size) +// length (Optional[int]): The amount to trim the signal by (i.e. the +// original signal length). Defaults to `(T - 1) * hop_length` for +// centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T` +// is the number of input frames. +// return_complex (Optional[bool]): +// Whether the output should be complex, or if the input should be +// assumed to derive from a real signal and window. +// Note that this is incompatible with ``onesided=True``. +// (Default: ``False``) +// +// Returns: +// Tensor: Least squares estimation of the original signal of shape `(B?, length)` where +// `B?` is an optional batch dimension from the input tensor. 
+// +// +//go:linkname Istft py.istft +func Istft(input *py.Object, nFft *py.Object, hopLength *py.Object, winLength *py.Object, window *py.Object, center *py.Object, normalized *py.Object, onesided *py.Object, length *py.Object, returnComplex *py.Object) *py.Object +// +// kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. +// +// Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and +// ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, +// where ``L`` is the :attr:`window_length`. This function computes: +// +// .. math:: +// out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) +// +// Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling +// ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. +// The :attr:`periodic` argument is intended as a helpful shorthand +// to produce a periodic window as input to functions like :func:`torch.stft`. +// +// .. note:: +// If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. +// +// +// Args: +// window_length (int): length of the window. +// periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. +// If False, returns a symmetric window suitable for use in filter design. +// beta (float, optional): shape parameter for the window. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only +// ``torch.strided`` (dense layout) is supported. 
+// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// +// +//go:linkname KaiserWindow py.kaiser_window +func KaiserWindow(windowLength *py.Object, periodic *py.Object, beta *py.Object) *py.Object +// None +// +//go:linkname KlDiv py.kl_div +func KlDiv(__llgo_va_list ...interface{}) *py.Object +// +// kron(input, other, *, out=None) -> Tensor +// +// Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`. +// +// If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a +// :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a +// :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries: +// +// .. math:: +// (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} = +// \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n}, +// +// where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`. +// If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions. +// +// Supports real-valued and complex-valued inputs. +// +// .. note:: +// This function generalizes the typical definition of the Kronecker product for two matrices to two tensors, +// as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a +// :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix: +// +// .. 
math:: +// \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix} +// a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\ +// \vdots & \ddots & \vdots \\ +// a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix} +// +// where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`. +// +// Arguments: +// input (Tensor) +// other (Tensor) +// +// Keyword args: +// out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None`` +// +// Examples:: +// +// >>> mat1 = torch.eye(2) +// >>> mat2 = torch.ones(2, 2) +// >>> torch.kron(mat1, mat2) +// tensor([[1., 1., 0., 0.], +// [1., 1., 0., 0.], +// [0., 0., 1., 1.], +// [0., 0., 1., 1.]]) +// +// >>> mat1 = torch.eye(2) +// >>> mat2 = torch.arange(1, 5).reshape(2, 2) +// >>> torch.kron(mat1, mat2) +// tensor([[1., 2., 0., 0.], +// [3., 4., 0., 0.], +// [0., 0., 1., 2.], +// [0., 0., 3., 4.]]) +// +// +//go:linkname Kron py.kron +func Kron(input *py.Object, other *py.Object) *py.Object +// +// kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// +// Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th +// smallest element of each row of the :attr:`input` tensor in the given dimension +// :attr:`dim`. And ``indices`` is the index location of each element found. +// +// If :attr:`dim` is not given, the last dimension of the `input` is chosen. +// +// If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors +// are the same size as :attr:`input`, except in the dimension :attr:`dim` where +// they are of size 1. Otherwise, :attr:`dim` is squeezed +// (see :func:`torch.squeeze`), resulting in both the :attr:`values` and +// :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. +// +// .. note:: +// When :attr:`input` is a CUDA tensor and there are multiple valid +// :attr:`k` th values, this function may nondeterministically return +// :attr:`indices` for any of them. 
+// +// Args: +// input (Tensor): the input tensor. +// k (int): k for the k-th smallest element +// dim (int, optional): the dimension to find the kth value along +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (tuple, optional): the output tuple of (Tensor, LongTensor) +// can be optionally given to be used as output buffers +// +// Example:: +// +// >>> x = torch.arange(1., 6.) +// >>> x +// tensor([ 1., 2., 3., 4., 5.]) +// >>> torch.kthvalue(x, 4) +// torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) +// +// >>> x=torch.arange(1.,7.).resize_(2,3) +// >>> x +// tensor([[ 1., 2., 3.], +// [ 4., 5., 6.]]) +// >>> torch.kthvalue(x, 2, 0, True) +// torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) +// +// +//go:linkname Kthvalue py.kthvalue +func Kthvalue(input *py.Object, k *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// None +// +//go:linkname LayerNorm py.layer_norm +func LayerNorm(__llgo_va_list ...interface{}) *py.Object +// +// lcm(input, other, *, out=None) -> Tensor +// +// Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`. +// +// Both :attr:`input` and :attr:`other` must have integer types. +// +// .. note:: +// This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([5, 10, 15]) +// >>> b = torch.tensor([3, 4, 5]) +// >>> torch.lcm(a, b) +// tensor([15, 20, 15]) +// >>> c = torch.tensor([3]) +// >>> torch.lcm(a, c) +// tensor([15, 30, 15]) +// +// +//go:linkname Lcm py.lcm +func Lcm(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Lcm_ py.lcm_ +func Lcm_(__llgo_va_list ...interface{}) *py.Object +// +// ldexp(input, other, *, out=None) -> Tensor +// +// Multiplies :attr:`input` by 2 ** :attr:`other`. +// +// .. math:: +// \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i +// +// +// Typically this function is used to construct floating point numbers by multiplying +// mantissas in :attr:`input` with integral powers of two created from the exponents +// in :attr:`other`. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): a tensor of exponents, typically integers. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1])) +// tensor([2.]) +// >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])) +// tensor([ 2., 4., 8., 16.]) +// +// +// +// +//go:linkname Ldexp py.ldexp +func Ldexp(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Ldexp_ py.ldexp_ +func Ldexp_(__llgo_va_list ...interface{}) *py.Object +// +// le(input, other, *, out=None) -> Tensor +// +// Computes :math:`\text{input} \leq \text{other}` element-wise. +// +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or Scalar): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Returns: +// A boolean tensor that is True where :attr:`input` is less than or equal to +// :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[True, False], [True, True]]) +// +// +//go:linkname Le py.le +func Le(input *py.Object, other *py.Object) *py.Object +// +// lerp(input, end, weight, *, out=None) +// +// Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based +// on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. +// +// .. math:: +// \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) +// +// The shapes of :attr:`start` and :attr:`end` must be +// :ref:`broadcastable `. If :attr:`weight` is a tensor, then +// the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. +// +// Args: +// input (Tensor): the tensor with the starting points +// end (Tensor): the tensor with the ending points +// weight (float or tensor): the weight for the interpolation formula +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> start = torch.arange(1., 5.) +// >>> end = torch.empty(4).fill_(10) +// >>> start +// tensor([ 1., 2., 3., 4.]) +// >>> end +// tensor([ 10., 10., 10., 10.]) +// >>> torch.lerp(start, end, 0.5) +// tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) +// >>> torch.lerp(start, end, torch.full_like(start, 0.5)) +// tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) +// +// +//go:linkname Lerp py.lerp +func Lerp(input *py.Object, end *py.Object, weight *py.Object) *py.Object +// +// less(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.lt`. +// +// +//go:linkname Less py.less +func Less(input *py.Object, other *py.Object) *py.Object +// +// less_equal(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.le`. 
+// +// +//go:linkname LessEqual py.less_equal +func LessEqual(input *py.Object, other *py.Object) *py.Object +// +// lgamma(input, *, out=None) -> Tensor +// +// Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \ln |\Gamma(\text{input}_{i})| +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.arange(0.5, 2, 0.5) +// >>> torch.lgamma(a) +// tensor([ 0.5724, 0.0000, -0.1208]) +// +// +//go:linkname Lgamma py.lgamma +func Lgamma(input *py.Object) *py.Object +// +// linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +// spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are: +// +// .. math:: +// (\text{start}, +// \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, +// \ldots, +// \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, +// \text{end}) +// +// +// From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. +// +// Args: +// start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional +// end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional +// steps (int): size of the constructed tensor +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// dtype (torch.dtype, optional): the data type to perform the computation in. +// Default: if None, uses the global default dtype (see torch.get_default_dtype()) +// when both :attr:`start` and :attr:`end` are real, +// and corresponding complex dtype when either is complex. 
+// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// +// Example:: +// +// >>> torch.linspace(3, 10, steps=5) +// tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) +// >>> torch.linspace(-10, 10, steps=5) +// tensor([-10., -5., 0., 5., 10.]) +// >>> torch.linspace(start=-10, end=10, steps=5) +// tensor([-10., -5., 0., 5., 10.]) +// >>> torch.linspace(start=-10, end=10, steps=1) +// tensor([-10.]) +// +// +//go:linkname Linspace py.linspace +func Linspace(start *py.Object, end *py.Object, steps *py.Object) *py.Object +// +// log(input, *, out=None) -> Tensor +// +// Returns a new tensor with the natural logarithm of the elements +// of :attr:`input`. +// +// .. math:: +// y_{i} = \log_{e} (x_{i}) +// +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.rand(5) * 5 +// >>> a +// tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739]) +// >>> torch.log(a) +// tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204]) +// +// +//go:linkname Log py.log +func Log(input *py.Object) *py.Object +// +// log10(input, *, out=None) -> Tensor +// +// Returns a new tensor with the logarithm to the base 10 of the elements +// of :attr:`input`. +// +// .. math:: +// y_{i} = \log_{10} (x_{i}) +// +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.rand(5) +// >>> a +// tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) +// +// +// >>> torch.log10(a) +// tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) +// +// +// +//go:linkname Log10 py.log10 +func Log10(input *py.Object) *py.Object +// None +// +//go:linkname Log10_ py.log10_ +func Log10_(__llgo_va_list ...interface{}) *py.Object +// +// log1p(input, *, out=None) -> Tensor +// +// Returns a new tensor with the natural logarithm of (1 + :attr:`input`). +// +// .. math:: +// y_i = \log_{e} (x_i + 1) +// +// .. note:: This function is more accurate than :func:`torch.log` for small +// values of :attr:`input` +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(5) +// >>> a +// tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) +// >>> torch.log1p(a) +// tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) +// +// +//go:linkname Log1p py.log1p +func Log1p(input *py.Object) *py.Object +// None +// +//go:linkname Log1p_ py.log1p_ +func Log1p_(__llgo_va_list ...interface{}) *py.Object +// +// log2(input, *, out=None) -> Tensor +// +// Returns a new tensor with the logarithm to the base 2 of the elements +// of :attr:`input`. +// +// .. math:: +// y_{i} = \log_{2} (x_{i}) +// +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.rand(5) +// >>> a +// tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) +// +// +// >>> torch.log2(a) +// tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) +// +// +// +//go:linkname Log2 py.log2 +func Log2(input *py.Object) *py.Object +// None +// +//go:linkname Log2_ py.log2_ +func Log2_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Log_ py.log_ +func Log_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname LogSoftmax py.log_softmax +func LogSoftmax(__llgo_va_list ...interface{}) *py.Object +// +// logaddexp(input, other, *, out=None) -> Tensor +// +// Logarithm of the sum of exponentiations of the inputs. +// +// Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful +// in statistics where the calculated probabilities of events may be so small as to +// exceed the range of normal floating point numbers. In such cases the logarithm +// of the calculated probability is stored. This function allows adding +// probabilities stored in such a fashion. +// +// This op should be disambiguated with :func:`torch.logsumexp` which performs a +// reduction on a single tensor. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3])) +// tensor([-0.3069, -0.6867, -0.8731]) +// >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3])) +// tensor([-1., -2., -3.]) +// >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3])) +// tensor([1.1269e+00, 2.0000e+03, 3.0000e+04]) +// +// +//go:linkname Logaddexp py.logaddexp +func Logaddexp(input *py.Object, other *py.Object) *py.Object +// +// logaddexp2(input, other, *, out=None) -> Tensor +// +// Logarithm of the sum of exponentiations of the inputs in base-2. 
+// +// Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See +// :func:`torch.logaddexp` for more details. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// +// +//go:linkname Logaddexp2 py.logaddexp2 +func Logaddexp2(input *py.Object, other *py.Object) *py.Object +// +// logcumsumexp(input, dim, *, out=None) -> Tensor +// Returns the logarithm of the cumulative summation of the exponentiation of +// elements of :attr:`input` in the dimension :attr:`dim`. +// +// For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is +// +// .. math:: +// \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij}) +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to do the operation over +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(10) +// >>> torch.logcumsumexp(a, dim=0) +// tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, +// 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) +// +// +//go:linkname Logcumsumexp py.logcumsumexp +func Logcumsumexp(input *py.Object, dim *py.Object) *py.Object +// +// logdet(input) -> Tensor +// +// Calculates log determinant of a square matrix or batches of square matrices. +// +// It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has +// a negative determinant. +// +// .. note:: +// Backward through :meth:`logdet` internally uses SVD results when :attr:`input` +// is not invertible. In this case, double backward through :meth:`logdet` will +// be unstable in when :attr:`input` doesn't have distinct singular values. See +// :func:`torch.linalg.svd` for details. +// +// .. seealso:: +// +// :func:`torch.linalg.slogdet` computes the sign (resp. 
angle) and natural logarithm of the +// absolute value of the determinant of real-valued (resp. complex) square matrices. +// +// Arguments: +// input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more +// batch dimensions. +// +// Example:: +// +// >>> A = torch.randn(3, 3) +// >>> torch.det(A) +// tensor(0.2611) +// >>> torch.logdet(A) +// tensor(-1.3430) +// >>> A +// tensor([[[ 0.9254, -0.6213], +// [-0.5787, 1.6843]], +// +// [[ 0.3242, -0.9665], +// [ 0.4539, -0.0887]], +// +// [[ 1.1336, -0.4025], +// [-0.7089, 0.9032]]]) +// >>> A.det() +// tensor([1.1990, 0.4099, 0.7386]) +// >>> A.det().log() +// tensor([ 0.1815, -0.8917, -0.3031]) +// +// +//go:linkname Logdet py.logdet +func Logdet(input *py.Object) *py.Object +// +// logical_and(input, other, *, out=None) -> Tensor +// +// Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are +// treated as ``True``. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the tensor to compute AND with +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False])) +// tensor([ True, False, False]) +// >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) +// >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) +// >>> torch.logical_and(a, b) +// tensor([False, False, True, False]) +// >>> torch.logical_and(a.double(), b.double()) +// tensor([False, False, True, False]) +// >>> torch.logical_and(a.double(), b) +// tensor([False, False, True, False]) +// >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool)) +// tensor([False, False, True, False]) +// +// +//go:linkname LogicalAnd py.logical_and +func LogicalAnd(input *py.Object, other *py.Object) *py.Object +// +// logical_not(input, *, out=None) -> Tensor +// +// Computes the element-wise logical NOT of the given input tensor. 
If not specified, the output tensor will have the bool +// dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.logical_not(torch.tensor([True, False])) +// tensor([False, True]) +// >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)) +// tensor([ True, False, False]) +// >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double)) +// tensor([ True, False, False]) +// >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16)) +// tensor([1, 0, 0], dtype=torch.int16) +// +// +//go:linkname LogicalNot py.logical_not +func LogicalNot(input *py.Object) *py.Object +// +// logical_or(input, other, *, out=None) -> Tensor +// +// Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +// treated as ``True``. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the tensor to compute OR with +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False])) +// tensor([ True, False, True]) +// >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) +// >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) +// >>> torch.logical_or(a, b) +// tensor([ True, True, True, False]) +// >>> torch.logical_or(a.double(), b.double()) +// tensor([ True, True, True, False]) +// >>> torch.logical_or(a.double(), b) +// tensor([ True, True, True, False]) +// >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool)) +// tensor([ True, True, True, False]) +// +// +//go:linkname LogicalOr py.logical_or +func LogicalOr(input *py.Object, other *py.Object) *py.Object +// +// logical_xor(input, other, *, out=None) -> Tensor +// +// Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +// treated as ``True``. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the tensor to compute XOR with +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False])) +// tensor([False, False, True]) +// >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) +// >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) +// >>> torch.logical_xor(a, b) +// tensor([ True, True, False, False]) +// >>> torch.logical_xor(a.double(), b.double()) +// tensor([ True, True, False, False]) +// >>> torch.logical_xor(a.double(), b) +// tensor([ True, True, False, False]) +// >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool)) +// tensor([ True, True, False, False]) +// +// +//go:linkname LogicalXor py.logical_xor +func LogicalXor(input *py.Object, other *py.Object) *py.Object +// +// logit(input, eps=None, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.logit`. 
+// +// +//go:linkname Logit py.logit +func Logit(input *py.Object, eps *py.Object) *py.Object +// None +// +//go:linkname Logit_ py.logit_ +func Logit_(__llgo_va_list ...interface{}) *py.Object +// +// logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// +// Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +// spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to +// :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale +// with base :attr:`base`. That is, the values are: +// +// .. math:: +// (\text{base}^{\text{start}}, +// \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, +// \ldots, +// \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, +// \text{base}^{\text{end}}) +// +// +// +// From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. +// +// Args: +// start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional +// end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional +// steps (int): size of the constructed tensor +// base (float, optional): base of the logarithm function. Default: ``10.0``. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// dtype (torch.dtype, optional): the data type to perform the computation in. +// Default: if None, uses the global default dtype (see torch.get_default_dtype()) +// when both :attr:`start` and :attr:`end` are real, +// and corresponding complex dtype when either is complex. +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. 
+// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Example:: +// +// >>> torch.logspace(start=-10, end=10, steps=5) +// tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) +// >>> torch.logspace(start=0.1, end=1.0, steps=5) +// tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) +// >>> torch.logspace(start=0.1, end=1.0, steps=1) +// tensor([1.2589]) +// >>> torch.logspace(start=2, end=2, steps=1, base=2) +// tensor([4.0]) +// +// +//go:linkname Logspace py.logspace +func Logspace(start *py.Object, end *py.Object, steps *py.Object, base *py.Object) *py.Object +// +// logsumexp(input, dim, keepdim=False, *, out=None) +// +// Returns the log of summed exponentials of each row of the :attr:`input` +// tensor in the given dimension :attr:`dim`. The computation is numerically +// stabilized. +// +// For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is +// +// .. math:: +// \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij}) +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(3, 3) +// >>> torch.logsumexp(a, 1) +// tensor([1.4907, 1.0593, 1.5696]) +// >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) +// tensor(1.6859e-07) +// +// +//go:linkname Logsumexp py.logsumexp +func Logsumexp(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// None +// +//go:linkname Lstm py.lstm +func Lstm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname LstmCell py.lstm_cell +func LstmCell(__llgo_va_list ...interface{}) *py.Object +// +// lt(input, other, *, out=None) -> Tensor +// +// Computes :math:`\text{input} < \text{other}` element-wise. +// +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or float): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[False, False], [True, False]]) +// +// +//go:linkname Lt py.lt +func Lt(input *py.Object, other *py.Object) *py.Object +// +// lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor +// +// Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted +// LU factorization of A from :func:`~linalg.lu_factor`. +// +// This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`. +// +// .. warning:: +// +// :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`. +// :func:`torch.lu_solve` will be removed in a future PyTorch release. +// ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with +// +// .. 
code:: python +// +// X = linalg.lu_solve(LU, pivots, B) +// +// Arguments: +// b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*` +// is zero or more batch dimensions. +// LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`, +// where :math:`*` is zero or more batch dimensions. +// LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`, +// where :math:`*` is zero or more batch dimensions. +// The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of +// :attr:`LU_data`. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> A = torch.randn(2, 3, 3) +// >>> b = torch.randn(2, 3, 1) +// >>> LU, pivots = torch.linalg.lu_factor(A) +// >>> x = torch.lu_solve(b, LU, pivots) +// >>> torch.dist(A @ x, b) +// tensor(1.00000e-07 * +// 2.8312) +// +// +//go:linkname LuSolve py.lu_solve +func LuSolve(b *py.Object, LUData *py.Object, LUPivots *py.Object) *py.Object +// +// lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor) +// +// Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices. +// +// .. seealso:: +// +// :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient +// than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`. +// +// Args: +// LU_data (Tensor): the packed LU factorization data +// LU_pivots (Tensor): the packed LU factorization pivots +// unpack_data (bool): flag indicating if the data should be unpacked. +// If ``False``, then the returned ``L`` and ``U`` are empty tensors. +// Default: ``True`` +// unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``. +// If ``False``, then the returned ``P`` is an empty tensor. 
+// Default: ``True`` +// +// Keyword args: +// out (tuple, optional): output tuple of three tensors. Ignored if `None`. +// +// Returns: +// A namedtuple ``(P, L, U)`` +// +// Examples:: +// +// >>> A = torch.randn(2, 3, 3) +// >>> LU, pivots = torch.linalg.lu_factor(A) +// >>> P, L, U = torch.lu_unpack(LU, pivots) +// >>> # We can recover A from the factorization +// >>> A_ = P @ L @ U +// >>> torch.allclose(A, A_) +// True +// +// >>> # LU factorization of a rectangular matrix: +// >>> A = torch.randn(2, 3, 2) +// >>> LU, pivots = torch.linalg.lu_factor(A) +// >>> P, L, U = torch.lu_unpack(LU, pivots) +// >>> # P, L, U are the same as returned by linalg.lu +// >>> P_, L_, U_ = torch.linalg.lu(A) +// >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_) +// True +// +// +// +//go:linkname LuUnpack py.lu_unpack +func LuUnpack(LUData *py.Object, LUPivots *py.Object, unpackData *py.Object, unpackPivots *py.Object) *py.Object +// None +// +//go:linkname MarginRankingLoss py.margin_ranking_loss +func MarginRankingLoss(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MaskedFill py.masked_fill +func MaskedFill(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MaskedScatter py.masked_scatter +func MaskedScatter(__llgo_va_list ...interface{}) *py.Object +// +// masked_select(input, mask, *, out=None) -> Tensor +// +// Returns a new 1-D tensor which indexes the :attr:`input` tensor according to +// the boolean mask :attr:`mask` which is a `BoolTensor`. +// +// The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need +// to match, but they must be :ref:`broadcastable `. +// +// .. note:: The returned tensor does **not** use the same storage +// as the original tensor +// +// Args: +// input (Tensor): the input tensor. +// mask (BoolTensor): the tensor containing the binary mask to index with +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> x = torch.randn(3, 4) +// >>> x +// tensor([[ 0.3552, -2.3825, -0.8297, 0.3477], +// [-1.2035, 1.2252, 0.5002, 0.6248], +// [ 0.1307, -2.0608, 0.1244, 2.0139]]) +// >>> mask = x.ge(0.5) +// >>> mask +// tensor([[False, False, False, False], +// [False, True, True, True], +// [False, False, False, True]]) +// >>> torch.masked_select(x, mask) +// tensor([ 1.2252, 0.5002, 0.6248, 2.0139]) +// +// +//go:linkname MaskedSelect py.masked_select +func MaskedSelect(input *py.Object, mask *py.Object) *py.Object +// +// matmul(input, other, *, out=None) -> Tensor +// +// Matrix product of two tensors. +// +// The behavior depends on the dimensionality of the tensors as follows: +// +// - If both tensors are 1-dimensional, the dot product (scalar) is returned. +// - If both arguments are 2-dimensional, the matrix-matrix product is returned. +// - If the first argument is 1-dimensional and the second argument is 2-dimensional, +// a 1 is prepended to its dimension for the purpose of the matrix multiply. +// After the matrix multiply, the prepended dimension is removed. +// - If the first argument is 2-dimensional and the second argument is 1-dimensional, +// the matrix-vector product is returned. +// - If both arguments are at least 1-dimensional and at least one argument is +// N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first +// argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the +// batched matrix multiply and removed after. If the second argument is 1-dimensional, a +// 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. +// The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus +// must be broadcastable). 
For example, if :attr:`input` is a +// :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)` +// tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor. +// +// Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs +// are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a +// :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` +// tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the +// matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor. +// +// This operation has support for arguments with :ref:`sparse layouts`. In particular the +// matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions +// as :func:`torch.mm` +// +// +// .. warning:: +// Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, +// or may not have autograd support. If you notice missing functionality please +// open a feature request. +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. +// +// .. note:: +// +// The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. +// +// Arguments: +// input (Tensor): the first tensor to be multiplied +// other (Tensor): the second tensor to be multiplied +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> # vector x vector +// >>> tensor1 = torch.randn(3) +// >>> tensor2 = torch.randn(3) +// >>> torch.matmul(tensor1, tensor2).size() +// torch.Size([]) +// >>> # matrix x vector +// >>> tensor1 = torch.randn(3, 4) +// >>> tensor2 = torch.randn(4) +// >>> torch.matmul(tensor1, tensor2).size() +// torch.Size([3]) +// >>> # batched matrix x broadcasted vector +// >>> tensor1 = torch.randn(10, 3, 4) +// >>> tensor2 = torch.randn(4) +// >>> torch.matmul(tensor1, tensor2).size() +// torch.Size([10, 3]) +// >>> # batched matrix x batched matrix +// >>> tensor1 = torch.randn(10, 3, 4) +// >>> tensor2 = torch.randn(10, 4, 5) +// >>> torch.matmul(tensor1, tensor2).size() +// torch.Size([10, 3, 5]) +// >>> # batched matrix x broadcasted matrix +// >>> tensor1 = torch.randn(10, 3, 4) +// >>> tensor2 = torch.randn(4, 5) +// >>> torch.matmul(tensor1, tensor2).size() +// torch.Size([10, 3, 5]) +// +// +// +//go:linkname Matmul py.matmul +func Matmul(input *py.Object, other *py.Object) *py.Object +// +// matrix_exp(A) -> Tensor +// +// Alias for :func:`torch.linalg.matrix_exp`. +// +// +//go:linkname MatrixExp py.matrix_exp +func MatrixExp(A *py.Object) *py.Object +// +// matrix_power(input, n, *, out=None) -> Tensor +// +// Alias for :func:`torch.linalg.matrix_power` +// +// +//go:linkname MatrixPower py.matrix_power +func MatrixPower(input *py.Object, n *py.Object) *py.Object +// +// max(input) -> Tensor +// +// Returns the maximum value of all elements in the ``input`` tensor. +// +// .. warning:: +// This function produces deterministic (sub)gradients unlike ``max(dim=0)`` +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[ 0.6763, 0.7445, -2.2369]]) +// >>> torch.max(a) +// tensor(0.7445) +// +// .. 
function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// :noindex: +// +// Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum +// value of each row of the :attr:`input` tensor in the given dimension +// :attr:`dim`. And ``indices`` is the index location of each maximum value found +// (argmax). +// +// If ``keepdim`` is ``True``, the output tensors are of the same size +// as ``input`` except in the dimension ``dim`` where they are of size 1. +// Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting +// in the output tensors having 1 fewer dimension than ``input``. +// +// .. note:: If there are multiple maximal values in a reduced row then +// the indices of the first maximal value are returned. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. +// +// Keyword args: +// out (tuple, optional): the result tuple of two output tensors (max, max_indices) +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[-1.2360, -0.2942, -0.1222, 0.8475], +// [ 1.1949, -1.1127, -2.2379, -0.6702], +// [ 1.5717, -0.9207, 0.1297, -1.8768], +// [-0.6172, 1.0036, -0.6060, -0.2432]]) +// >>> torch.max(a, 1) +// torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) +// +// .. function:: max(input, other, *, out=None) -> Tensor +// :noindex: +// +// See :func:`torch.maximum`. 
+// +// +// +//go:linkname Max py.max +func Max(input *py.Object) *py.Object +// None +// +//go:linkname MaxPool1d py.max_pool1d +func MaxPool1d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MaxPool1dWithIndices py.max_pool1d_with_indices +func MaxPool1dWithIndices(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MaxPool2d py.max_pool2d +func MaxPool2d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MaxPool3d py.max_pool3d +func MaxPool3d(__llgo_va_list ...interface{}) *py.Object +// +// maximum(input, other, *, out=None) -> Tensor +// +// Computes the element-wise maximum of :attr:`input` and :attr:`other`. +// +// .. note:: +// If one of the elements being compared is a NaN, then that element is returned. +// :func:`maximum` is not supported for tensors with complex dtypes. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor((1, 2, -1)) +// >>> b = torch.tensor((3, 0, 4)) +// >>> torch.maximum(a, b) +// tensor([3, 2, 4]) +// +// +//go:linkname Maximum py.maximum +func Maximum(input *py.Object, other *py.Object) *py.Object +// +// mean(input, *, dtype=None) -> Tensor +// +// Returns the mean value of all elements in the :attr:`input` tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[ 0.2294, -0.5481, 1.3288]]) +// >>> torch.mean(a) +// tensor(0.3367) +// +// .. 
function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor +// :noindex: +// +// Returns the mean value of each row of the :attr:`input` tensor in the given +// dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +// reduce over all of them. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// out (Tensor, optional): the output tensor. +// +// .. seealso:: +// +// :func:`torch.nanmean` computes the mean value of `non-NaN` elements. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[-0.3841, 0.6320, 0.4254, -0.7384], +// [-0.9644, 1.0131, -0.6549, -1.4279], +// [-0.2951, -1.3350, -0.7694, 0.5600], +// [ 1.0842, -0.9580, 0.3623, 0.2343]]) +// >>> torch.mean(a, 1) +// tensor([-0.0163, -0.5085, -0.4599, 0.1807]) +// >>> torch.mean(a, 1, True) +// tensor([[-0.0163], +// [-0.5085], +// [-0.4599], +// [ 0.1807]]) +// +// +//go:linkname Mean py.mean +func Mean(input *py.Object) *py.Object +// +// median(input) -> Tensor +// +// Returns the median of the values in :attr:`input`. +// +// .. note:: +// The median is not unique for :attr:`input` tensors with an even number +// of elements. In this case the lower of the two medians is returned. 
To +// compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. +// +// .. warning:: +// This function produces deterministic (sub)gradients unlike ``median(dim=0)`` +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[ 1.5219, -1.5212, 0.2202]]) +// >>> torch.median(a) +// tensor(0.2202) +// +// .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// :noindex: +// +// Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +// in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. +// +// By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. +// +// If :attr:`keepdim` is ``True``, the output tensors are of the same size +// as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +// the outputs tensor having 1 fewer dimension than :attr:`input`. +// +// .. note:: +// The median is not unique for :attr:`input` tensors with an even number +// of elements in the dimension :attr:`dim`. In this case the lower of the +// two medians is returned. To compute the mean of both medians in +// :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. +// +// .. warning:: +// ``indices`` does not necessarily contain the first occurrence of each +// median value found, unless it is unique. +// The exact implementation details are device-specific. +// Do not expect the same result when run on CPU and GPU in general. +// For the same reason do not expect the gradients to be deterministic. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+// +// Keyword args: +// out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second +// tensor, which must have dtype long, with their indices in the dimension +// :attr:`dim` of :attr:`input`. +// +// Example:: +// +// >>> a = torch.randn(4, 5) +// >>> a +// tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], +// [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], +// [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], +// [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) +// >>> torch.median(a, 1) +// torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) +// +// +//go:linkname Median py.median +func Median(input *py.Object) *py.Object +// Creates grids of coordinates specified by the 1D inputs in `attr`:tensors. +// +// This is helpful when you want to visualize data over some +// range of inputs. See below for a plotting example. +// +// Given :math:`N` 1D tensors :math:`T_0 \ldots T_{N-1}` as +// inputs with corresponding sizes :math:`S_0 \ldots S_{N-1}`, +// this creates :math:`N` N-dimensional tensors :math:`G_0 \ldots +// G_{N-1}`, each with shape :math:`(S_0, ..., S_{N-1})` where +// the output :math:`G_i` is constructed by expanding :math:`T_i` +// to the result shape. +// +// .. note:: +// 0D inputs are treated equivalently to 1D inputs of a +// single element. +// +// .. warning:: +// `torch.meshgrid(*tensors)` currently has the same behavior +// as calling `numpy.meshgrid(*arrays, indexing='ij')`. +// +// In the future `torch.meshgrid` will transition to +// `indexing='xy'` as the default. +// +// https://github.com/pytorch/pytorch/issues/50276 tracks +// this issue with the goal of migrating to NumPy's behavior. +// +// .. seealso:: +// +// :func:`torch.cartesian_prod` has the same effect but it +// collects the data in a tensor of vectors. +// +// Args: +// tensors (list of Tensor): list of scalars or 1 dimensional tensors. 
Scalars will be +// treated as tensors of size :math:`(1,)` automatically +// +// indexing: (str, optional): the indexing mode, either "xy" +// or "ij", defaults to "ij". See warning for future changes. +// +// If "xy" is selected, the first dimension corresponds +// to the cardinality of the second input and the second +// dimension corresponds to the cardinality of the first +// input. +// +// If "ij" is selected, the dimensions are in the same +// order as the cardinality of the inputs. +// +// Returns: +// seq (sequence of Tensors): If the input has :math:`N` +// tensors of size :math:`S_0 \ldots S_{N-1}``, then the +// output will also have :math:`N` tensors, where each tensor +// is of shape :math:`(S_0, ..., S_{N-1})`. +// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3]) +// >>> y = torch.tensor([4, 5, 6]) +// +// Observe the element-wise pairings across the grid, (1, 4), +// (1, 5), ..., (3, 6). This is the same thing as the +// cartesian product. +// >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij') +// >>> grid_x +// tensor([[1, 1, 1], +// [2, 2, 2], +// [3, 3, 3]]) +// >>> grid_y +// tensor([[4, 5, 6], +// [4, 5, 6], +// [4, 5, 6]]) +// +// This correspondence can be seen when these grids are +// stacked properly. +// >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))), +// ... torch.cartesian_prod(x, y)) +// True +// +// `torch.meshgrid` is commonly used to produce a grid for +// plotting. +// >>> # xdoctest: +REQUIRES(module:matplotlib) +// >>> # xdoctest: +REQUIRES(env:DOCTEST_SHOW) +// >>> import matplotlib.pyplot as plt +// >>> xs = torch.linspace(-5, 5, steps=100) +// >>> ys = torch.linspace(-5, 5, steps=100) +// >>> x, y = torch.meshgrid(xs, ys, indexing='xy') +// >>> z = torch.sin(torch.sqrt(x * x + y * y)) +// >>> ax = plt.axes(projection='3d') +// >>> ax.plot_surface(x.numpy(), y.numpy(), z.numpy()) +// >>> plt.show() +// +// .. 
image:: ../_static/img/meshgrid.png +// :width: 512 +// +// +// +//go:linkname Meshgrid py.meshgrid +func Meshgrid(__llgo_va_list ...interface{}) *py.Object +// +// min(input) -> Tensor +// +// Returns the minimum value of all elements in the :attr:`input` tensor. +// +// .. warning:: +// This function produces deterministic (sub)gradients unlike ``min(dim=0)`` +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[ 0.6750, 1.0857, 1.7197]]) +// >>> torch.min(a) +// tensor(0.6750) +// +// .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// :noindex: +// +// Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum +// value of each row of the :attr:`input` tensor in the given dimension +// :attr:`dim`. And ``indices`` is the index location of each minimum value found +// (argmin). +// +// If :attr:`keepdim` is ``True``, the output tensors are of the same size as +// :attr:`input` except in the dimension :attr:`dim` where they are of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +// the output tensors having 1 fewer dimension than :attr:`input`. +// +// .. note:: If there are multiple minimal values in a reduced row then +// the indices of the first minimal value are returned. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+// +// Keyword args: +// out (tuple, optional): the tuple of two output tensors (min, min_indices) +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[-0.6248, 1.1334, -1.1899, -0.2803], +// [-1.4644, -0.2635, -0.3651, 0.6134], +// [ 0.2457, 0.0384, 1.0128, 0.7015], +// [-0.1153, 2.9849, 2.1458, 0.5788]]) +// >>> torch.min(a, 1) +// torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) +// +// .. function:: min(input, other, *, out=None) -> Tensor +// :noindex: +// +// See :func:`torch.minimum`. +// +// +//go:linkname Min py.min +func Min(input *py.Object) *py.Object +// +// minimum(input, other, *, out=None) -> Tensor +// +// Computes the element-wise minimum of :attr:`input` and :attr:`other`. +// +// .. note:: +// If one of the elements being compared is a NaN, then that element is returned. +// :func:`minimum` is not supported for tensors with complex dtypes. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor((1, 2, -1)) +// >>> b = torch.tensor((3, 0, 4)) +// >>> torch.minimum(a, b) +// tensor([1, 0, -1]) +// +// +//go:linkname Minimum py.minimum +func Minimum(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname MiopenBatchNorm py.miopen_batch_norm +func MiopenBatchNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenConvolution py.miopen_convolution +func MiopenConvolution(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenConvolutionAddRelu py.miopen_convolution_add_relu +func MiopenConvolutionAddRelu(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenConvolutionRelu py.miopen_convolution_relu +func MiopenConvolutionRelu(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenConvolutionTranspose py.miopen_convolution_transpose +func MiopenConvolutionTranspose(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenDepthwiseConvolution py.miopen_depthwise_convolution +func MiopenDepthwiseConvolution(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MiopenRnn py.miopen_rnn +func MiopenRnn(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MkldnnAdaptiveAvgPool2d py.mkldnn_adaptive_avg_pool2d +func MkldnnAdaptiveAvgPool2d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MkldnnConvolution py.mkldnn_convolution +func MkldnnConvolution(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MkldnnLinearBackwardWeights py.mkldnn_linear_backward_weights +func MkldnnLinearBackwardWeights(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MkldnnMaxPool2d py.mkldnn_max_pool2d +func MkldnnMaxPool2d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname MkldnnMaxPool3d py.mkldnn_max_pool3d +func MkldnnMaxPool3d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname 
MkldnnRnnLayer py.mkldnn_rnn_layer +func MkldnnRnnLayer(__llgo_va_list ...interface{}) *py.Object +// +// mm(input, mat2, *, out=None) -> Tensor +// +// Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`. +// +// If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +// :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. +// +// .. note:: This function does not :ref:`broadcast `. +// For broadcasting matrix products, see :func:`torch.matmul`. +// +// Supports strided and sparse 2-D tensors as inputs, autograd with +// respect to strided inputs. +// +// This operation has support for arguments with :ref:`sparse layouts`. +// If :attr:`out` is provided it's layout will be used. Otherwise, the result +// layout will be deduced from that of :attr:`input`. +// +// +// .. warning:: +// Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, +// or may not have autograd support. If you notice missing functionality please +// open a feature request. +// +// This operator supports :ref:`TensorFloat32`. +// +// On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. +// +// Args: +// input (Tensor): the first matrix to be matrix multiplied +// mat2 (Tensor): the second matrix to be matrix multiplied +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> mat1 = torch.randn(2, 3) +// >>> mat2 = torch.randn(3, 3) +// >>> torch.mm(mat1, mat2) +// tensor([[ 0.4851, 0.5037, -0.3633], +// [-0.0760, -3.6705, 2.4784]]) +// +// +//go:linkname Mm py.mm +func Mm(input *py.Object, mat2 *py.Object) *py.Object +// +// mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// +// Returns a namedtuple ``(values, indices)`` where ``values`` is the mode +// value of each row of the :attr:`input` tensor in the given dimension +// :attr:`dim`, i.e. 
a value which appears most often +// in that row, and ``indices`` is the index location of each mode value found. +// +// By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. +// +// If :attr:`keepdim` is ``True``, the output tensors are of the same size as +// :attr:`input` except in the dimension :attr:`dim` where they are of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting +// in the output tensors having 1 fewer dimension than :attr:`input`. +// +// .. note:: This function is not defined for ``torch.cuda.Tensor`` yet. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out (tuple, optional): the result tuple of two output tensors (values, indices) +// +// Example:: +// +// >>> a = torch.randint(10, (5,)) +// >>> a +// tensor([6, 5, 1, 0, 2]) +// >>> b = a + (torch.randn(50, 1) * 5).long() +// >>> torch.mode(b, 0) +// torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2])) +// +// +//go:linkname Mode py.mode +func Mode(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// moveaxis(input, source, destination) -> Tensor +// +// Alias for :func:`torch.movedim`. +// +// This function is equivalent to NumPy's moveaxis function. 
+// +// Examples:: +// +// >>> t = torch.randn(3,2,1) +// >>> t +// tensor([[[-0.3362], +// [-0.8437]], +// +// [[-0.9627], +// [ 0.1727]], +// +// [[ 0.5173], +// [-0.1398]]]) +// >>> torch.moveaxis(t, 1, 0).shape +// torch.Size([2, 3, 1]) +// >>> torch.moveaxis(t, 1, 0) +// tensor([[[-0.3362], +// [-0.9627], +// [ 0.5173]], +// +// [[-0.8437], +// [ 0.1727], +// [-0.1398]]]) +// >>> torch.moveaxis(t, (1, 2), (0, 1)).shape +// torch.Size([2, 1, 3]) +// >>> torch.moveaxis(t, (1, 2), (0, 1)) +// tensor([[[-0.3362, -0.9627, 0.5173]], +// +// [[-0.8437, 0.1727, -0.1398]]]) +// +// +//go:linkname Moveaxis py.moveaxis +func Moveaxis(input *py.Object, source *py.Object, destination *py.Object) *py.Object +// +// movedim(input, source, destination) -> Tensor +// +// Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` +// to the position(s) in :attr:`destination`. +// +// Other dimensions of :attr:`input` that are not explicitly moved remain in +// their original order and appear at the positions not specified in :attr:`destination`. +// +// Args: +// input (Tensor): the input tensor. +// source (int or tuple of ints): Original positions of the dims to move. These must be unique. +// destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. 
+// +// Examples:: +// +// >>> t = torch.randn(3,2,1) +// >>> t +// tensor([[[-0.3362], +// [-0.8437]], +// +// [[-0.9627], +// [ 0.1727]], +// +// [[ 0.5173], +// [-0.1398]]]) +// >>> torch.movedim(t, 1, 0).shape +// torch.Size([2, 3, 1]) +// >>> torch.movedim(t, 1, 0) +// tensor([[[-0.3362], +// [-0.9627], +// [ 0.5173]], +// +// [[-0.8437], +// [ 0.1727], +// [-0.1398]]]) +// >>> torch.movedim(t, (1, 2), (0, 1)).shape +// torch.Size([2, 1, 3]) +// >>> torch.movedim(t, (1, 2), (0, 1)) +// tensor([[[-0.3362, -0.9627, 0.5173]], +// +// [[-0.8437, 0.1727, -0.1398]]]) +// +// +//go:linkname Movedim py.movedim +func Movedim(input *py.Object, source *py.Object, destination *py.Object) *py.Object +// +// msort(input, *, out=None) -> Tensor +// +// Sorts the elements of the :attr:`input` tensor along its first dimension +// in ascending order by value. +// +// .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`. +// See also :func:`torch.sort`. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.randn(3, 4) +// >>> t +// tensor([[-0.1321, 0.4370, -1.2631, -1.1289], +// [-2.0527, -1.1250, 0.2275, 0.3077], +// [-0.0881, -0.1259, -0.5495, 1.0284]]) +// >>> torch.msort(t) +// tensor([[-2.0527, -1.1250, -1.2631, -1.1289], +// [-0.1321, -0.1259, -0.5495, 0.3077], +// [-0.0881, 0.4370, 0.2275, 1.0284]]) +// +// +//go:linkname Msort py.msort +func Msort(input *py.Object) *py.Object +// +// mul(input, other, *, out=None) -> Tensor +// +// Multiplies :attr:`input` by :attr:`other`. +// +// +// .. math:: +// \text{out}_i = \text{input}_i \times \text{other}_i +// +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer, float, and complex inputs. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor or Number) - the tensor or number to multiply input by. 
+// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Examples:: +// +// >>> a = torch.randn(3) +// >>> a +// tensor([ 0.2015, -0.4255, 2.6087]) +// >>> torch.mul(a, 100) +// tensor([ 20.1494, -42.5491, 260.8663]) +// +// >>> b = torch.randn(4, 1) +// >>> b +// tensor([[ 1.1207], +// [-0.3137], +// [ 0.0700], +// [ 0.8378]]) +// >>> c = torch.randn(1, 4) +// >>> c +// tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) +// >>> torch.mul(b, c) +// tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], +// [-0.1614, -0.0382, 0.1645, -0.7021], +// [ 0.0360, 0.0085, -0.0367, 0.1567], +// [ 0.4312, 0.1019, -0.4394, 1.8753]]) +// +// +//go:linkname Mul py.mul +func Mul(input *py.Object, other *py.Object) *py.Object +// +// multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor +// +// Returns a tensor where each row contains :attr:`num_samples` indices sampled +// from the multinomial (a stricter definition would be multivariate, +// refer to torch.distributions.multinomial.Multinomial for more details) +// probability distribution located in the corresponding row +// of tensor :attr:`input`. +// +// .. note:: +// The rows of :attr:`input` do not need to sum to one (in which case we use +// the values as weights), but must be non-negative, finite and have +// a non-zero sum. +// +// Indices are ordered from left to right according to when each was sampled +// (first samples are placed in first column). +// +// If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`. +// +// If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape +// :math:`(m \times \text{num\_samples})`. +// +// If replacement is ``True``, samples are drawn with replacement. +// +// If not, they are drawn without replacement, which means that when a +// sample index is drawn for a row, it cannot be drawn again for that row. +// +// .. 
note:: +// When drawn without replacement, :attr:`num_samples` must be lower than +// number of non-zero elements in :attr:`input` (or the min number of non-zero +// elements in each row of :attr:`input` if it is a matrix). +// +// Args: +// input (Tensor): the input tensor containing probabilities +// num_samples (int): number of samples to draw +// replacement (bool, optional): whether to draw with replacement or not +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights +// >>> torch.multinomial(weights, 2) +// tensor([1, 2]) +// >>> torch.multinomial(weights, 4) # ERROR! +// RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, +// not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 +// >>> torch.multinomial(weights, 4, replacement=True) +// tensor([ 2, 1, 1, 1]) +// +// +//go:linkname Multinomial py.multinomial +func Multinomial(input *py.Object, numSamples *py.Object, replacement *py.Object) *py.Object +// +// multiply(input, other, *, out=None) +// +// Alias for :func:`torch.mul`. +// +// +//go:linkname Multiply py.multiply +func Multiply(input *py.Object, other *py.Object) *py.Object +// +// mv(input, vec, *, out=None) -> Tensor +// +// Performs a matrix-vector product of the matrix :attr:`input` and the vector +// :attr:`vec`. +// +// If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +// size :math:`m`, :attr:`out` will be 1-D of size :math:`n`. +// +// .. note:: This function does not :ref:`broadcast `. +// +// Args: +// input (Tensor): matrix to be multiplied +// vec (Tensor): vector to be multiplied +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> mat = torch.randn(2, 3) +// >>> vec = torch.randn(3) +// >>> torch.mv(mat, vec) +// tensor([ 1.0404, -0.6361]) +// +// +//go:linkname Mv py.mv +func Mv(input *py.Object, vec *py.Object) *py.Object +// +// mvlgamma(input, p, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.multigammaln`. +// +// +//go:linkname Mvlgamma py.mvlgamma +func Mvlgamma(input *py.Object, p *py.Object) *py.Object +// +// nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor +// +// Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input` +// with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively. +// By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the +// greatest finite value representable by :attr:`input`'s dtype, and negative infinity +// is replaced with the least finite value representable by :attr:`input`'s dtype. +// +// Args: +// input (Tensor): the input tensor. +// nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero. +// posinf (Number, optional): if a Number, the value to replace positive infinity values with. +// If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype. +// Default is None. +// neginf (Number, optional): if a Number, the value to replace negative infinity values with. +// If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype. +// Default is None. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14]) +// >>> torch.nan_to_num(x) +// tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) +// >>> torch.nan_to_num(x, nan=2.0) +// tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) +// >>> torch.nan_to_num(x, nan=2.0, posinf=1.0) +// tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00]) +// +// +// +//go:linkname NanToNum py.nan_to_num +func NanToNum(input *py.Object, nan *py.Object, posinf *py.Object, neginf *py.Object) *py.Object +// None +// +//go:linkname NanToNum_ py.nan_to_num_ +func NanToNum_(__llgo_va_list ...interface{}) *py.Object +// +// nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor +// +// Computes the mean of all `non-NaN` elements along the specified dimensions. +// +// This function is identical to :func:`torch.mean` when there are no `NaN` values +// in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will +// propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the +// `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`). +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. 
This is useful for preventing data type overflows. Default: None. +// out (Tensor, optional): the output tensor. +// +// .. seealso:: +// +// :func:`torch.mean` computes the mean value, propagating `NaN`. +// +// Example:: +// +// >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]]) +// >>> x.mean() +// tensor(nan) +// >>> x.nanmean() +// tensor(1.8000) +// >>> x.mean(dim=0) +// tensor([ nan, 1.5000, 2.5000]) +// >>> x.nanmean(dim=0) +// tensor([1.0000, 1.5000, 2.5000]) +// +// # If all elements in the reduced dimensions are NaN then the result is NaN +// >>> torch.tensor([torch.nan]).nanmean() +// tensor(nan) +// +// +//go:linkname Nanmean py.nanmean +func Nanmean(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// nanmedian(input) -> Tensor +// +// Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. +// +// This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. +// When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, +// while this function will return the median of the non-``NaN`` elements in :attr:`input`. +// If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.tensor([1, float('nan'), 3, 2]) +// >>> a.median() +// tensor(nan) +// >>> a.nanmedian() +// tensor(2.) +// +// .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) +// :noindex: +// +// Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +// in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values +// found in the dimension :attr:`dim`. +// +// This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. 
When a reduced row has +// one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the +// median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second +// tensor, which must have dtype long, with their indices in the dimension +// :attr:`dim` of :attr:`input`. +// +// Example:: +// +// >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) +// >>> a +// tensor([[2., 3., 1.], +// [nan, 1., nan]]) +// >>> a.median(0) +// torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) +// >>> a.nanmedian(0) +// torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) +// +// +//go:linkname Nanmedian py.nanmedian +func Nanmedian(input *py.Object) *py.Object +// +// nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor +// +// This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, +// computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did +// not exist. If all values in a reduced row are ``NaN`` then the quantiles for +// that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. +// +// Args: +// input (Tensor): the input tensor. +// q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword arguments: +// interpolation (str): interpolation method to use when the desired quantile lies between two data points. 
+// Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. +// Default is ``linear``. +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.tensor([float('nan'), 1, 2]) +// >>> t.quantile(0.5) +// tensor(nan) +// >>> t.nanquantile(0.5) +// tensor(1.5000) +// >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) +// >>> t +// tensor([[nan, nan], +// [1., 2.]]) +// >>> t.nanquantile(0.5, dim=0) +// tensor([1., 2.]) +// >>> t.nanquantile(0.5, dim=1) +// tensor([ nan, 1.5000]) +// +// +//go:linkname Nanquantile py.nanquantile +func Nanquantile(input *py.Object, q *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// nansum(input, *, dtype=None) -> Tensor +// +// Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> a = torch.tensor([1., 2., float('nan'), 4.]) +// >>> torch.nansum(a) +// tensor(7.) +// +// .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor +// :noindex: +// +// Returns the sum of each row of the :attr:`input` tensor in the given +// dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero. +// If :attr:`dim` is a list of dimensions, reduce over all of them. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. 
+// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> torch.nansum(torch.tensor([1., float("nan")])) +// 1.0 +// >>> a = torch.tensor([[1, 2], [3., float("nan")]]) +// >>> torch.nansum(a) +// tensor(6.) +// >>> torch.nansum(a, dim=0) +// tensor([4., 2.]) +// >>> torch.nansum(a, dim=1) +// tensor([3., 3.]) +// +// +//go:linkname Nansum py.nansum +func Nansum(input *py.Object) *py.Object +// +// narrow(input, dim, start, length) -> Tensor +// +// Returns a new tensor that is a narrowed version of :attr:`input` tensor. The +// dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The +// returned tensor and :attr:`input` tensor share the same underlying storage. +// +// Args: +// input (Tensor): the tensor to narrow +// dim (int): the dimension along which to narrow +// start (int or Tensor): index of the element to start the narrowed dimension +// from. Can be negative, which means indexing from the end of `dim`. 
If +// `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) +// length (int): length of the narrowed dimension, must be weakly positive +// +// Example:: +// +// >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) +// >>> torch.narrow(x, 0, 0, 2) +// tensor([[ 1, 2, 3], +// [ 4, 5, 6]]) +// >>> torch.narrow(x, 1, 1, 2) +// tensor([[ 2, 3], +// [ 5, 6], +// [ 8, 9]]) +// >>> torch.narrow(x, -1, torch.tensor(-1), 1) +// tensor([[3], +// [6], +// [9]]) +// +// +//go:linkname Narrow py.narrow +func Narrow(input *py.Object, dim *py.Object, start *py.Object, length *py.Object) *py.Object +// +// narrow_copy(input, dim, start, length, *, out=None) -> Tensor +// +// Same as :meth:`Tensor.narrow` except this returns a copy rather +// than shared storage. This is primarily for sparse tensors, which +// do not have a shared-storage narrow method. +// +// Args: +// input (Tensor): the tensor to narrow +// dim (int): the dimension along which to narrow +// start (int): index of the element to start the narrowed dimension from. Can +// be negative, which means indexing from the end of `dim` +// length (int): length of the narrowed dimension, must be weakly positive +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) +// >>> torch.narrow_copy(x, 0, 0, 2) +// tensor([[ 1, 2, 3], +// [ 4, 5, 6]]) +// >>> torch.narrow_copy(x, 1, 1, 2) +// tensor([[ 2, 3], +// [ 5, 6], +// [ 8, 9]]) +// >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2) +// >>> torch.narrow_copy(s, 0, 0, 1) +// tensor(indices=tensor([[0, 0], +// [0, 1]]), +// values=tensor([[[0, 1], +// [2, 3]], +// +// [[4, 5], +// [6, 7]]]), +// size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo) +// +// .. 
seealso:: +// +// :func:`torch.narrow` for a non copy variant +// +// +// +//go:linkname NarrowCopy py.narrow_copy +func NarrowCopy(input *py.Object, dim *py.Object, start *py.Object, length *py.Object) *py.Object +// None +// +//go:linkname NativeBatchNorm py.native_batch_norm +func NativeBatchNorm(__llgo_va_list ...interface{}) *py.Object +// +// native_channel_shuffle(input, groups) -> Tensor +// +// Native kernel level implementation of the `channel_shuffle`. +// This function might become private in future releases, use with caution. +// +// Divide the channels in a tensor of shape :math:`(*, C , H, W)` +// into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`, +// while keeping the original tensor shape. +// +// See :class:`~torch.nn.ChannelShuffle` for details. +// +// Args: +// input (Tensor): the input tensor +// groups (int): number of groups to divide channels in and rearrange. +// +// Examples:: +// +// >>> input = torch.randn(1, 4, 2, 2) +// >>> print(input) +// [[[[1, 2], +// [3, 4]], +// [[5, 6], +// [7, 8]], +// [[9, 10], +// [11, 12]], +// [[13, 14], +// [15, 16]], +// ]] +// >>> output = torch.nn.functional.native_channel_shuffle(input, 2) +// >>> print(output) +// [[[[1, 2], +// [3, 4]], +// [[9, 10], +// [11, 12]], +// [[5, 6], +// [7, 8]], +// [[13, 14], +// [15, 16]], +// ]] +// +// +//go:linkname NativeChannelShuffle py.native_channel_shuffle +func NativeChannelShuffle(input *py.Object, groups *py.Object) *py.Object +// None +// +//go:linkname NativeDropout py.native_dropout +func NativeDropout(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname NativeGroupNorm py.native_group_norm +func NativeGroupNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname NativeLayerNorm py.native_layer_norm +func NativeLayerNorm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname NativeNorm py.native_norm +func NativeNorm(__llgo_va_list ...interface{}) *py.Object +// +// ne(input, other, *, 
out=None) -> Tensor +// +// Computes :math:`\text{input} \neq \text{other}` element-wise. +// +// +// The second argument can be a number or a tensor whose shape is +// :ref:`broadcastable ` with the first argument. +// +// Args: +// input (Tensor): the tensor to compare +// other (Tensor or float): the tensor or value to compare +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Returns: +// A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere +// +// Example:: +// +// >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) +// tensor([[False, True], [True, False]]) +// +// +//go:linkname Ne py.ne +func Ne(input *py.Object, other *py.Object) *py.Object +// +// neg(input, *, out=None) -> Tensor +// +// Returns a new tensor with the negative of the elements of :attr:`input`. +// +// .. math:: +// \text{out} = -1 \times \text{input} +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(5) +// >>> a +// tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) +// >>> torch.neg(a) +// tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) +// +// +//go:linkname Neg py.neg +func Neg(input *py.Object) *py.Object +// None +// +//go:linkname Neg_ py.neg_ +func Neg_(__llgo_va_list ...interface{}) *py.Object +// +// negative(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.neg` +// +// +//go:linkname Negative py.negative +func Negative(input *py.Object) *py.Object +// None +// +//go:linkname Negative_ py.negative_ +func Negative_(__llgo_va_list ...interface{}) *py.Object +// +// nextafter(input, other, *, out=None) -> Tensor +// +// Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise. +// +// The shapes of ``input`` and ``other`` must be +// :ref:`broadcastable `. 
+// +// Args: +// input (Tensor): the first input tensor +// other (Tensor): the second input tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> eps = torch.finfo(torch.float32).eps +// >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps]) +// tensor([True, True]) +// +// +// +//go:linkname Nextafter py.nextafter +func Nextafter(input *py.Object, other *py.Object) *py.Object +// +// nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors +// +// .. note:: +// :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a +// 2-D tensor where each row is the index for a nonzero value. +// +// :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D +// index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` +// gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor +// contains nonzero indices for a certain dimension. +// +// See below for more details on the two behaviors. +// +// When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes +// host-device synchronization. +// +// **When** :attr:`as_tuple` **is** ``False`` **(default)**: +// +// Returns a tensor containing the indices of all non-zero elements of +// :attr:`input`. Each row in the result contains the indices of a non-zero +// element in :attr:`input`. The result is sorted lexicographically, with +// the last index changing the fastest (C-style). +// +// If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor +// :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +// non-zero elements in the :attr:`input` tensor. +// +// **When** :attr:`as_tuple` **is** ``True``: +// +// Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, +// each containing the indices (in that dimension) of all non-zero elements of +// :attr:`input` . 
+// +// If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` +// tensors of size :math:`z`, where :math:`z` is the total number of +// non-zero elements in the :attr:`input` tensor. +// +// As a special case, when :attr:`input` has zero dimensions and a nonzero scalar +// value, it is treated as a one-dimensional tensor with one element. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (LongTensor, optional): the output tensor containing indices +// +// Returns: +// LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output +// tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for +// each dimension, containing the indices of each nonzero element along that +// dimension. +// +// Example:: +// +// >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) +// tensor([[ 0], +// [ 1], +// [ 2], +// [ 4]]) +// >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], +// ... [0.0, 0.4, 0.0, 0.0], +// ... [0.0, 0.0, 1.2, 0.0], +// ... [0.0, 0.0, 0.0,-0.4]])) +// tensor([[ 0, 0], +// [ 1, 1], +// [ 2, 2], +// [ 3, 3]]) +// >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) +// (tensor([0, 1, 2, 4]),) +// >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], +// ... [0.0, 0.4, 0.0, 0.0], +// ... [0.0, 0.0, 1.2, 0.0], +// ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True) +// (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) +// >>> torch.nonzero(torch.tensor(5), as_tuple=True) +// (tensor([0]),) +// +// +//go:linkname Nonzero py.nonzero +func Nonzero(input *py.Object) *py.Object +// None +// +//go:linkname NonzeroStatic py.nonzero_static +func NonzeroStatic(__llgo_va_list ...interface{}) *py.Object +// Returns the matrix norm or vector norm of a given tensor. +// +// .. warning:: +// +// torch.norm is deprecated and may be removed in a future PyTorch release. +// Its documentation and behavior may be incorrect, and it is no longer +// actively maintained. 
+// +// Use :func:`torch.linalg.vector_norm` when computing vector norms and +// :func:`torch.linalg.matrix_norm` when computing matrix norms. +// For a function with a similar behavior as this one see :func:`torch.linalg.norm`. +// Note, however, the signature for these functions is slightly different than the +// signature for ``torch.norm``. +// +// Args: +// input (Tensor): The input tensor. Its data type must be either a floating +// point or complex type. For complex inputs, the norm is calculated using the +// absolute value of each element. If the input is complex and neither +// :attr:`dtype` nor :attr:`out` is specified, the result's data type will +// be the corresponding floating point type (e.g. float if :attr:`input` is +// complexfloat). +// +// p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'`` +// The following norms can be calculated: +// +// ====== ============== ========================== +// ord matrix norm vector norm +// ====== ============== ========================== +// 'fro' Frobenius norm -- +// 'nuc' nuclear norm -- +// Number -- sum(abs(x)**ord)**(1./ord) +// ====== ============== ========================== +// +// The vector norm can be calculated across any number of dimensions. +// The corresponding dimensions of :attr:`input` are flattened into +// one dimension, and the norm is calculated on the flattened +// dimension. +// +// Frobenius norm produces the same result as ``p=2`` in all cases +// except when :attr:`dim` is a list of three or more dims, in which +// case Frobenius norm throws an error. +// +// Nuclear norm can only be calculated across exactly two dimensions. +// +// dim (int, tuple of ints, list of ints, optional): +// Specifies which dimension or dimensions of :attr:`input` to +// calculate the norm across. If :attr:`dim` is ``None``, the norm will +// be calculated across all dimensions of :attr:`input`. 
If the norm +// type indicated by :attr:`p` does not support the specified number of +// dimensions, an error will occur. +// keepdim (bool, optional): whether the output tensors have :attr:`dim` +// retained or not. Ignored if :attr:`dim` = ``None`` and +// :attr:`out` = ``None``. Default: ``False`` +// out (Tensor, optional): the output tensor. Ignored if +// :attr:`dim` = ``None`` and :attr:`out` = ``None``. +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. If specified, the input tensor is casted to +// :attr:`dtype` while performing the operation. Default: None. +// +// .. note:: +// Even though ``p='fro'`` supports any number of dimensions, the true +// mathematical definition of Frobenius norm only applies to tensors with +// exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'`` +// aligns with the mathematical definition, since it can only be applied across +// exactly two dimensions. +// +// Example:: +// +// >>> import torch +// >>> a = torch.arange(9, dtype= torch.float) - 4 +// >>> b = a.reshape((3, 3)) +// >>> torch.norm(a) +// tensor(7.7460) +// >>> torch.norm(b) +// tensor(7.7460) +// >>> torch.norm(a, float('inf')) +// tensor(4.) +// >>> torch.norm(b, float('inf')) +// tensor(4.) 
+// >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float) +// >>> torch.norm(c, dim=0) +// tensor([1.4142, 2.2361, 5.0000]) +// >>> torch.norm(c, dim=1) +// tensor([3.7417, 4.2426]) +// >>> torch.norm(c, p=1, dim=1) +// tensor([6., 6.]) +// >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2) +// >>> torch.norm(d, dim=(1, 2)) +// tensor([ 3.7417, 11.2250]) +// >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :]) +// (tensor(3.7417), tensor(11.2250)) +// +// +//go:linkname Norm py.norm +func Norm(input *py.Object, p *py.Object, dim *py.Object, keepdim *py.Object, out *py.Object, dtype *py.Object) *py.Object +// None +// +//go:linkname NormExceptDim py.norm_except_dim +func NormExceptDim(__llgo_va_list ...interface{}) *py.Object +// +// normal(mean, std, *, generator=None, out=None) -> Tensor +// +// Returns a tensor of random numbers drawn from separate normal distributions +// whose mean and standard deviation are given. +// +// The :attr:`mean` is a tensor with the mean of +// each output element's normal distribution +// +// The :attr:`std` is a tensor with the standard deviation of +// each output element's normal distribution +// +// The shapes of :attr:`mean` and :attr:`std` don't need to match, but the +// total number of elements in each tensor need to be the same. +// +// .. note:: When the shapes do not match, the shape of :attr:`mean` +// is used as the shape for the returned output tensor +// +// .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes +// its device with the CPU. +// +// Args: +// mean (Tensor): the tensor of per-element means +// std (Tensor): the tensor of per-element standard deviations +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) +// tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, +// 8.0505, 8.1408, 9.0563, 10.0566]) +// +// .. function:: normal(mean=0.0, std, *, out=None) -> Tensor +// :noindex: +// +// Similar to the function above, but the means are shared among all drawn +// elements. +// +// Args: +// mean (float, optional): the mean for all distributions +// std (Tensor): the tensor of per-element standard deviations +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) +// tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) +// +// .. function:: normal(mean, std=1.0, *, out=None) -> Tensor +// :noindex: +// +// Similar to the function above, but the standard deviations are shared among +// all drawn elements. +// +// Args: +// mean (Tensor): the tensor of per-element means +// std (float, optional): the standard deviation for all distributions +// +// Keyword args: +// out (Tensor, optional): the output tensor +// +// Example:: +// +// >>> torch.normal(mean=torch.arange(1., 6.)) +// tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) +// +// .. function:: normal(mean, std, size, *, out=None) -> Tensor +// :noindex: +// +// Similar to the function above, but the means and standard deviations are shared +// among all drawn elements. The resulting tensor has size given by :attr:`size`. +// +// Args: +// mean (float): the mean for all distributions +// std (float): the standard deviation for all distributions +// size (int...): a sequence of integers defining the shape of the output tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.normal(2, 3, size=(1, 4)) +// tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) +// +// +//go:linkname Normal py.normal +func Normal(mean *py.Object, std *py.Object) *py.Object +// +// not_equal(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.ne`. +// +// +//go:linkname NotEqual py.not_equal +func NotEqual(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname NuclearNorm py.nuclear_norm +func NuclearNorm(__llgo_va_list ...interface{}) *py.Object +// +// numel(input) -> int +// +// Returns the total number of elements in the :attr:`input` tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> a = torch.randn(1, 2, 3, 4, 5) +// >>> torch.numel(a) +// 120 +// >>> a = torch.zeros(4,4) +// >>> torch.numel(a) +// 16 +// +// +// +//go:linkname Numel py.numel +func Numel(input *py.Object) *py.Object +// +// ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a tensor filled with the scalar value `1`, with the shape defined +// by the variable argument :attr:`size`. +// +// Args: +// size (int...): a sequence of integers defining the shape of the output tensor. +// Can be a variable number of arguments or a collection like a list or tuple. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). 
:attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Example:: +// +// >>> torch.ones(2, 3) +// tensor([[ 1., 1., 1.], +// [ 1., 1., 1.]]) +// +// >>> torch.ones(5) +// tensor([ 1., 1., 1., 1., 1.]) +// +// +// +//go:linkname Ones py.ones +func Ones(__llgo_va_list ...interface{}) *py.Object +// +// ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor filled with the scalar value `1`, with the same size as +// :attr:`input`. ``torch.ones_like(input)`` is equivalent to +// ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// .. warning:: +// As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, +// the old ``torch.ones_like(input, out=output)`` is equivalent to +// ``torch.ones(input.size(), out=output)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword arguments: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. 
+// +// Example:: +// +// >>> input = torch.empty(2, 3) +// >>> torch.ones_like(input) +// tensor([[ 1., 1., 1.], +// [ 1., 1., 1.]]) +// +// +//go:linkname OnesLike py.ones_like +func OnesLike(input *py.Object) *py.Object +// +// orgqr(input, tau) -> Tensor +// +// Alias for :func:`torch.linalg.householder_product`. +// +// +//go:linkname Orgqr py.orgqr +func Orgqr(input *py.Object, tau *py.Object) *py.Object +// +// ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor +// +// Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix. +// +// Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`, +// where `Q` is represented using Householder reflectors `(input, tau)`. +// See `Representation of Orthogonal or Unitary Matrices`_ for further details. +// +// If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`. +// When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`. +// It has size :math:`n \times n` otherwise. +// If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op. +// +// Supports inputs of float, double, cfloat and cdouble dtypes. +// Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions. +// +// .. seealso:: +// :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q` +// from the QR decomposition. +// +// .. note:: +// This function supports backward but it is only fast when ``(input, tau)`` do not require gradients +// and/or ``tau.size(-1)`` is very small. +// `` +// +// Args: +// input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions +// and `mn` equals to `m` or `n` depending on the :attr:`left`. +// tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions. 
+// other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. +// left (bool): controls the order of multiplication. +// transpose (bool): controls whether the matrix `Q` is conjugate transposed or not. +// +// Keyword args: +// out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`. +// +// .. _Representation of Orthogonal or Unitary Matrices: +// https://www.netlib.org/lapack/lug/node128.html +// +// +//go:linkname Ormqr py.ormqr +func Ormqr(input *py.Object, tau *py.Object, other *py.Object, left *py.Object, transpose *py.Object) *py.Object +// +// outer(input, vec2, *, out=None) -> Tensor +// +// Outer product of :attr:`input` and :attr:`vec2`. +// If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of +// size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. +// +// .. note:: This function does not :ref:`broadcast `. +// +// Args: +// input (Tensor): 1-D input vector +// vec2 (Tensor): 1-D input vector +// +// Keyword args: +// out (Tensor, optional): optional output matrix +// +// Example:: +// +// >>> v1 = torch.arange(1., 5.) +// >>> v2 = torch.arange(1., 4.) +// >>> torch.outer(v1, v2) +// tensor([[ 1., 2., 3.], +// [ 2., 4., 6.], +// [ 3., 6., 9.], +// [ 4., 8., 12.]]) +// +// +//go:linkname Outer py.outer +func Outer(input *py.Object, vec2 *py.Object) *py.Object +// +// pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor +// +// See :class:`torch.nn.PairwiseDistance` for details +// +// +//go:linkname PairwiseDistance py.pairwise_distance +func PairwiseDistance(x1 *py.Object, x2 *py.Object, p *py.Object, eps *py.Object, keepdim *py.Object) *py.Object +// +// pdist(input, p=2) -> Tensor +// +// Computes the p-norm distance between every pair of row vectors in the input. +// This is identical to the upper triangular portion, excluding the diagonal, of +// `torch.norm(input[:, None] - input, dim=2, p=p)`. 
This function will be faster +// if the rows are contiguous. +// +// If input has shape :math:`N \times M` then the output will have shape +// :math:`\frac{1}{2} N (N - 1)`. +// +// This function is equivalent to ``scipy.spatial.distance.pdist(input, +// 'minkowski', p=p)`` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is +// equivalent to ``scipy.spatial.distance.pdist(input, 'hamming') * M``. +// When :math:`p = \infty`, the closest scipy function is +// ``scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())``. +// +// Args: +// input: input tensor of shape :math:`N \times M`. +// p: p value for the p-norm distance to calculate between each vector pair +// :math:`\in [0, \infty]`. +// +// +//go:linkname Pdist py.pdist +func Pdist(input *py.Object, p *py.Object) *py.Object +// +// permute(input, dims) -> Tensor +// +// Returns a view of the original tensor :attr:`input` with its dimensions permuted. +// +// Args: +// input (Tensor): the input tensor. +// dims (tuple of int): The desired ordering of dimensions +// +// Example: +// >>> x = torch.randn(2, 3, 5) +// >>> x.size() +// torch.Size([2, 3, 5]) +// >>> torch.permute(x, (2, 0, 1)).size() +// torch.Size([5, 2, 3]) +// +// +//go:linkname Permute py.permute +func Permute(input *py.Object, dims *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.permute`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname PermuteCopy py.permute_copy +func PermuteCopy(__llgo_va_list ...interface{}) *py.Object +// +// pinverse(input, rcond=1e-15) -> Tensor +// +// Alias for :func:`torch.linalg.pinv` +// +// +//go:linkname Pinverse py.pinverse +func Pinverse(input *py.Object, rcond *py.Object) *py.Object +// +// pixel_shuffle(input, upscale_factor) -> Tensor +// +// Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a +// tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the :attr:`upscale_factor`. 
+// +// See :class:`~torch.nn.PixelShuffle` for details. +// +// Args: +// input (Tensor): the input tensor +// upscale_factor (int): factor to increase spatial resolution by +// +// Examples:: +// +// >>> input = torch.randn(1, 9, 4, 4) +// >>> output = torch.nn.functional.pixel_shuffle(input, 3) +// >>> print(output.size()) +// torch.Size([1, 1, 12, 12]) +// +// +//go:linkname PixelShuffle py.pixel_shuffle +func PixelShuffle(input *py.Object, upscaleFactor *py.Object) *py.Object +// +// pixel_unshuffle(input, downscale_factor) -> Tensor +// +// Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a +// tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape +// :math:`(*, C \times r^2, H, W)`, where r is the :attr:`downscale_factor`. +// +// See :class:`~torch.nn.PixelUnshuffle` for details. +// +// Args: +// input (Tensor): the input tensor +// downscale_factor (int): factor to increase spatial resolution by +// +// Examples:: +// +// >>> input = torch.randn(1, 1, 12, 12) +// >>> output = torch.nn.functional.pixel_unshuffle(input, 3) +// >>> print(output.size()) +// torch.Size([1, 9, 4, 4]) +// +// +//go:linkname PixelUnshuffle py.pixel_unshuffle +func PixelUnshuffle(input *py.Object, downscaleFactor *py.Object) *py.Object +// +// poisson(input, generator=None) -> Tensor +// +// Returns a tensor of the same size as :attr:`input` with each element +// sampled from a Poisson distribution with rate parameter given by the corresponding +// element in :attr:`input` i.e., +// +// .. math:: +// \text{out}_i \sim \text{Poisson}(\text{input}_i) +// +// :attr:`input` must be non-negative. 
+// +// Args: +// input (Tensor): the input tensor containing the rates of the Poisson distribution +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// +// Example:: +// +// >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5 +// >>> torch.poisson(rates) +// tensor([[9., 1., 3., 5.], +// [8., 6., 6., 0.], +// [0., 4., 5., 3.], +// [2., 1., 4., 2.]]) +// +// +//go:linkname Poisson py.poisson +func Poisson(input *py.Object, generator *py.Object) *py.Object +// None +// +//go:linkname PoissonNllLoss py.poisson_nll_loss +func PoissonNllLoss(__llgo_va_list ...interface{}) *py.Object +// +// polar(abs, angle, *, out=None) -> Tensor +// +// Constructs a complex tensor whose elements are Cartesian coordinates +// corresponding to the polar coordinates with absolute value :attr:`abs` and angle +// :attr:`angle`. +// +// .. math:: +// \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j +// +// .. note:: +// `torch.polar` is similar to +// `std::polar `_ +// and does not compute the polar decomposition +// of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do. +// The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is +// infinite. +// +// +// Args: +// abs (Tensor): The absolute value the complex tensor. Must be float or double. +// angle (Tensor): The angle of the complex tensor. Must be same dtype as +// :attr:`abs`. +// +// Keyword args: +// out (Tensor): If the inputs are ``torch.float32``, must be +// ``torch.complex64``. If the inputs are ``torch.float64``, must be +// ``torch.complex128``. 
+// +// Example:: +// +// >>> import numpy as np +// >>> abs = torch.tensor([1, 2], dtype=torch.float64) +// >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64) +// >>> z = torch.polar(abs, angle) +// >>> z +// tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128) +// +// +//go:linkname Polar py.polar +func Polar(abs *py.Object, angle *py.Object) *py.Object +// +// polygamma(n, input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.polygamma`. +// +// +//go:linkname Polygamma py.polygamma +func Polygamma(n *py.Object, input *py.Object) *py.Object +// +// positive(input) -> Tensor +// +// Returns :attr:`input`. +// Throws a runtime error if :attr:`input` is a bool tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> t = torch.randn(5) +// >>> t +// tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) +// >>> torch.positive(t) +// tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) +// +// +//go:linkname Positive py.positive +func Positive(input *py.Object) *py.Object +// +// pow(input, exponent, *, out=None) -> Tensor +// +// Takes the power of each element in :attr:`input` with :attr:`exponent` and +// returns a tensor with the result. +// +// :attr:`exponent` can be either a single ``float`` number or a `Tensor` +// with the same number of elements as :attr:`input`. +// +// When :attr:`exponent` is a scalar value, the operation applied is: +// +// .. math:: +// \text{out}_i = x_i ^ \text{exponent} +// +// When :attr:`exponent` is a tensor, the operation applied is: +// +// .. math:: +// \text{out}_i = x_i ^ {\text{exponent}_i} +// +// When :attr:`exponent` is a tensor, the shapes of :attr:`input` +// and :attr:`exponent` must be :ref:`broadcastable `. +// +// Args: +// input (Tensor): the input tensor. +// exponent (float or tensor): the exponent value +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) +// >>> torch.pow(a, 2) +// tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) +// >>> exp = torch.arange(1., 5.) +// +// >>> a = torch.arange(1., 5.) +// >>> a +// tensor([ 1., 2., 3., 4.]) +// >>> exp +// tensor([ 1., 2., 3., 4.]) +// >>> torch.pow(a, exp) +// tensor([ 1., 4., 27., 256.]) +// +// .. function:: pow(self, exponent, *, out=None) -> Tensor +// :noindex: +// +// :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. +// The returned tensor :attr:`out` is of the same shape as :attr:`exponent` +// +// The operation applied is: +// +// .. math:: +// \text{out}_i = \text{self} ^ {\text{exponent}_i} +// +// Args: +// self (float): the scalar base value for the power operation +// exponent (Tensor): the exponent tensor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> exp = torch.arange(1., 5.) +// >>> base = 2 +// >>> torch.pow(base, exp) +// tensor([ 2., 4., 8., 16.]) +// +// +//go:linkname Pow py.pow +func Pow(input *py.Object, exponent *py.Object) *py.Object +// prelu(input, weight) -> Tensor +// +// Applies element-wise the function +// :math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a +// learnable parameter. +// +// .. note:: +// `weight` is expected to be a scalar or 1-D tensor. If `weight` is 1-D, +// its size must match the number of input channels, determined by +// `input.size(1)` when `input.dim() >= 2`, otherwise 1. +// In the 1-D case, note that when `input` has dim > 2, `weight` can be expanded +// to the shape of `input` in a way that is not possible using normal +// :ref:`broadcasting semantics`. +// +// See :class:`~torch.nn.PReLU` for more details. 
+// +// +//go:linkname Prelu py.prelu +func Prelu(input *py.Object, weight *py.Object) *py.Object +// +// prod(input, *, dtype=None) -> Tensor +// +// Returns the product of all elements in the :attr:`input` tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[-0.8020, 0.5428, -1.5854]]) +// >>> torch.prod(a) +// tensor(0.6902) +// +// .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor +// :noindex: +// +// Returns the product of each row of the :attr:`input` tensor in the given +// dimension :attr:`dim`. +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +// the output tensor having 1 fewer dimension than :attr:`input`. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. 
+// +// Example:: +// +// >>> a = torch.randn(4, 2) +// >>> a +// tensor([[ 0.5261, -0.3837], +// [ 1.1857, -0.2498], +// [-1.1646, 0.0705], +// [ 1.1131, -1.0629]]) +// >>> torch.prod(a, 1) +// tensor([-0.2018, -0.2962, -0.0821, -1.1831]) +// +// +//go:linkname Prod py.prod +func Prod(input *py.Object) *py.Object +// +// promote_types(type1, type2) -> dtype +// +// Returns the :class:`torch.dtype` with the smallest size and scalar kind that is +// not smaller nor of lower kind than either `type1` or `type2`. See type promotion +// :ref:`documentation ` for more information on the type +// promotion logic. +// +// Args: +// type1 (:class:`torch.dtype`) +// type2 (:class:`torch.dtype`) +// +// Example:: +// +// >>> torch.promote_types(torch.int32, torch.float32) +// torch.float32 +// >>> torch.promote_types(torch.uint8, torch.long) +// torch.long +// +// +//go:linkname PromoteTypes py.promote_types +func PromoteTypes(type1 *py.Object, type2 *py.Object) *py.Object +// None +// +//go:linkname Put py.put +func Put(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QPerChannelAxis py.q_per_channel_axis +func QPerChannelAxis(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QPerChannelScales py.q_per_channel_scales +func QPerChannelScales(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QPerChannelZeroPoints py.q_per_channel_zero_points +func QPerChannelZeroPoints(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QScale py.q_scale +func QScale(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QZeroPoint py.q_zero_point +func QZeroPoint(__llgo_va_list ...interface{}) *py.Object +// +// qr(input, some=True, *, out=None) -> (Tensor, Tensor) +// +// Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`, +// and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R` +// with :math:`Q` being an orthogonal matrix or batch of 
orthogonal matrices and +// :math:`R` being an upper triangular matrix or batch of upper triangular matrices. +// +// If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization. +// Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization. +// +// .. warning:: +// +// :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr` +// and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been +// replaced with a string parameter :attr:`mode`. +// +// ``Q, R = torch.qr(A)`` should be replaced with +// +// .. code:: python +// +// Q, R = torch.linalg.qr(A) +// +// ``Q, R = torch.qr(A, some=False)`` should be replaced with +// +// .. code:: python +// +// Q, R = torch.linalg.qr(A, mode="complete") +// +// .. warning:: +// If you plan to backpropagate through QR, note that the current backward implementation +// is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))` +// columns of :attr:`input` are linearly independent. +// This behavior will probably change once QR supports pivoting. +// +// .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs, +// and may produce different (valid) decompositions on different device types +// or different platforms. +// +// Args: +// input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more +// batch dimensions consisting of matrices of dimension :math:`m \times n`. +// some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for +// complete QR decomposition. If `k = min(m, n)` then: +// +// * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default) +// +// * ``'some=False'``: returns `(Q, R)` with dimensions (m, m), (m, n) +// +// Keyword args: +// out (tuple, optional): tuple of `Q` and `R` tensors. +// The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above. 
+// +// Example:: +// +// >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]]) +// >>> q, r = torch.qr(a) +// >>> q +// tensor([[-0.8571, 0.3943, 0.3314], +// [-0.4286, -0.9029, -0.0343], +// [ 0.2857, -0.1714, 0.9429]]) +// >>> r +// tensor([[ -14.0000, -21.0000, 14.0000], +// [ 0.0000, -175.0000, 70.0000], +// [ 0.0000, 0.0000, -35.0000]]) +// >>> torch.mm(q, r).round() +// tensor([[ 12., -51., 4.], +// [ 6., 167., -68.], +// [ -4., 24., -41.]]) +// >>> torch.mm(q.t(), q).round() +// tensor([[ 1., 0., 0.], +// [ 0., 1., -0.], +// [ 0., -0., 1.]]) +// >>> a = torch.randn(3, 4, 5) +// >>> q, r = torch.qr(a, some=False) +// >>> torch.allclose(torch.matmul(q, r), a) +// True +// >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5)) +// True +// +// +//go:linkname Qr py.qr +func Qr(input *py.Object, some *py.Object) *py.Object +// +// quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor +// +// Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. +// +// To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location +// of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with +// indices ``i`` and ``j`` in the sorted order, result is computed according to the given +// :attr:`interpolation` method as follows: +// +// - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. +// - ``lower``: ``a``. +// - ``higher``: ``b``. +// - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). +// - ``midpoint``: ``(a + b) / 2``. +// +// If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size +// equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. +// +// .. 
note:: +// By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. +// +// Args: +// input (Tensor): the input tensor. +// q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. +// dim (int): the dimension to reduce. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword arguments: +// interpolation (str): interpolation method to use when the desired quantile lies between two data points. +// Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. +// Default is ``linear``. +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(2, 3) +// >>> a +// tensor([[ 0.0795, -1.2117, 0.9765], +// [ 1.1707, 0.6706, 0.4884]]) +// >>> q = torch.tensor([0.25, 0.5, 0.75]) +// >>> torch.quantile(a, q, dim=1, keepdim=True) +// tensor([[[-0.5661], +// [ 0.5795]], +// +// [[ 0.0795], +// [ 0.6706]], +// +// [[ 0.5280], +// [ 0.9206]]]) +// >>> torch.quantile(a, q, dim=1, keepdim=True).shape +// torch.Size([3, 2, 1]) +// >>> a = torch.arange(4.) +// >>> a +// tensor([0., 1., 2., 3.]) +// >>> torch.quantile(a, 0.6, interpolation='linear') +// tensor(1.8000) +// >>> torch.quantile(a, 0.6, interpolation='lower') +// tensor(1.) +// >>> torch.quantile(a, 0.6, interpolation='higher') +// tensor(2.) +// >>> torch.quantile(a, 0.6, interpolation='midpoint') +// tensor(1.5000) +// >>> torch.quantile(a, 0.6, interpolation='nearest') +// tensor(2.) +// >>> torch.quantile(a, 0.4, interpolation='nearest') +// tensor(1.) +// +// +//go:linkname Quantile py.quantile +func Quantile(input *py.Object, q *py.Object, dim *py.Object, keepdim *py.Object) *py.Object +// +// quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor +// +// Converts a float tensor to a per-channel quantized tensor with given scales and zero points. 
+// +// Arguments: +// input (Tensor): float tensor to quantize +// scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)`` +// zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)`` +// axis (int): dimension on which apply per-channel quantization +// dtype (:class:`torch.dtype`): the desired data type of returned tensor. +// Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` +// +// Returns: +// Tensor: A newly quantized tensor +// +// Example:: +// +// >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]]) +// >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8) +// tensor([[-1., 0.], +// [ 1., 2.]], size=(2, 2), dtype=torch.quint8, +// quantization_scheme=torch.per_channel_affine, +// scale=tensor([0.1000, 0.0100], dtype=torch.float64), +// zero_point=tensor([10, 0]), axis=0) +// >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr() +// tensor([[ 0, 10], +// [100, 200]], dtype=torch.uint8) +// +// +//go:linkname QuantizePerChannel py.quantize_per_channel +func QuantizePerChannel(input *py.Object, scales *py.Object, zeroPoints *py.Object, axis *py.Object, dtype *py.Object) *py.Object +// +// quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor +// +// Converts a float tensor to a quantized tensor with given scale and zero point. +// +// Arguments: +// input (Tensor): float tensor or list of tensors to quantize +// scale (float or Tensor): scale to apply in quantization formula +// zero_point (int or Tensor): offset in integer value that maps to float zero +// dtype (:class:`torch.dtype`): the desired data type of returned tensor. +// Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` +// +// Returns: +// Tensor: A newly quantized tensor or list of quantized tensors. 
+// +// Example:: +// +// >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) +// tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) +// >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() +// tensor([ 0, 10, 20, 30], dtype=torch.uint8) +// >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], +// >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) +// (tensor([-1., 0.], size=(2,), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), +// tensor([-2., 2.], size=(2,), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) +// >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) +// tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) +// +// +//go:linkname QuantizePerTensor py.quantize_per_tensor +func QuantizePerTensor(input *py.Object, scale *py.Object, zeroPoint *py.Object, dtype *py.Object) *py.Object +// +// quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor +// +// Converts a float tensor to a quantized tensor with scale and zero_point calculated +// dynamically based on the input. +// +// Arguments: +// input (Tensor): float tensor or list of tensors to quantize +// dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+// Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8`` +// reduce_range (bool): a flag to indicate whether to reduce the range of quantized +// data by 1 bit, it's required to avoid instruction overflow for some hardwares +// +// Returns: +// Tensor: A newly (dynamically) quantized tensor +// +// Example:: +// +// >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False) +// >>> print(t) +// tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941, +// zero_point=85) +// >>> t.int_repr() +// tensor([ 0, 85, 170, 255], dtype=torch.uint8) +// +// +//go:linkname QuantizePerTensorDynamic py.quantize_per_tensor_dynamic +func QuantizePerTensorDynamic(input *py.Object, dtype *py.Object, reduceRange *py.Object) *py.Object +// +// quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor +// +// Applies batch normalization on a 4D (NCHW) quantized tensor. +// +// .. math:: +// +// y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta +// +// Arguments: +// input (Tensor): quantized tensor +// weight (Tensor): float tensor that corresponds to the gamma, size C +// bias (Tensor): float tensor that corresponds to the beta, size C +// mean (Tensor): float mean value in batch normalization, size C +// var (Tensor): float tensor for variance, size C +// eps (float): a value added to the denominator for numerical stability. +// output_scale (float): output quantized tensor scale +// output_zero_point (int): output quantized tensor zero_point +// +// Returns: +// Tensor: A quantized tensor with batch normalization applied. 
+// +// Example:: +// +// >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) +// >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2) +// tensor([[[[-0.2000, -0.2000], +// [ 1.6000, -0.2000]], +// +// [[-0.4000, -0.4000], +// [-0.4000, 0.6000]]], +// +// +// [[[-0.2000, -0.2000], +// [-0.2000, -0.2000]], +// +// [[ 0.6000, -0.4000], +// [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2) +// +// +//go:linkname QuantizedBatchNorm py.quantized_batch_norm +func QuantizedBatchNorm(input *py.Object, weight *py.Object, bias *py.Object, mean *py.Object, var_ *py.Object, eps *py.Object, outputScale *py.Object, outputZeroPoint *py.Object) *py.Object +// None +// +//go:linkname QuantizedGruCell py.quantized_gru_cell +func QuantizedGruCell(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QuantizedLstmCell py.quantized_lstm_cell +func QuantizedLstmCell(__llgo_va_list ...interface{}) *py.Object +// +// quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor +// +// Applies a 1D max pooling over an input quantized tensor composed of several input planes. +// +// Arguments: +// input (Tensor): quantized tensor +// kernel_size (list of int): the size of the sliding window +// stride (``list of int``, optional): the stride of the sliding window +// padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 +// dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 +// ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. +// Defaults to False. +// +// +// Returns: +// Tensor: A quantized tensor with max_pool1d applied. 
+// +// Example:: +// +// >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8) +// >>> torch.quantized_max_pool1d(qx, [2]) +// tensor([[0.0000], +// [1.5000]], size=(2, 1), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +// +// +//go:linkname QuantizedMaxPool1d py.quantized_max_pool1d +func QuantizedMaxPool1d(input *py.Object, kernelSize *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, ceilMode *py.Object) *py.Object +// +// quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor +// +// Applies a 2D max pooling over an input quantized tensor composed of several input planes. +// +// Arguments: +// input (Tensor): quantized tensor +// kernel_size (``list of int``): the size of the sliding window +// stride (``list of int``, optional): the stride of the sliding window +// padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 +// dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 +// ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. +// Defaults to False. +// +// +// Returns: +// Tensor: A quantized tensor with max_pool2d applied. 
+// +// Example:: +// +// >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) +// >>> torch.quantized_max_pool2d(qx, [2,2]) +// tensor([[[[1.5000]], +// +// [[1.5000]]], +// +// +// [[[0.0000]], +// +// [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8, +// quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +// +// +//go:linkname QuantizedMaxPool2d py.quantized_max_pool2d +func QuantizedMaxPool2d(input *py.Object, kernelSize *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, ceilMode *py.Object) *py.Object +// None +// +//go:linkname QuantizedMaxPool3d py.quantized_max_pool3d +func QuantizedMaxPool3d(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QuantizedRnnReluCell py.quantized_rnn_relu_cell +func QuantizedRnnReluCell(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname QuantizedRnnTanhCell py.quantized_rnn_tanh_cell +func QuantizedRnnTanhCell(__llgo_va_list ...interface{}) *py.Object +// +// rad2deg(input, *, out=None) -> Tensor +// +// Returns a new tensor with each of the elements of :attr:`input` +// converted from angles in radians to degrees. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword arguments: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) +// >>> torch.rad2deg(a) +// tensor([[ 180.0233, -180.0233], +// [ 359.9894, -359.9894], +// [ 89.9544, -89.9544]]) +// +// +// +//go:linkname Rad2deg py.rad2deg +func Rad2deg(input *py.Object) *py.Object +// None +// +//go:linkname Rad2deg_ py.rad2deg_ +func Rad2deg_(__llgo_va_list ...interface{}) *py.Object +// +// rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// Returns a tensor filled with random numbers from a uniform distribution +// on the interval :math:`[0, 1)` +// +// The shape of the tensor is defined by the variable argument :attr:`size`. +// +// Args: +// size (int...): a sequence of integers defining the shape of the output tensor. +// Can be a variable number of arguments or a collection like a list or tuple. +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. 
+// +// Example:: +// +// >>> torch.rand(4) +// tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) +// >>> torch.rand(2, 3) +// tensor([[ 0.8237, 0.5781, 0.6879], +// [ 0.3816, 0.7249, 0.0998]]) +// +// +//go:linkname Rand py.rand +func Rand(__llgo_va_list ...interface{}) *py.Object +// +// rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor with the same size as :attr:`input` that is filled with +// random numbers from a uniform distribution on the interval :math:`[0, 1)`. +// ``torch.rand_like(input)`` is equivalent to +// ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. +// +// +// +//go:linkname RandLike py.rand_like +func RandLike(input *py.Object) *py.Object +// +// randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a tensor filled with random integers generated uniformly +// between :attr:`low` (inclusive) and :attr:`high` (exclusive). 
+// +// The shape of the tensor is defined by the variable argument :attr:`size`. +// +// .. note:: +// With the global dtype default (``torch.float32``), this function returns +// a tensor with dtype ``torch.int64``. +// +// Args: +// low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. +// high (int): One above the highest integer to be drawn from the distribution. +// size (tuple): a tuple defining the shape of the output tensor. +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, +// this function returns a tensor with dtype ``torch.int64``. +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// +// Example:: +// +// >>> torch.randint(3, 5, (3,)) +// tensor([4, 3, 4]) +// +// +// >>> torch.randint(10, (2, 2)) +// tensor([[0, 2], +// [5, 5]]) +// +// +// >>> torch.randint(3, 10, (2, 2)) +// tensor([[4, 5], +// [6, 7]]) +// +// +// +// +//go:linkname Randint py.randint +func Randint(low *py.Object, high *py.Object, size *py.Object) *py.Object +// +// randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor with the same shape as Tensor :attr:`input` filled with +// random integers generated uniformly between :attr:`low` (inclusive) and +// :attr:`high` (exclusive). +// +// .. note: +// With the global dtype default (``torch.float32``), this function returns +// a tensor with dtype ``torch.int64``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. +// high (int): One above the highest integer to be drawn from the distribution. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. 
+// +// +// +//go:linkname RandintLike py.randint_like +func RandintLike(input *py.Object, low *py.Object, high *py.Object) *py.Object +// +// randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// +// Returns a tensor filled with random numbers from a normal distribution +// with mean `0` and variance `1` (also called the standard normal +// distribution). +// +// .. math:: +// \text{out}_{i} \sim \mathcal{N}(0, 1) +// +// The shape of the tensor is defined by the variable argument :attr:`size`. +// +// Args: +// size (int...): a sequence of integers defining the shape of the output tensor. +// Can be a variable number of arguments or a collection like a list or tuple. +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. 
+// +// Example:: +// +// >>> torch.randn(4) +// tensor([-2.1436, 0.9966, 2.3426, -0.6366]) +// >>> torch.randn(2, 3) +// tensor([[ 1.5954, 2.8929, -1.0923], +// [ 1.1719, -0.4709, -0.1996]]) +// +// +//go:linkname Randn py.randn +func Randn(__llgo_va_list ...interface{}) *py.Object +// +// randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor with the same size as :attr:`input` that is filled with +// random numbers from a normal distribution with mean 0 and variance 1. +// ``torch.randn_like(input)`` is equivalent to +// ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. +// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. +// +// +// +//go:linkname RandnLike py.randn_like +func RandnLike(input *py.Object) *py.Object +// +// randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// Returns a random permutation of integers from ``0`` to ``n - 1``. 
+// +// Args: +// n (int): the upper bound (exclusive) +// +// Keyword args: +// generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: ``torch.int64``. +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. +// +// Example:: +// +// >>> torch.randperm(4) +// tensor([2, 1, 0, 3]) +// +// +//go:linkname Randperm py.randperm +func Randperm(n *py.Object) *py.Object +// +// range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1` +// with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is +// the gap between two values in the tensor. +// +// .. math:: +// \text{out}_{i+1} = \text{out}_i + \text{step}. +// +// .. warning:: +// This function is deprecated and will be removed in a future release because its behavior is inconsistent with +// Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end). +// +// Args: +// start (float): the starting value for the set of points. Default: ``0``. 
+// end (float): the ending value for the set of points +// step (float): the gap between each pair of adjacent points. Default: ``1``. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). If `dtype` is not given, infer the data type from the other input +// arguments. If any of `start`, `end`, or `stop` are floating-point, the +// `dtype` is inferred to be the default dtype, see +// :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to +// be `torch.int64`. +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Example:: +// +// >>> torch.range(1, 4) +// tensor([ 1., 2., 3., 4.]) +// >>> torch.range(1, 4, 0.5) +// tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000]) +// +// +//go:linkname Range py.range +func Range(start *py.Object, end *py.Object, step *py.Object) *py.Object +// +// ravel(input) -> Tensor +// +// Return a contiguous flattened tensor. A copy is made only if needed. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> t = torch.tensor([[[1, 2], +// ... [3, 4]], +// ... [[5, 6], +// ... 
[7, 8]]]) +// >>> torch.ravel(t) +// tensor([1, 2, 3, 4, 5, 6, 7, 8]) +// +// +//go:linkname Ravel py.ravel +func Ravel(input *py.Object) *py.Object +// +// real(input) -> Tensor +// +// Returns a new tensor containing real values of the :attr:`self` tensor. +// The returned tensor and :attr:`self` share the same underlying storage. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x=torch.randn(4, dtype=torch.cfloat) +// >>> x +// tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) +// >>> x.real +// tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) +// +// +// +//go:linkname Real py.real +func Real(input *py.Object) *py.Object +// +// reciprocal(input, *, out=None) -> Tensor +// +// Returns a new tensor with the reciprocal of the elements of :attr:`input` +// +// .. math:: +// \text{out}_{i} = \frac{1}{\text{input}_{i}} +// +// .. note:: +// Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral +// inputs to reciprocal are automatically :ref:`promoted ` to +// the default scalar type. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.4595, -2.1219, -1.4314, 0.7298]) +// >>> torch.reciprocal(a) +// tensor([-2.1763, -0.4713, -0.6986, 1.3702]) +// +// +//go:linkname Reciprocal py.reciprocal +func Reciprocal(input *py.Object) *py.Object +// None +// +//go:linkname Reciprocal_ py.reciprocal_ +func Reciprocal_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Relu py.relu +func Relu(__llgo_va_list ...interface{}) *py.Object +// +// relu_(input) -> Tensor +// +// In-place version of :func:`~relu`. +// +// +//go:linkname Relu_ py.relu_ +func Relu_(input *py.Object) *py.Object +// +// remainder(input, other, *, out=None) -> Tensor +// +// Computes +// `Python's modulus operation `_ +// entrywise. 
The result has the same sign as the divisor :attr:`other` and its absolute value +// is less than that of :attr:`other`. +// +// It may also be defined in terms of :func:`torch.div` as +// +// .. code:: python +// +// torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer and float inputs. +// +// .. note:: +// Complex inputs are not supported. In some cases, it is not mathematically +// possible to satisfy the definition of a modulo operation with complex numbers. +// See :func:`torch.fmod` for how division by zero is handled. +// +// .. seealso:: +// +// :func:`torch.fmod` which implements C++'s `std::fmod `_. +// This one is defined in terms of division rounding towards zero. +// +// Args: +// input (Tensor or Scalar): the dividend +// other (Tensor or Scalar): the divisor +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) +// tensor([ 1., 0., 1., 1., 0., 1.]) +// >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) +// tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) +// +// +//go:linkname Remainder py.remainder +func Remainder(input *py.Object, other *py.Object) *py.Object +// +// renorm(input, p, dim, maxnorm, *, out=None) -> Tensor +// +// Returns a tensor where each sub-tensor of :attr:`input` along dimension +// :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower +// than the value :attr:`maxnorm` +// +// .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged +// +// Args: +// input (Tensor): the input tensor. +// p (float): the power for the norm computation +// dim (int): the dimension to slice over to get the sub-tensors +// maxnorm (float): the maximum norm to keep each sub-tensor under +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> x = torch.ones(3, 3) +// >>> x[1].fill_(2) +// tensor([ 2., 2., 2.]) +// >>> x[2].fill_(3) +// tensor([ 3., 3., 3.]) +// >>> x +// tensor([[ 1., 1., 1.], +// [ 2., 2., 2.], +// [ 3., 3., 3.]]) +// >>> torch.renorm(x, 1, 0, 5) +// tensor([[ 1.0000, 1.0000, 1.0000], +// [ 1.6667, 1.6667, 1.6667], +// [ 1.6667, 1.6667, 1.6667]]) +// +// +//go:linkname Renorm py.renorm +func Renorm(input *py.Object, p *py.Object, dim *py.Object, maxnorm *py.Object) *py.Object +// +// repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor +// +// Repeat elements of a tensor. +// +// .. warning:: +// +// This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. +// +// Args: +// input (Tensor): the input tensor. +// repeats (Tensor or int): The number of repetitions for each element. +// repeats is broadcasted to fit the shape of the given axis. +// dim (int, optional): The dimension along which to repeat values. +// By default, use the flattened input array, and return a flat output +// array. +// +// Keyword args: +// output_size (int, optional): Total output size for the given axis +// ( e.g. sum of repeats). If given, it will avoid stream synchronization +// needed to calculate output shape of the tensor. +// +// Returns: +// Tensor: Repeated tensor which has the same shape as input, except along the given axis. 
+// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3]) +// >>> x.repeat_interleave(2) +// tensor([1, 1, 2, 2, 3, 3]) +// >>> y = torch.tensor([[1, 2], [3, 4]]) +// >>> torch.repeat_interleave(y, 2) +// tensor([1, 1, 2, 2, 3, 3, 4, 4]) +// >>> torch.repeat_interleave(y, 3, dim=1) +// tensor([[1, 1, 1, 2, 2, 2], +// [3, 3, 3, 4, 4, 4]]) +// >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) +// tensor([[1, 2], +// [3, 4], +// [3, 4]]) +// >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) +// tensor([[1, 2], +// [3, 4], +// [3, 4]]) +// +// If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be +// `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, +// `1` appears `n2` times, `2` appears `n3` times, etc. +// +// .. function:: repeat_interleave(repeats, *) -> Tensor +// :noindex: +// +// Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. +// +// Args: +// repeats (Tensor): The number of repetitions for each element. +// +// Returns: +// Tensor: Repeated tensor of size `sum(repeats)`. +// +// Example:: +// +// >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) +// tensor([0, 1, 1, 2, 2, 2]) +// +// +// +//go:linkname RepeatInterleave py.repeat_interleave +func RepeatInterleave(input *py.Object, repeats *py.Object, dim *py.Object) *py.Object +// +// reshape(input, shape) -> Tensor +// +// Returns a tensor with the same data and number of elements as :attr:`input`, +// but with the specified shape. When possible, the returned tensor will be a view +// of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs +// with compatible strides can be reshaped without copying, but you should not +// depend on the copying vs. viewing behavior. +// +// See :meth:`torch.Tensor.view` on when it is possible to return a view. 
+// +// A single dimension may be -1, in which case it's inferred from the remaining +// dimensions and the number of elements in :attr:`input`. +// +// Args: +// input (Tensor): the tensor to be reshaped +// shape (tuple of int): the new shape +// +// Example:: +// +// >>> a = torch.arange(4.) +// >>> torch.reshape(a, (2, 2)) +// tensor([[ 0., 1.], +// [ 2., 3.]]) +// >>> b = torch.tensor([[0, 1], [2, 3]]) +// >>> torch.reshape(b, (-1,)) +// tensor([ 0, 1, 2, 3]) +// +// +//go:linkname Reshape py.reshape +func Reshape(input *py.Object, shape *py.Object) *py.Object +// None +// +//go:linkname ResizeAs_ py.resize_as_ +func ResizeAs_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ResizeAsSparse_ py.resize_as_sparse_ +func ResizeAsSparse_(__llgo_va_list ...interface{}) *py.Object +// +// resolve_conj(input) -> Tensor +// +// Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`, +// else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) +// >>> y = x.conj() +// >>> y.is_conj() +// True +// >>> z = y.resolve_conj() +// >>> z +// tensor([-1 - 1j, -2 - 2j, 3 + 3j]) +// >>> z.is_conj() +// False +// +// +//go:linkname ResolveConj py.resolve_conj +func ResolveConj(input *py.Object) *py.Object +// +// resolve_neg(input) -> Tensor +// +// Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`, +// else returns :attr:`input`. The output tensor will always have its negative bit set to `False`. +// +// Args: +// input (Tensor): the input tensor. 
+// +// Example:: +// +// >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) +// >>> y = x.conj() +// >>> z = y.imag +// >>> z.is_neg() +// True +// >>> out = z.resolve_neg() +// >>> out +// tensor([-1., -2., 3.]) +// >>> out.is_neg() +// False +// +// +//go:linkname ResolveNeg py.resolve_neg +func ResolveNeg(input *py.Object) *py.Object +// +// result_type(tensor1, tensor2) -> dtype +// +// Returns the :class:`torch.dtype` that would result from performing an arithmetic +// operation on the provided input tensors. See type promotion :ref:`documentation ` +// for more information on the type promotion logic. +// +// Args: +// tensor1 (Tensor or Number): an input tensor or number +// tensor2 (Tensor or Number): an input tensor or number +// +// Example:: +// +// >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) +// torch.float32 +// >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) +// torch.uint8 +// +// +//go:linkname ResultType py.result_type +func ResultType(tensor1 *py.Object, tensor2 *py.Object) *py.Object +// None +// +//go:linkname RnnRelu py.rnn_relu +func RnnRelu(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname RnnReluCell py.rnn_relu_cell +func RnnReluCell(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname RnnTanh py.rnn_tanh +func RnnTanh(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname RnnTanhCell py.rnn_tanh_cell +func RnnTanhCell(__llgo_va_list ...interface{}) *py.Object +// +// roll(input, shifts, dims=None) -> Tensor +// +// Roll the tensor :attr:`input` along the given dimension(s). Elements that are +// shifted beyond the last position are re-introduced at the first position. If +// :attr:`dims` is `None`, the tensor will be flattened before rolling and then +// restored to the original shape. +// +// Args: +// input (Tensor): the input tensor. 
+// shifts (int or tuple of ints): The number of places by which the elements +// of the tensor are shifted. If shifts is a tuple, dims must be a tuple of +// the same size, and each dimension will be rolled by the corresponding +// value +// dims (int or tuple of ints): Axis along which to roll +// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2) +// >>> x +// tensor([[1, 2], +// [3, 4], +// [5, 6], +// [7, 8]]) +// >>> torch.roll(x, 1) +// tensor([[8, 1], +// [2, 3], +// [4, 5], +// [6, 7]]) +// >>> torch.roll(x, 1, 0) +// tensor([[7, 8], +// [1, 2], +// [3, 4], +// [5, 6]]) +// >>> torch.roll(x, -1, 0) +// tensor([[3, 4], +// [5, 6], +// [7, 8], +// [1, 2]]) +// >>> torch.roll(x, shifts=(2, 1), dims=(0, 1)) +// tensor([[6, 5], +// [8, 7], +// [2, 1], +// [4, 3]]) +// +// +//go:linkname Roll py.roll +func Roll(input *py.Object, shifts *py.Object, dims *py.Object) *py.Object +// +// rot90(input, k=1, dims=[0,1]) -> Tensor +// +// Rotate an n-D tensor by 90 degrees in the plane specified by dims axis. +// Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0. +// +// Args: +// input (Tensor): the input tensor. +// k (int): number of times to rotate. Default value is 1 +// dims (a list or tuple): axis to rotate. Default value is [0, 1] +// +// Example:: +// +// >>> x = torch.arange(4).view(2, 2) +// >>> x +// tensor([[0, 1], +// [2, 3]]) +// >>> torch.rot90(x, 1, [0, 1]) +// tensor([[1, 3], +// [0, 2]]) +// +// >>> x = torch.arange(8).view(2, 2, 2) +// >>> x +// tensor([[[0, 1], +// [2, 3]], +// +// [[4, 5], +// [6, 7]]]) +// >>> torch.rot90(x, 1, [1, 2]) +// tensor([[[1, 3], +// [0, 2]], +// +// [[5, 7], +// [4, 6]]]) +// +// +//go:linkname Rot90 py.rot90 +func Rot90(input *py.Object, k *py.Object, dims *py.Object) *py.Object +// +// round(input, *, decimals=0, out=None) -> Tensor +// +// Rounds elements of :attr:`input` to the nearest integer. 
+// +// For integer inputs, follows the array-api convention of returning a +// copy of the input tensor. +// The return type of output is same as that of input's dtype. +// +// .. note:: +// This function implements the "round half to even" to +// break ties when a number is equidistant from two +// integers (e.g. `round(2.5)` is 2). +// +// When the :attr:\`decimals\` argument is specified the +// algorithm used is similar to NumPy's `around`. This +// algorithm is fast but inexact and it can easily +// overflow for low precision dtypes. +// Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. +// +// .. seealso:: +// :func:`torch.ceil`, which rounds up. +// :func:`torch.floor`, which rounds down. +// :func:`torch.trunc`, which rounds towards zero. +// +// Args: +// input (Tensor): the input tensor. +// decimals (int): Number of decimal places to round to (default: 0). +// If decimals is negative, it specifies the number of positions +// to the left of the decimal point. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) +// tensor([ 5., -2., 9., -8.]) +// +// >>> # Values equidistant from two integers are rounded towards the +// >>> # the nearest even value (zero is treated as even) +// >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) +// tensor([-0., 0., 2., 2.]) +// +// >>> # A positive decimals argument rounds to the to that decimal place +// >>> torch.round(torch.tensor([0.1234567]), decimals=3) +// tensor([0.1230]) +// +// >>> # A negative decimals argument rounds to the left of the decimal +// >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) +// tensor([1000.]) +// +// +//go:linkname Round py.round +func Round(input *py.Object) *py.Object +// None +// +//go:linkname Round_ py.round_ +func Round_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname RowIndicesCopy py.row_indices_copy +func RowIndicesCopy(__llgo_va_list ...interface{}) *py.Object +// +// row_stack(tensors, *, out=None) -> Tensor +// +// Alias of :func:`torch.vstack`. +// +// +//go:linkname RowStack py.row_stack +func RowStack(tensors *py.Object) *py.Object +// None +// +//go:linkname Rrelu py.rrelu +func Rrelu(__llgo_va_list ...interface{}) *py.Object +// +// rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor +// +// In-place version of :func:`~rrelu`. +// +// +//go:linkname Rrelu_ py.rrelu_ +func Rrelu_(input *py.Object, lower *py.Object, upper *py.Object, training *py.Object) *py.Object +// +// rsqrt(input, *, out=None) -> Tensor +// +// Returns a new tensor with the reciprocal of the square-root of each of +// the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.0370, 0.2970, 1.5420, -0.9105]) +// >>> torch.rsqrt(a) +// tensor([ nan, 1.8351, 0.8053, nan]) +// +// +//go:linkname Rsqrt py.rsqrt +func Rsqrt(input *py.Object) *py.Object +// None +// +//go:linkname Rsqrt_ py.rsqrt_ +func Rsqrt_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Rsub py.rsub +func Rsub(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Saddmm py.saddmm +func Saddmm(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname ScalarTensor py.scalar_tensor +func ScalarTensor(__llgo_va_list ...interface{}) *py.Object +// +// scatter(input, dim, index, src) -> Tensor +// +// Out-of-place version of :meth:`torch.Tensor.scatter_` +// +// +//go:linkname Scatter py.scatter +func Scatter(input *py.Object, dim *py.Object, index *py.Object, src *py.Object) *py.Object +// +// scatter_add(input, dim, index, src) -> Tensor +// +// Out-of-place version of :meth:`torch.Tensor.scatter_add_` +// +// +//go:linkname ScatterAdd py.scatter_add +func ScatterAdd(input *py.Object, dim *py.Object, index *py.Object, src *py.Object) *py.Object +// +// scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor +// +// Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` +// +// +//go:linkname ScatterReduce py.scatter_reduce +func ScatterReduce(input *py.Object, dim *py.Object, index *py.Object, src *py.Object, reduce *py.Object) *py.Object +// +// searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor +// +// Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the +// corresponding values in :attr:`values` were inserted before the indices, when sorted, the order +// of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. +// Return a new tensor with the same size as :attr:`values`. 
More formally, +// the returned index satisfies the following rules: +// +// .. list-table:: +// :widths: 12 10 78 +// :header-rows: 1 +// +// * - :attr:`sorted_sequence` +// - :attr:`right` +// - *returned index satisfies* +// * - 1-D +// - False +// - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` +// * - 1-D +// - True +// - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` +// * - N-D +// - False +// - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` +// * - N-D +// - True +// - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` +// +// Args: +// sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* +// dimension unless :attr:`sorter` is provided, in which case the sequence does not +// need to be sorted +// values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). +// +// Keyword args: +// out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. +// Default value is False, i.e. default output data type is torch.int64. +// right (bool, optional): if False, return the first suitable location that is found. If True, return the +// last such index. If no suitable index found, return 0 for non-numerical value +// (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence` +// (one pass the last index of the *innermost* dimension). In other words, if False, +// gets the lower bound index for each value in :attr:`values` on the corresponding +// *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper +// bound index instead. Default value is False. :attr:`side` does the same and is +// preferred. It will error if :attr:`side` is set to "left" while this is True. +// side (str, optional): the same as :attr:`right` but preferred. 
"left" corresponds to False for :attr:`right` +// and "right" corresponds to True for :attr:`right`. It will error if this is set to +// "left" while :attr:`right` is True. +// out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. +// sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted +// :attr:`sorted_sequence` containing a sequence of indices that sort it in the +// ascending order on the innermost dimension +// +// +// Example:: +// +// >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) +// >>> sorted_sequence +// tensor([[ 1, 3, 5, 7, 9], +// [ 2, 4, 6, 8, 10]]) +// >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) +// >>> values +// tensor([[3, 6, 9], +// [3, 6, 9]]) +// >>> torch.searchsorted(sorted_sequence, values) +// tensor([[1, 3, 4], +// [1, 2, 4]]) +// >>> torch.searchsorted(sorted_sequence, values, side='right') +// tensor([[2, 3, 5], +// [1, 3, 4]]) +// +// >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) +// >>> sorted_sequence_1d +// tensor([1, 3, 5, 7, 9]) +// >>> torch.searchsorted(sorted_sequence_1d, values) +// tensor([[1, 3, 4], +// [1, 3, 4]]) +// +// +//go:linkname Searchsorted py.searchsorted +func Searchsorted(sortedSequence *py.Object, values *py.Object) *py.Object +// None +// +//go:linkname SegmentReduce py.segment_reduce +func SegmentReduce(__llgo_va_list ...interface{}) *py.Object +// +// select(input, dim, index) -> Tensor +// +// Slices the :attr:`input` tensor along the selected dimension at the given index. +// This function returns a view of the original tensor with the given dimension removed. +// +// .. note:: If :attr:`input` is a sparse tensor and returning a view of +// the tensor is not possible, a RuntimeError exception is +// raised. In this is the case, consider using +// :func:`torch.select_copy` function. +// +// Args: +// input (Tensor): the input tensor. 
+// dim (int): the dimension to slice +// index (int): the index to select with +// +// .. note:: +// +// :meth:`select` is equivalent to slicing. For example, +// ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and +// ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. +// +// +//go:linkname Select py.select +func Select(input *py.Object, dim *py.Object, index *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.select`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname SelectCopy py.select_copy +func SelectCopy(__llgo_va_list ...interface{}) *py.Object +// +// select_scatter(input, src, dim, index) -> Tensor +// +// Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index. +// This function returns a tensor with fresh storage; it does not create a view. +// +// +// Args: +// input (Tensor): the input tensor. +// src (Tensor): The tensor to embed into :attr:`input` +// dim (int): the dimension to insert the slice into. +// index (int): the index to select with +// +// .. note:: +// +// :attr:`src` must be of the proper size in order to be embedded +// into :attr:`input`. Specifically, it should have the same shape as +// ``torch.select(input, dim, index)`` +// +// Example:: +// +// >>> a = torch.zeros(2, 2) +// >>> b = torch.ones(2) +// >>> a.select_scatter(b, 0, 0) +// tensor([[1., 1.], +// [0., 0.]]) +// +// +//go:linkname SelectScatter py.select_scatter +func SelectScatter(input *py.Object, src *py.Object, dim *py.Object, index *py.Object) *py.Object +// None +// +//go:linkname Selu py.selu +func Selu(__llgo_va_list ...interface{}) *py.Object +// +// selu_(input) -> Tensor +// +// In-place version of :func:`~selu`. +// +// +//go:linkname Selu_ py.selu_ +func Selu_(input *py.Object) *py.Object +// +// sgn(input, *, out=None) -> Tensor +// +// This function is an extension of torch.sign() to complex tensors. 
+// It computes a new tensor whose elements have +// the same angles as the corresponding elements of :attr:`input` and +// absolute values (i.e. magnitudes) of one for complex tensors and +// is equivalent to torch.sign() for non-complex tensors. +// +// .. math:: +// \text{out}_{i} = \begin{cases} +// 0 & |\text{{input}}_i| == 0 \\ +// \frac{{\text{{input}}_i}}{|{\text{{input}}_i}|} & \text{otherwise} +// \end{cases} +// +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j]) +// >>> t.sgn() +// tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j]) +// +// +//go:linkname Sgn py.sgn +func Sgn(input *py.Object) *py.Object +// +// sigmoid(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.expit`. +// +// +//go:linkname Sigmoid py.sigmoid +func Sigmoid(input *py.Object) *py.Object +// None +// +//go:linkname Sigmoid_ py.sigmoid_ +func Sigmoid_(__llgo_va_list ...interface{}) *py.Object +// +// sign(input, *, out=None) -> Tensor +// +// Returns a new tensor with the signs of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \operatorname{sgn}(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) +// >>> a +// tensor([ 0.7000, -1.2000, 0.0000, 2.3000]) +// >>> torch.sign(a) +// tensor([ 1., -1., 0., 1.]) +// +// +//go:linkname Sign py.sign +func Sign(input *py.Object) *py.Object +// +// signbit(input, *, out=None) -> Tensor +// +// Tests if each element of :attr:`input` has its sign bit set or not. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) +// >>> torch.signbit(a) +// tensor([ False, True, False, False]) +// >>> a = torch.tensor([-0.0, 0.0]) +// >>> torch.signbit(a) +// tensor([ True, False]) +// +// .. note:: +// signbit handles signed zeros, so negative zero (-0) returns True. +// +// +// +//go:linkname Signbit py.signbit +func Signbit(input *py.Object) *py.Object +// +// sin(input, *, out=None) -> Tensor +// +// Returns a new tensor with the sine of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \sin(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-0.5461, 0.1347, -2.7266, -0.2746]) +// >>> torch.sin(a) +// tensor([-0.5194, 0.1343, -0.4032, -0.2711]) +// +// +//go:linkname Sin py.sin +func Sin(input *py.Object) *py.Object +// None +// +//go:linkname Sin_ py.sin_ +func Sin_(__llgo_va_list ...interface{}) *py.Object +// +// sinc(input, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.sinc`. +// +// +//go:linkname Sinc py.sinc +func Sinc(input *py.Object) *py.Object +// None +// +//go:linkname Sinc_ py.sinc_ +func Sinc_(__llgo_va_list ...interface{}) *py.Object +// +// sinh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the hyperbolic sine of the elements of +// :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \sinh(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) +// >>> torch.sinh(a) +// tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) +// +// .. note:: +// When :attr:`input` is on the CPU, the implementation of torch.sinh may use +// the Sleef library, which rounds very large results to infinity or negative +// infinity. 
See `here `_ for details. +// +// +//go:linkname Sinh py.sinh +func Sinh(input *py.Object) *py.Object +// None +// +//go:linkname Sinh_ py.sinh_ +func Sinh_(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.slice`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname SliceCopy py.slice_copy +func SliceCopy(__llgo_va_list ...interface{}) *py.Object +// +// slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor +// +// Embeds the values of the :attr:`src` tensor into :attr:`input` at the given +// dimension. +// This function returns a tensor with fresh storage; it does not create a view. +// +// +// Args: +// input (Tensor): the input tensor. +// src (Tensor): The tensor to embed into :attr:`input` +// dim (int): the dimension to insert the slice into +// start (Optional[int]): the start index of where to insert the slice +// end (Optional[int]): the end index of where to insert the slice +// step (int): the how many elements to skip in +// +// Example:: +// +// >>> a = torch.zeros(8, 8) +// >>> b = torch.ones(2, 8) +// >>> a.slice_scatter(b, start=6) +// tensor([[0., 0., 0., 0., 0., 0., 0., 0.], +// [0., 0., 0., 0., 0., 0., 0., 0.], +// [0., 0., 0., 0., 0., 0., 0., 0.], +// [0., 0., 0., 0., 0., 0., 0., 0.], +// [0., 0., 0., 0., 0., 0., 0., 0.], +// [0., 0., 0., 0., 0., 0., 0., 0.], +// [1., 1., 1., 1., 1., 1., 1., 1.], +// [1., 1., 1., 1., 1., 1., 1., 1.]]) +// +// >>> b = torch.ones(8, 2) +// >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2) +// tensor([[0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.], +// [0., 0., 1., 0., 1., 0., 0., 0.]]) +// +// +//go:linkname SliceScatter py.slice_scatter +func SliceScatter(input *py.Object, src *py.Object, 
dim *py.Object, start *py.Object, end *py.Object, step *py.Object) *py.Object +// +// slogdet(input) -> (Tensor, Tensor) +// +// Alias for :func:`torch.linalg.slogdet` +// +// +//go:linkname Slogdet py.slogdet +func Slogdet(input *py.Object) *py.Object +// +// smm(input, mat) -> Tensor +// +// Performs a matrix multiplication of the sparse matrix :attr:`input` +// with the dense matrix :attr:`mat`. +// +// Args: +// input (Tensor): a sparse matrix to be matrix multiplied +// mat (Tensor): a dense matrix to be matrix multiplied +// +// +//go:linkname Smm py.smm +func Smm(input *py.Object, mat *py.Object) *py.Object +// +// softmax(input, dim, *, dtype=None) -> Tensor +// +// Alias for :func:`torch.nn.functional.softmax`. +// +// +//go:linkname Softmax py.softmax +func Softmax(input *py.Object, dim *py.Object) *py.Object +// +// sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) +// +// Sorts the elements of the :attr:`input` tensor along a given dimension +// in ascending order by value. +// +// If :attr:`dim` is not given, the last dimension of the `input` is chosen. +// +// If :attr:`descending` is ``True`` then the elements are sorted in descending +// order by value. +// +// If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +// the order of equivalent elements. +// +// A namedtuple of (values, indices) is returned, where the `values` are the +// sorted values and `indices` are the indices of the elements in the original +// `input` tensor. +// +// Args: +// input (Tensor): the input tensor. +// dim (int, optional): the dimension to sort along +// descending (bool, optional): controls the sorting order (ascending or descending) +// stable (bool, optional): makes the sorting routine stable, which guarantees that the order +// of equivalent elements is preserved. 
+// +// Keyword args: +// out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can +// be optionally given to be used as output buffers +// +// Example:: +// +// >>> x = torch.randn(3, 4) +// >>> sorted, indices = torch.sort(x) +// >>> sorted +// tensor([[-0.2162, 0.0608, 0.6719, 2.3332], +// [-0.5793, 0.0061, 0.6058, 0.9497], +// [-0.5071, 0.3343, 0.9553, 1.0960]]) +// >>> indices +// tensor([[ 1, 0, 2, 3], +// [ 3, 1, 0, 2], +// [ 0, 3, 1, 2]]) +// +// >>> sorted, indices = torch.sort(x, 0) +// >>> sorted +// tensor([[-0.5071, -0.2162, 0.6719, -0.5793], +// [ 0.0608, 0.0061, 0.9497, 0.3343], +// [ 0.6058, 0.9553, 1.0960, 2.3332]]) +// >>> indices +// tensor([[ 2, 0, 0, 1], +// [ 0, 1, 1, 2], +// [ 1, 2, 2, 0]]) +// >>> x = torch.tensor([0, 1] * 9) +// >>> x.sort() +// torch.return_types.sort( +// values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), +// indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) +// >>> x.sort(stable=True) +// torch.return_types.sort( +// values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), +// indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) +// +// +//go:linkname Sort py.sort +func Sort(input *py.Object, dim *py.Object, descending *py.Object, stable *py.Object) *py.Object +// sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse +// Column)) ` with specified 2-dimensional blocks at the +// given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +// multiplication operations in BSC format are typically faster than that +// for sparse tensors in COO format. Make you have a look at :ref:`the +// note on the data type of the indices `. +// +// .. 
note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// ccol_indices (array_like): (B+1)-dimensional array of size +// ``(*batchsize, ncolblocks + 1)``. The last element of each +// batch is the number of non-zeros. This tensor encodes the +// index in values and row_indices depending on where the given +// column starts. Each successive number in the tensor subtracted +// by the number before it denotes the number of elements in a +// given column. +// row_indices (array_like): Row block co-ordinates of each block in +// values. (B+1)-dimensional tensor with the same length +// as values. +// values (array_list): Initial blocks for the tensor. Can be a list, +// tuple, NumPy ``ndarray``, and other types that +// represents a (1 + 2 + K)-dimensional tensor where ``K`` is the +// number of dense dimensions. +// size (list, tuple, :class:`torch.Size`, optional): Size of the +// sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * +// blocksize[1], *densesize)`` If not provided, the size will be +// inferred as the minimum size big enough to hold all non-zero +// blocks. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. Default: if None, infers data type from +// :attr:`values`. +// device (:class:`torch.device`, optional): the desired device of +// returned tensor. Default: if None, uses the current device +// for the default tensor type (see +// :func:`torch.set_default_tensor_type`). :attr:`device` will be +// the CPU for CPU tensor types and the current CUDA device for +// CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. 
+// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. +// +// Example:: +// >>> ccol_indices = [0, 1, 2] +// >>> row_indices = [0, 1] +// >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] +// >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), +// ... torch.tensor(row_indices, dtype=torch.int64), +// ... torch.tensor(values), dtype=torch.double) +// tensor(ccol_indices=tensor([0, 1, 2]), +// row_indices=tensor([0, 1]), +// values=tensor([[[1., 2.], +// [3., 4.]], +// [[5., 6.], +// [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, +// layout=torch.sparse_bsc) +// +// +//go:linkname SparseBscTensor py.sparse_bsc_tensor +func SparseBscTensor(ccolIndices *py.Object, rowIndices *py.Object, values *py.Object, size *py.Object) *py.Object +// sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)) +// ` with specified 2-dimensional blocks at the given +// :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix +// multiplication operations in BSR format are typically faster than that +// for sparse tensors in COO format. Make you have a look at :ref:`the +// note on the data type of the indices `. +// +// .. note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// crow_indices (array_like): (B+1)-dimensional array of size +// ``(*batchsize, nrowblocks + 1)``. The last element of each +// batch is the number of non-zeros. 
This tensor encodes the +// block index in values and col_indices depending on where the +// given row block starts. Each successive number in the tensor +// subtracted by the number before it denotes the number of +// blocks in a given row. +// col_indices (array_like): Column block co-ordinates of each block +// in values. (B+1)-dimensional tensor with the same length as +// values. +// values (array_list): Initial values for the tensor. Can be a list, +// tuple, NumPy ``ndarray``, scalar, and other types that +// represents a (1 + 2 + K)-dimensional tensor where ``K`` is the +// number of dense dimensions. +// size (list, tuple, :class:`torch.Size`, optional): Size of the +// sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * +// blocksize[1], *densesize)`` where ``blocksize == +// values.shape[1:3]``. If not provided, the size will be +// inferred as the minimum size big enough to hold all non-zero +// blocks. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. Default: if None, infers data type from +// :attr:`values`. +// device (:class:`torch.device`, optional): the desired device of +// returned tensor. Default: if None, uses the current device +// for the default tensor type (see +// :func:`torch.set_default_tensor_type`). :attr:`device` will be +// the CPU for CPU tensor types and the current CUDA device for +// CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. +// +// Example:: +// >>> crow_indices = [0, 1, 2] +// >>> col_indices = [0, 1] +// >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] +// >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64), +// ... 
torch.tensor(col_indices, dtype=torch.int64), +// ... torch.tensor(values), dtype=torch.double) +// tensor(crow_indices=tensor([0, 1, 2]), +// col_indices=tensor([0, 1]), +// values=tensor([[[1., 2.], +// [3., 4.]], +// [[5., 6.], +// [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, +// layout=torch.sparse_bsr) +// +// +//go:linkname SparseBsrTensor py.sparse_bsr_tensor +func SparseBsrTensor(crowIndices *py.Object, colIndices *py.Object, values *py.Object, size *py.Object) *py.Object +// sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR, +// CSC, BSR, or BSC - ` with specified values at +// the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse +// matrix multiplication operations in Compressed Sparse format are +// typically faster than that for sparse tensors in COO format. Make you +// have a look at :ref:`the note on the data type of the indices +// `. +// +// .. note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// compressed_indices (array_like): (B+1)-dimensional array of size +// ``(*batchsize, compressed_dim_size + 1)``. The last element of +// each batch is the number of non-zero elements or blocks. This +// tensor encodes the index in ``values`` and ``plain_indices`` +// depending on where the given compressed dimension (row or +// column) starts. Each successive number in the tensor +// subtracted by the number before it denotes the number of +// elements or blocks in a given compressed dimension. 
+// plain_indices (array_like): Plain dimension (column or row) +// co-ordinates of each element or block in values. (B+1)-dimensional +// tensor with the same length as values. +// +// values (array_list): Initial values for the tensor. Can be a list, +// tuple, NumPy ``ndarray``, scalar, and other types. that +// represents a (1+K)-dimensional (for CSR and CSC layouts) or +// (1+2+K)-dimensional tensor (for BSR and BSC layouts) where +// ``K`` is the number of dense dimensions. +// size (list, tuple, :class:`torch.Size`, optional): Size of the +// sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * +// blocksize[1], *densesize)`` where ``blocksize[0] == +// blocksize[1] == 1`` for CSR and CSC formats. If not provided, +// the size will be inferred as the minimum size big enough to +// hold all non-zero elements or blocks. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. Default: if None, infers data type from +// :attr:`values`. +// layout (:class:`torch.layout`, required): the desired layout of +// returned tensor: :attr:`torch.sparse_csr`, +// :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or +// :attr:`torch.sparse_bsc`. +// device (:class:`torch.device`, optional): the desired device of +// returned tensor. Default: if None, uses the current device +// for the default tensor type (see +// :func:`torch.set_default_tensor_type`). :attr:`device` will be +// the CPU for CPU tensor types and the current CUDA device for +// CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. 
+// +// Example:: +// >>> compressed_indices = [0, 2, 4] +// >>> plain_indices = [0, 1, 0, 1] +// >>> values = [1, 2, 3, 4] +// >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64), +// ... torch.tensor(plain_indices, dtype=torch.int64), +// ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr) +// tensor(crow_indices=tensor([0, 2, 4]), +// col_indices=tensor([0, 1, 0, 1]), +// values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, +// dtype=torch.float64, layout=torch.sparse_csr) +// +// +//go:linkname SparseCompressedTensor py.sparse_compressed_tensor +func SparseCompressedTensor(compressedIndices *py.Object, plainIndices *py.Object, values *py.Object, size *py.Object) *py.Object +// sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in COO(rdinate) format +// ` with specified values at the given +// :attr:`indices`. +// +// .. note:: +// +// This function returns an :ref:`uncoalesced tensor +// ` when :attr:`is_coalesced` is +// unspecified or ``None``. +// +// .. note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// indices (array_like): Initial data for the tensor. Can be a list, tuple, +// NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor` +// internally. The indices are the coordinates of the non-zero values in the matrix, and thus +// should be two-dimensional where the first dimension is the number of tensor dimensions and +// the second dimension is the number of non-zero values. +// values (array_like): Initial values for the tensor. 
Can be a list, tuple, +// NumPy ``ndarray``, scalar, and other types. +// size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not +// provided the size will be inferred as the minimum size big enough to hold all non-zero +// elements. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if None, infers data type from :attr:`values`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if None, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. +// is_coalesced (bool, optional): When``True``, the caller is +// responsible for providing tensor indices that correspond to a +// coalesced tensor. If the :attr:`check_invariants` flag is +// False, no error will be raised if the prerequisites are not +// met and this will lead to silently incorrect results. To force +// coalescion please use :meth:`coalesce` on the resulting +// Tensor. +// Default: None: except for trivial cases (e.g. nnz < 2) the +// resulting Tensor has is_coalesced set to ``False```. +// +// Example:: +// +// >>> i = torch.tensor([[0, 1, 1], +// ... 
[2, 0, 2]]) +// >>> v = torch.tensor([3, 4, 5], dtype=torch.float32) +// >>> torch.sparse_coo_tensor(i, v, [2, 4]) +// tensor(indices=tensor([[0, 1, 1], +// [2, 0, 2]]), +// values=tensor([3., 4., 5.]), +// size=(2, 4), nnz=3, layout=torch.sparse_coo) +// +// >>> torch.sparse_coo_tensor(i, v) # Shape inference +// tensor(indices=tensor([[0, 1, 1], +// [2, 0, 2]]), +// values=tensor([3., 4., 5.]), +// size=(2, 3), nnz=3, layout=torch.sparse_coo) +// +// >>> torch.sparse_coo_tensor(i, v, [2, 4], +// ... dtype=torch.float64, +// ... device=torch.device('cuda:0')) +// tensor(indices=tensor([[0, 1, 1], +// [2, 0, 2]]), +// values=tensor([3., 4., 5.]), +// device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64, +// layout=torch.sparse_coo) +// +// # Create an empty sparse tensor with the following invariants: +// # 1. sparse_dim + dense_dim = len(SparseTensor.shape) +// # 2. SparseTensor._indices().shape = (sparse_dim, nnz) +// # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:]) +// # +// # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and +// # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0)) +// >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]) +// tensor(indices=tensor([], size=(1, 0)), +// values=tensor([], size=(0,)), +// size=(1,), nnz=0, layout=torch.sparse_coo) +// +// # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and +// # sparse_dim = 1 +// >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2]) +// tensor(indices=tensor([], size=(1, 0)), +// values=tensor([], size=(0, 2)), +// size=(1, 2), nnz=0, layout=torch.sparse_coo) +// +// .. 
_torch.sparse: https://pytorch.org/docs/stable/sparse.html +// +// +//go:linkname SparseCooTensor py.sparse_coo_tensor +func SparseCooTensor(indices *py.Object, values *py.Object, size *py.Object) *py.Object +// sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column) +// ` with specified values at the given +// :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +// multiplication operations in CSC format are typically faster than that +// for sparse tensors in COO format. Make you have a look at :ref:`the +// note on the data type of the indices `. +// +// .. note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// ccol_indices (array_like): (B+1)-dimensional array of size +// ``(*batchsize, ncols + 1)``. The last element of each batch +// is the number of non-zeros. This tensor encodes the index in +// values and row_indices depending on where the given column +// starts. Each successive number in the tensor subtracted by the +// number before it denotes the number of elements in a given +// column. +// row_indices (array_like): Row co-ordinates of each element in +// values. (B+1)-dimensional tensor with the same length as +// values. +// values (array_list): Initial values for the tensor. Can be a list, +// tuple, NumPy ``ndarray``, scalar, and other types that +// represents a (1+K)-dimensional tensor where ``K`` is the number +// of dense dimensions. +// size (list, tuple, :class:`torch.Size`, optional): Size of the +// sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. 
If +// not provided, the size will be inferred as the minimum size +// big enough to hold all non-zero elements. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. Default: if None, infers data type from +// :attr:`values`. +// device (:class:`torch.device`, optional): the desired device of +// returned tensor. Default: if None, uses the current device +// for the default tensor type (see +// :func:`torch.set_default_tensor_type`). :attr:`device` will be +// the CPU for CPU tensor types and the current CUDA device for +// CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. +// +// Example:: +// >>> ccol_indices = [0, 2, 4] +// >>> row_indices = [0, 1, 0, 1] +// >>> values = [1, 2, 3, 4] +// >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), +// ... torch.tensor(row_indices, dtype=torch.int64), +// ... torch.tensor(values), dtype=torch.double) +// tensor(ccol_indices=tensor([0, 2, 4]), +// row_indices=tensor([0, 1, 0, 1]), +// values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, +// dtype=torch.float64, layout=torch.sparse_csc) +// +// +//go:linkname SparseCscTensor py.sparse_csc_tensor +func SparseCscTensor(ccolIndices *py.Object, rowIndices *py.Object, values *py.Object, size *py.Object) *py.Object +// sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor +// +// Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified +// values at the given :attr:`crow_indices` and :attr:`col_indices`. 
Sparse matrix multiplication operations +// in CSR format are typically faster than that for sparse tensors in COO format. Make you have a look +// at :ref:`the note on the data type of the indices `. +// +// .. note:: +// +// If the ``device`` argument is not specified the device of the given +// :attr:`values` and indices tensor(s) must match. If, however, the +// argument is specified the input Tensors will be converted to the +// given device and in turn determine the device of the constructed +// sparse tensor. +// +// Args: +// crow_indices (array_like): (B+1)-dimensional array of size +// ``(*batchsize, nrows + 1)``. The last element of each batch +// is the number of non-zeros. This tensor encodes the index in +// values and col_indices depending on where the given row +// starts. Each successive number in the tensor subtracted by the +// number before it denotes the number of elements in a given +// row. +// col_indices (array_like): Column co-ordinates of each element in +// values. (B+1)-dimensional tensor with the same length +// as values. +// values (array_list): Initial values for the tensor. Can be a list, +// tuple, NumPy ``ndarray``, scalar, and other types that +// represents a (1+K)-dimensional tensor where ``K`` is the number +// of dense dimensions. +// size (list, tuple, :class:`torch.Size`, optional): Size of the +// sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If +// not provided, the size will be inferred as the minimum size +// big enough to hold all non-zero elements. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of +// returned tensor. Default: if None, infers data type from +// :attr:`values`. +// device (:class:`torch.device`, optional): the desired device of +// returned tensor. Default: if None, uses the current device +// for the default tensor type (see +// :func:`torch.set_default_tensor_type`). 
:attr:`device` will be +// the CPU for CPU tensor types and the current CUDA device for +// CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// check_invariants (bool, optional): If sparse tensor invariants are checked. +// Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, +// initially False. +// +// Example:: +// >>> crow_indices = [0, 2, 4] +// >>> col_indices = [0, 1, 0, 1] +// >>> values = [1, 2, 3, 4] +// >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64), +// ... torch.tensor(col_indices, dtype=torch.int64), +// ... torch.tensor(values), dtype=torch.double) +// tensor(crow_indices=tensor([0, 2, 4]), +// col_indices=tensor([0, 1, 0, 1]), +// values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, +// dtype=torch.float64, layout=torch.sparse_csr) +// +// +//go:linkname SparseCsrTensor py.sparse_csr_tensor +func SparseCsrTensor(crowIndices *py.Object, colIndices *py.Object, values *py.Object, size *py.Object) *py.Object +// Splits the tensor into chunks. Each chunk is a view of the original tensor. +// +// If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will +// be split into equally sized chunks (if possible). Last chunk will be smaller if +// the tensor size along the given dimension :attr:`dim` is not divisible by +// :attr:`split_size`. +// +// If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split +// into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according +// to :attr:`split_size_or_sections`. +// +// Args: +// tensor (Tensor): tensor to split. +// split_size_or_sections (int) or (list(int)): size of a single chunk or +// list of sizes for each chunk +// dim (int): dimension along which to split the tensor. 
+// +// Example:: +// +// >>> a = torch.arange(10).reshape(5, 2) +// >>> a +// tensor([[0, 1], +// [2, 3], +// [4, 5], +// [6, 7], +// [8, 9]]) +// >>> torch.split(a, 2) +// (tensor([[0, 1], +// [2, 3]]), +// tensor([[4, 5], +// [6, 7]]), +// tensor([[8, 9]])) +// >>> torch.split(a, [1, 4]) +// (tensor([[0, 1]]), +// tensor([[2, 3], +// [4, 5], +// [6, 7], +// [8, 9]])) +// +// +//go:linkname Split py.split +func Split(tensor *py.Object, splitSizeOrSections *py.Object, dim *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.split`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname SplitCopy py.split_copy +func SplitCopy(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SplitWithSizes py.split_with_sizes +func SplitWithSizes(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname SplitWithSizesCopy py.split_with_sizes_copy +func SplitWithSizesCopy(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Spmm py.spmm +func Spmm(__llgo_va_list ...interface{}) *py.Object +// +// sqrt(input, *, out=None) -> Tensor +// +// Returns a new tensor with the square-root of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \sqrt{\text{input}_{i}} +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-2.0755, 1.0226, 0.0831, 0.4806]) +// >>> torch.sqrt(a) +// tensor([ nan, 1.0112, 0.2883, 0.6933]) +// +// +//go:linkname Sqrt py.sqrt +func Sqrt(input *py.Object) *py.Object +// None +// +//go:linkname Sqrt_ py.sqrt_ +func Sqrt_(__llgo_va_list ...interface{}) *py.Object +// +// square(input, *, out=None) -> Tensor +// +// Returns a new tensor with the square of the elements of :attr:`input`. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-2.0755, 1.0226, 0.0831, 0.4806]) +// >>> torch.square(a) +// tensor([ 4.3077, 1.0457, 0.0069, 0.2310]) +// +// +//go:linkname Square py.square +func Square(input *py.Object) *py.Object +// None +// +//go:linkname Square_ py.square_ +func Square_(__llgo_va_list ...interface{}) *py.Object +// +// squeeze(input, dim=None) -> Tensor +// +// Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. +// +// For example, if `input` is of shape: +// :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` +// will be of shape: :math:`(A \times B \times C \times D)`. +// +// When :attr:`dim` is given, a squeeze operation is done only in the given +// dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, +// ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` +// will squeeze the tensor to the shape :math:`(A \times B)`. +// +// .. note:: The returned tensor shares the storage with the input tensor, +// so changing the contents of one will change the contents of the other. +// +// .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` +// will also remove the batch dimension, which can lead to unexpected +// errors. Consider specifying only the dims you wish to be squeezed. 
+// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints, optional): if given, the input will be squeezed +// only in the specified dimensions. +// +// .. versionchanged:: 2.0 +// :attr:`dim` now accepts tuples of dimensions. +// +// Example:: +// +// >>> x = torch.zeros(2, 1, 2, 1, 2) +// >>> x.size() +// torch.Size([2, 1, 2, 1, 2]) +// >>> y = torch.squeeze(x) +// >>> y.size() +// torch.Size([2, 2, 2]) +// >>> y = torch.squeeze(x, 0) +// >>> y.size() +// torch.Size([2, 1, 2, 1, 2]) +// >>> y = torch.squeeze(x, 1) +// >>> y.size() +// torch.Size([2, 2, 1, 2]) +// >>> y = torch.squeeze(x, (1, 2, 3)) +// torch.Size([2, 2, 2]) +// +// +//go:linkname Squeeze py.squeeze +func Squeeze(input *py.Object, dim *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.squeeze`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname SqueezeCopy py.squeeze_copy +func SqueezeCopy(__llgo_va_list ...interface{}) *py.Object +// +// sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor +// +// Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor +// :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. +// +// Note: This function is equivalent to :func:`torch.addmm`, except +// :attr:`input` and :attr:`mat1` are sparse. +// +// Args: +// input (Tensor): a sparse matrix to be added +// mat1 (Tensor): a sparse matrix to be matrix multiplied +// mat2 (Tensor): a dense matrix to be matrix multiplied +// +// Keyword args: +// beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) +// alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) +// out (Tensor, optional): the output tensor. +// +// +//go:linkname Sspaddmm py.sspaddmm +func Sspaddmm(input *py.Object, mat1 *py.Object, mat2 *py.Object) *py.Object +// +// stack(tensors, dim=0, *, out=None) -> Tensor +// +// Concatenates a sequence of tensors along a new dimension. 
+// +// All tensors need to be of the same size. +// +// .. seealso:: +// +// :func:`torch.cat` concatenates the given sequence along an existing dimension. +// +// Arguments: +// tensors (sequence of Tensors): sequence of tensors to concatenate +// dim (int): dimension to insert. Has to be between 0 and the number +// of dimensions of concatenated tensors (inclusive) +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// +//go:linkname Stack py.stack +func Stack(tensors *py.Object, dim *py.Object) *py.Object +// +// std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor +// +// Calculates the standard deviation over the dimensions specified by :attr:`dim`. +// :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +// reduce over all dimensions. +// +// The standard deviation (:math:`\sigma`) is calculated as +// +// .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} +// +// where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +// sample mean, :math:`N` is the number of samples and :math:`\delta N` is +// the :attr:`correction`. +// +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// dim (int or tuple of ints): the dimension or dimensions to reduce. +// +// Keyword args: +// correction (int): difference between the sample size and sample degrees of freedom. +// Defaults to `Bessel's correction`_, ``correction=1``. +// +// .. versionchanged:: 2.0 +// Previously this argument was called ``unbiased`` and was a boolean +// with ``True`` corresponding to ``correction=1`` and ``False`` being +// ``correction=0``. 
+// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// out (Tensor, optional): the output tensor. +// +// Example: +// +// >>> a = torch.tensor( +// ... [[ 0.2035, 1.2959, 1.8101, -0.4644], +// ... [ 1.5027, -0.3270, 0.5905, 0.6538], +// ... [-1.5745, 1.3330, -0.5596, -0.6548], +// ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) +// >>> torch.std(a, dim=1, keepdim=True) +// tensor([[1.0311], +// [0.7477], +// [1.2204], +// [0.9087]]) +// +// .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction +// +// +// +//go:linkname Std py.std +func Std(input *py.Object, dim *py.Object) *py.Object +// +// std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) +// +// Calculates the standard deviation and mean over the dimensions specified by +// :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or +// ``None`` to reduce over all dimensions. +// +// The standard deviation (:math:`\sigma`) is calculated as +// +// .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} +// +// where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +// sample mean, :math:`N` is the number of samples and :math:`\delta N` is +// the :attr:`correction`. +// +// +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// +// Keyword args: +// correction (int): difference between the sample size and sample degrees of freedom. +// Defaults to `Bessel's correction`_, ``correction=1``. +// +// .. 
versionchanged:: 2.0 +// Previously this argument was called ``unbiased`` and was a boolean +// with ``True`` corresponding to ``correction=1`` and ``False`` being +// ``correction=0``. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// out (Tensor, optional): the output tensor. +// +// Returns: +// A tuple (std, mean) containing the standard deviation and mean. +// +// Example: +// +// >>> a = torch.tensor( +// ... [[ 0.2035, 1.2959, 1.8101, -0.4644], +// ... [ 1.5027, -0.3270, 0.5905, 0.6538], +// ... [-1.5745, 1.3330, -0.5596, -0.6548], +// ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) +// >>> torch.std_mean(a, dim=0, keepdim=True) +// (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), +// tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) +// +// .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction +// +// +// +//go:linkname StdMean py.std_mean +func StdMean(input *py.Object, dim *py.Object) *py.Object +// Short-time Fourier transform (STFT). +// +// .. warning:: +// From version 1.8.0, :attr:`return_complex` must always be given +// explicitly for real inputs and `return_complex=False` has been +// deprecated. Strongly prefer `return_complex=True` as in a future +// pytorch release, this function will only return complex tensors. +// +// Note that :func:`torch.view_as_real` can be used to recover a real +// tensor with an extra last dimension for real and imaginary components. +// +// .. warning:: +// From version 2.1, a warning will be provided if a :attr:`window` is +// not specified. In a future release, this attribute will be required. +// Not providing a window currently defaults to using a rectangular window, +// which may result in undesirable artifacts. Consider using tapered windows, +// such as :func:`torch.hann_window`. +// +// The STFT computes the Fourier transform of short overlapping windows of the +// input. This giving frequency components of the signal as they change over +// time. 
The interface of this function is modeled after (but *not* a drop-in +// replacement for) librosa_ stft function. +// +// .. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html +// +// Ignoring the optional batch dimension, this method computes the following +// expression: +// +// .. math:: +// X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}% +// \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ % +// \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right), +// +// where :math:`m` is the index of the sliding window, and :math:`\omega` is +// the frequency :math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``, +// or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for ``onesided=True``. +// +// * :attr:`input` must be either a 1-D time sequence or a 2-D batch of time +// sequences. +// +// * If :attr:`hop_length` is ``None`` (default), it is treated as equal to +// ``floor(n_fft / 4)``. +// +// * If :attr:`win_length` is ``None`` (default), it is treated as equal to +// :attr:`n_fft`. +// +// * :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from +// :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is +// treated as if having :math:`1` everywhere in the window. If +// :math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on +// both sides to length :attr:`n_fft` before being applied. +// +// * If :attr:`center` is ``True`` (default), :attr:`input` will be padded on +// both sides so that the :math:`t`-th frame is centered at time +// :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame +// begins at time :math:`t \times \text{hop\_length}`. +// +// * :attr:`pad_mode` determines the padding method used on :attr:`input` when +// :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for +// all available options. Default is ``"reflect"``. 
+// +// * If :attr:`onesided` is ``True`` (default for real input), only values for +// :math:`\omega` in :math:`\left[0, 1, 2, \dots, \left\lfloor +// \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` are returned because +// the real-to-complex Fourier transform satisfies the conjugate symmetry, +// i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`. +// Note if the input or window tensors are complex, then :attr:`onesided` +// output is not possible. +// +// * If :attr:`normalized` is ``True`` (default is ``False``), the function +// returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`. +// +// * If :attr:`return_complex` is ``True`` (default if input is complex), the +// return is a ``input.dim() + 1`` dimensional complex tensor. If ``False``, +// the output is a ``input.dim() + 2`` dimensional real tensor where the last +// dimension represents the real and imaginary components. +// +// Returns either a complex tensor of size :math:`(* \times N \times T)` if +// :attr:`return_complex` is true, or a real tensor of size :math:`(* \times N +// \times T \times 2)`. Where :math:`*` is the optional batch size of +// :attr:`input`, :math:`N` is the number of frequencies where STFT is applied +// and :math:`T` is the total number of frames used. +// +// .. warning:: +// This function changed signature at version 0.4.1. Calling with the +// previous signature may cause error or return incorrect result. +// +// Args: +// input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an optional +// batch dimension +// n_fft (int): size of Fourier transform +// hop_length (int, optional): the distance between neighboring sliding window +// frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``) +// win_length (int, optional): the size of window frame and STFT filter. +// Default: ``None`` (treated as equal to :attr:`n_fft`) +// window (Tensor, optional): the optional window function. 
+// +// Shape must be 1d and `<= n_fft` +// Default: ``None`` (treated as window of all :math:`1` s) +// center (bool, optional): whether to pad :attr:`input` on both sides so +// that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. +// Default: ``True`` +// pad_mode (str, optional): controls the padding method used when +// :attr:`center` is ``True``. Default: ``"reflect"`` +// normalized (bool, optional): controls whether to return the normalized STFT results +// Default: ``False`` +// onesided (bool, optional): controls whether to return half of results to +// avoid redundancy for real inputs. +// Default: ``True`` for real :attr:`input` and :attr:`window`, ``False`` otherwise. +// return_complex (bool, optional): whether to return a complex tensor, or +// a real tensor with an extra last dimension for the real and +// imaginary components. +// +// .. versionchanged:: 2.0 +// ``return_complex`` is now a required argument for real inputs, +// as the default is being transitioned to ``True``. +// +// .. deprecated:: 2.0 +// ``return_complex=False`` is deprecated, instead use ``return_complex=True`` +// Note that calling :func:`torch.view_as_real` on the output will +// recover the deprecated output format. +// +// Returns: +// Tensor: A tensor containing the STFT result with shape `(B?, N, T, C?)` where +// - `B?` is an optional batch dimension from the input +// - `N` is the number of frequency samples, `(n_fft // 2) + 1` for +// `onesided=True`, or otherwise `n_fft`. +// - `T` is the number of frames, `1 + L // hop_length` +// for `center=True`, or `1 + (L - n_fft) // hop_length` otherwise. +// - `C?` is an optional length-2 dimension of real and imaginary +// components, present when `return_complex=False`. 
+// +// +// +//go:linkname Stft py.stft +func Stft(input *py.Object, nFft *py.Object, hopLength *py.Object, winLength *py.Object, window *py.Object, center *py.Object, padMode *py.Object, normalized *py.Object, onesided *py.Object, returnComplex *py.Object) *py.Object +// +// sub(input, other, *, alpha=1, out=None) -> Tensor +// +// Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. +// +// .. math:: +// \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i +// +// +// Supports :ref:`broadcasting to a common shape `, +// :ref:`type promotion `, and integer, float, and complex inputs. +// +// Args: +// input (Tensor): the input tensor. +// other (Tensor or Number): the tensor or number to subtract from :attr:`input`. +// +// Keyword args: +// alpha (Number): the multiplier for :attr:`other`. +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.tensor((1, 2)) +// >>> b = torch.tensor((0, 1)) +// >>> torch.sub(a, b, alpha=2) +// tensor([1, 0]) +// +// +//go:linkname Sub py.sub +func Sub(input *py.Object, other *py.Object) *py.Object +// +// subtract(input, other, *, alpha=1, out=None) -> Tensor +// +// Alias for :func:`torch.sub`. +// +// +//go:linkname Subtract py.subtract +func Subtract(input *py.Object, other *py.Object) *py.Object +// +// sum(input, *, dtype=None) -> Tensor +// +// Returns the sum of all elements in the :attr:`input` tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> a = torch.randn(1, 3) +// >>> a +// tensor([[ 0.1133, -0.9567, 0.2958]]) +// >>> torch.sum(a) +// tensor(-0.5475) +// +// .. 
function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor +// :noindex: +// +// Returns the sum of each row of the :attr:`input` tensor in the given +// dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +// reduce over all of them. +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// If specified, the input tensor is casted to :attr:`dtype` before the operation +// is performed. This is useful for preventing data type overflows. Default: None. +// +// Example:: +// +// >>> a = torch.randn(4, 4) +// >>> a +// tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], +// [-0.2993, 0.9138, 0.9337, -1.6864], +// [ 0.1132, 0.7892, -0.1003, 0.5688], +// [ 0.3637, -0.9906, -0.4752, -1.5197]]) +// >>> torch.sum(a, 1) +// tensor([-0.4598, -0.1381, 1.3708, -2.6217]) +// >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) +// >>> torch.sum(b, (2, 1)) +// tensor([ 435., 1335., 2235., 3135.]) +// +// +//go:linkname Sum py.sum +func Sum(input *py.Object) *py.Object +// +// svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor) +// +// Computes the singular value decomposition of either a matrix or batch of +// matrices :attr:`input`. The singular value decomposition is represented as a +// namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`. 
+// where :math:`V^{\text{H}}` is the transpose of `V` for real inputs, +// and the conjugate transpose of `V` for complex inputs. +// If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also +// batched with the same batch dimensions as :attr:`input`. +// +// If :attr:`some` is `True` (default), the method returns the reduced singular +// value decomposition. In this case, if the last two dimensions of :attr:`input` are +// `m` and `n`, then the returned `U` and `V` matrices will contain only +// `min(n, m)` orthonormal columns. +// +// If :attr:`compute_uv` is `False`, the returned `U` and `V` will be +// zero-filled matrices of shape `(m, m)` and `(n, n)` +// respectively, and the same device as :attr:`input`. The argument :attr:`some` +// has no effect when :attr:`compute_uv` is `False`. +// +// Supports :attr:`input` of float, double, cfloat and cdouble data types. +// The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will +// always be real-valued, even if :attr:`input` is complex. +// +// .. warning:: +// +// :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd` +// and will be removed in a future PyTorch release. +// +// ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with +// +// .. code:: python +// +// U, S, Vh = torch.linalg.svd(A, full_matrices=not some) +// V = Vh.mH +// +// ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with +// +// .. code:: python +// +// S = torch.linalg.svdvals(A) +// +// .. note:: Differences with :func:`torch.linalg.svd`: +// +// * :attr:`some` is the opposite of +// :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that +// default value for both is `True`, so the default behavior is +// effectively the opposite. +// * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns +// `Vh`, that is, :math:`V^{\text{H}}`. 
+// * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled +// tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns +// empty tensors. +// +// .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices, +// then the singular values of each matrix in the batch are returned in descending order. +// +// .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`. +// +// .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]` +// and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors +// can be arbitrary bases of the corresponding subspaces. +// +// .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd` +// (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously, +// on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243 +// and later, and MAGMA's routine `gesdd` on earlier versions of CUDA. +// +// .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will +// be represented as a column-major matrix (i.e. Fortran-contiguous). +// +// .. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not +// have zero nor repeated singular values. +// +// .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to +// `U` and `V` will be numerically unstable, as they depend on +// :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix +// has small singular values, as these gradients also depend on `S⁻¹`. +// +// .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique, +// as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column. 
+// The same happens when :attr:`input` has repeated singular values, where one may multiply +// the columns of the spanning subspace in `U` and `V` by a rotation matrix +// and `the resulting vectors will span the same subspace`_. +// Different platforms, like NumPy, or inputs on different device types, +// may produce different `U` and `V` tensors. +// +// Args: +// input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more +// batch dimensions consisting of `(m, n)` matrices. +// some (bool, optional): controls whether to compute the reduced or full decomposition, and +// consequently, the shape of returned `U` and `V`. Default: `True`. +// compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`. +// +// Keyword args: +// out (tuple, optional): the output tuple of tensors +// +// Example:: +// +// >>> a = torch.randn(5, 3) +// >>> a +// tensor([[ 0.2364, -0.7752, 0.6372], +// [ 1.7201, 0.7394, -0.0504], +// [-0.3371, -1.0584, 0.5296], +// [ 0.3550, -0.4022, 1.5569], +// [ 0.2445, -0.0158, 1.1414]]) +// >>> u, s, v = torch.svd(a) +// >>> u +// tensor([[ 0.4027, 0.0287, 0.5434], +// [-0.1946, 0.8833, 0.3679], +// [ 0.4296, -0.2890, 0.5261], +// [ 0.6604, 0.2717, -0.2618], +// [ 0.4234, 0.2481, -0.4733]]) +// >>> s +// tensor([2.3289, 2.0315, 0.7806]) +// >>> v +// tensor([[-0.0199, 0.8766, 0.4809], +// [-0.5080, 0.4054, -0.7600], +// [ 0.8611, 0.2594, -0.4373]]) +// >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t())) +// tensor(8.6531e-07) +// >>> a_big = torch.randn(7, 5, 3) +// >>> u, s, v = torch.svd(a_big) +// >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT)) +// tensor(2.6503e-06) +// +// .. 
_the resulting vectors will span the same subspace: +// (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD) +// +// +//go:linkname Svd py.svd +func Svd(input *py.Object, some *py.Object, computeUv *py.Object) *py.Object +// +// swapaxes(input, axis0, axis1) -> Tensor +// +// Alias for :func:`torch.transpose`. +// +// This function is equivalent to NumPy's swapaxes function. +// +// Examples:: +// +// >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) +// >>> x +// tensor([[[0, 1], +// [2, 3]], +// +// [[4, 5], +// [6, 7]]]) +// >>> torch.swapaxes(x, 0, 1) +// tensor([[[0, 1], +// [4, 5]], +// +// [[2, 3], +// [6, 7]]]) +// >>> torch.swapaxes(x, 0, 2) +// tensor([[[0, 4], +// [2, 6]], +// +// [[1, 5], +// [3, 7]]]) +// +// +//go:linkname Swapaxes py.swapaxes +func Swapaxes(input *py.Object, axis0 *py.Object, axis1 *py.Object) *py.Object +// +// swapdims(input, dim0, dim1) -> Tensor +// +// Alias for :func:`torch.transpose`. +// +// This function is equivalent to NumPy's swapaxes function. +// +// Examples:: +// +// >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) +// >>> x +// tensor([[[0, 1], +// [2, 3]], +// +// [[4, 5], +// [6, 7]]]) +// >>> torch.swapdims(x, 0, 1) +// tensor([[[0, 1], +// [4, 5]], +// +// [[2, 3], +// [6, 7]]]) +// >>> torch.swapdims(x, 0, 2) +// tensor([[[0, 4], +// [2, 6]], +// +// [[1, 5], +// [3, 7]]]) +// +// +//go:linkname Swapdims py.swapdims +func Swapdims(input *py.Object, dim0 *py.Object, dim1 *py.Object) *py.Object +// None +// +//go:linkname SymConstrainRange py.sym_constrain_range +func SymConstrainRange(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname SymConstrainRangeForSize py.sym_constrain_range_for_size +func SymConstrainRangeForSize(__llgo_va_list ...interface{}) *py.Object +// +// t(input) -> Tensor +// +// Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0 +// and 1. 
+// +// 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this +// is equivalent to ``transpose(input, 0, 1)``. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x = torch.randn(()) +// >>> x +// tensor(0.1995) +// >>> torch.t(x) +// tensor(0.1995) +// >>> x = torch.randn(3) +// >>> x +// tensor([ 2.4320, -0.4608, 0.7702]) +// >>> torch.t(x) +// tensor([ 2.4320, -0.4608, 0.7702]) +// >>> x = torch.randn(2, 3) +// >>> x +// tensor([[ 0.4875, 0.9158, -0.5872], +// [ 0.3938, -0.6929, 0.6932]]) +// >>> torch.t(x) +// tensor([[ 0.4875, 0.3938], +// [ 0.9158, -0.6929], +// [-0.5872, 0.6932]]) +// +// See also :func:`torch.transpose`. +// +// +//go:linkname T py.t +func T(input *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.t`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname TCopy py.t_copy +func TCopy(__llgo_va_list ...interface{}) *py.Object +// +// take(input, index) -> Tensor +// +// Returns a new tensor with the elements of :attr:`input` at the given indices. +// The input tensor is treated as if it were viewed as a 1-D tensor. The result +// takes the same shape as the indices. +// +// Args: +// input (Tensor): the input tensor. +// index (LongTensor): the indices into tensor +// +// Example:: +// +// >>> src = torch.tensor([[4, 3, 5], +// ... [6, 7, 8]]) +// >>> torch.take(src, torch.tensor([0, 2, 5])) +// tensor([ 4, 5, 8]) +// +// +//go:linkname Take py.take +func Take(input *py.Object, index *py.Object) *py.Object +// +// take_along_dim(input, indices, dim=None, *, out=None) -> Tensor +// +// Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`. +// +// If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d. 
+// +// Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`, +// are designed to work with this function. See the examples below. +// +// .. note:: +// This function is similar to NumPy's `take_along_axis`. +// See also :func:`torch.gather`. +// +// Args: +// input (Tensor): the input tensor. +// indices (tensor): the indices into :attr:`input`. Must have long dtype. +// dim (int, optional): dimension to select along. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]]) +// >>> max_idx = torch.argmax(t) +// >>> torch.take_along_dim(t, max_idx) +// tensor([60]) +// >>> sorted_idx = torch.argsort(t, dim=1) +// >>> torch.take_along_dim(t, sorted_idx, dim=1) +// tensor([[10, 20, 30], +// [40, 50, 60]]) +// +// +//go:linkname TakeAlongDim py.take_along_dim +func TakeAlongDim(input *py.Object, indices *py.Object, dim *py.Object) *py.Object +// +// tan(input, *, out=None) -> Tensor +// +// Returns a new tensor with the tangent of the elements of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \tan(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([-1.2027, -1.7687, 0.4412, -1.3856]) +// >>> torch.tan(a) +// tensor([-2.5930, 4.9859, 0.4722, -5.3366]) +// +// +//go:linkname Tan py.tan +func Tan(input *py.Object) *py.Object +// None +// +//go:linkname Tan_ py.tan_ +func Tan_(__llgo_va_list ...interface{}) *py.Object +// +// tanh(input, *, out=None) -> Tensor +// +// Returns a new tensor with the hyperbolic tangent of the elements +// of :attr:`input`. +// +// .. math:: +// \text{out}_{i} = \tanh(\text{input}_{i}) +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) +// >>> torch.tanh(a) +// tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) +// +// +//go:linkname Tanh py.tanh +func Tanh(input *py.Object) *py.Object +// None +// +//go:linkname Tanh_ py.tanh_ +func Tanh_(__llgo_va_list ...interface{}) *py.Object +// +// tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor +// +// Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`. +// +// .. warning:: +// +// When working with tensors prefer using :func:`torch.Tensor.clone`, +// :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for +// readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to +// ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)`` +// is equivalent to ``t.clone().detach().requires_grad_(True)``. +// +// .. seealso:: +// +// :func:`torch.as_tensor` preserves autograd history and avoids copies where possible. +// :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array. +// +// Args: +// data (array_like): Initial data for the tensor. Can be a list, tuple, +// NumPy ``ndarray``, scalar, and other types. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, infers data type from :attr:`data`. +// device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor +// then the device of data is used. If None and data is not a tensor then +// the result tensor is constructed on the current device. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// pin_memory (bool, optional): If set, returned tensor would be allocated in +// the pinned memory. Works only for CPU tensors. Default: ``False``. 
+// +// +// Example:: +// +// >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) +// tensor([[ 0.1000, 1.2000], +// [ 2.2000, 3.1000], +// [ 4.9000, 5.2000]]) +// +// >>> torch.tensor([0, 1]) # Type inference on data +// tensor([ 0, 1]) +// +// >>> torch.tensor([[0.11111, 0.222222, 0.3333333]], +// ... dtype=torch.float64, +// ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device +// tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0') +// +// >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor +// tensor(3.1416) +// +// >>> torch.tensor([]) # Create an empty tensor (of size (0,)) +// tensor([]) +// +// +//go:linkname Tensor py.tensor +func Tensor(data *py.Object) *py.Object +// +// tensor_split(input, indices_or_sections, dim=0) -> List of Tensors +// +// Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, +// along dimension :attr:`dim` according to the indices or number of sections specified +// by :attr:`indices_or_sections`. This function is based on NumPy's +// :func:`numpy.array_split`. +// +// Args: +// input (Tensor): the tensor to split +// indices_or_sections (Tensor, int or list or tuple of ints): +// If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor +// with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. +// If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each +// section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` +// is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` +// sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will +// have size :code:`int(input.size(dim) / n)`. 
+// +// If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long +// tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices +// in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` +// would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. +// +// If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional +// long tensor on the CPU. +// +// dim (int, optional): dimension along which to split the tensor. Default: ``0`` +// +// Example:: +// +// >>> x = torch.arange(8) +// >>> torch.tensor_split(x, 3) +// (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) +// +// >>> x = torch.arange(7) +// >>> torch.tensor_split(x, 3) +// (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) +// >>> torch.tensor_split(x, (1, 6)) +// (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) +// +// >>> x = torch.arange(14).reshape(2, 7) +// >>> x +// tensor([[ 0, 1, 2, 3, 4, 5, 6], +// [ 7, 8, 9, 10, 11, 12, 13]]) +// >>> torch.tensor_split(x, 3, dim=1) +// (tensor([[0, 1, 2], +// [7, 8, 9]]), +// tensor([[ 3, 4], +// [10, 11]]), +// tensor([[ 5, 6], +// [12, 13]])) +// >>> torch.tensor_split(x, (1, 6), dim=1) +// (tensor([[0], +// [7]]), +// tensor([[ 1, 2, 3, 4, 5], +// [ 8, 9, 10, 11, 12]]), +// tensor([[ 6], +// [13]])) +// +// +//go:linkname TensorSplit py.tensor_split +func TensorSplit(input *py.Object, indicesOrSections *py.Object, dim *py.Object) *py.Object +// Returns a contraction of a and b over multiple dimensions. +// +// :attr:`tensordot` implements a generalized matrix product. 
+// +// Args: +// a (Tensor): Left tensor to contract +// b (Tensor): Right tensor to contract +// dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to +// contract or explicit lists of dimensions for :attr:`a` and +// :attr:`b` respectively +// +// When called with a non-negative integer argument :attr:`dims` = :math:`d`, and +// the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`, +// respectively, :func:`~torch.tensordot` computes +// +// .. math:: +// r_{i_0,...,i_{m-d}, i_d,...,i_n} +// = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}. +// +// When called with :attr:`dims` of the list form, the given dimensions will be contracted +// in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes +// in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted +// dimensions. +// +// Examples:: +// +// >>> a = torch.arange(60.).reshape(3, 4, 5) +// >>> b = torch.arange(24.).reshape(4, 3, 2) +// >>> torch.tensordot(a, b, dims=([1, 0], [0, 1])) +// tensor([[4400., 4730.], +// [4532., 4874.], +// [4664., 5018.], +// [4796., 5162.], +// [4928., 5306.]]) +// +// >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) +// >>> a = torch.randn(3, 4, 5, device='cuda') +// >>> b = torch.randn(4, 5, 6, device='cuda') +// >>> c = torch.tensordot(a, b, dims=2).cpu() +// tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741], +// [ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744], +// [ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]]) +// +// >>> a = torch.randn(3, 5, 4, 6) +// >>> b = torch.randn(6, 4, 5, 3) +// >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0])) +// tensor([[ 7.7193, -2.4867, -10.3204], +// [ 1.5513, -14.4737, -6.5113], +// [ -0.2850, 4.2573, -3.5997]]) +// +// +//go:linkname Tensordot py.tensordot +func Tensordot(a *py.Object, b *py.Object, dims 
*py.Object, out *py.Object) *py.Object +// None +// +//go:linkname Threshold py.threshold +func Threshold(__llgo_va_list ...interface{}) *py.Object +// +// threshold_(input, threshold, value) -> Tensor +// +// In-place version of :func:`~threshold`. +// +// +//go:linkname Threshold_ py.threshold_ +func Threshold_(input *py.Object, threshold *py.Object, value *py.Object) *py.Object +// +// tile(input, dims) -> Tensor +// +// Constructs a tensor by repeating the elements of :attr:`input`. +// The :attr:`dims` argument specifies the number of repetitions +// in each dimension. +// +// If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then +// ones are prepended to :attr:`dims` until all dimensions are specified. +// For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims` +// is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2). +// +// Analogously, if :attr:`input` has fewer dimensions than :attr:`dims` +// specifies, then :attr:`input` is treated as if it were unsqueezed at +// dimension zero until it has as many dimensions as :attr:`dims` specifies. +// For example, if :attr:`input` has shape (4, 2) and :attr:`dims` +// is (3, 3, 2, 2), then :attr:`input` is treated as if it had the +// shape (1, 1, 4, 2). +// +// .. note:: +// +// This function is similar to NumPy's tile function. +// +// Args: +// input (Tensor): the tensor whose elements to repeat. +// dims (tuple): the number of repetitions per dimension. 
+// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3]) +// >>> x.tile((2,)) +// tensor([1, 2, 3, 1, 2, 3]) +// >>> y = torch.tensor([[1, 2], [3, 4]]) +// >>> torch.tile(y, (2, 2)) +// tensor([[1, 2, 1, 2], +// [3, 4, 3, 4], +// [1, 2, 1, 2], +// [3, 4, 3, 4]]) +// +// +//go:linkname Tile py.tile +func Tile(input *py.Object, dims *py.Object) *py.Object +// +// topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor) +// +// Returns the :attr:`k` largest elements of the given :attr:`input` tensor along +// a given dimension. +// +// If :attr:`dim` is not given, the last dimension of the `input` is chosen. +// +// If :attr:`largest` is ``False`` then the `k` smallest elements are returned. +// +// A namedtuple of `(values, indices)` is returned with the `values` and +// `indices` of the largest `k` elements of each row of the `input` tensor in the +// given dimension `dim`. +// +// The boolean option :attr:`sorted` if ``True``, will make sure that the returned +// `k` elements are themselves sorted +// +// Args: +// input (Tensor): the input tensor. +// k (int): the k in "top-k" +// dim (int, optional): the dimension to sort along +// largest (bool, optional): controls whether to return largest or +// smallest elements +// sorted (bool, optional): controls whether to return the elements +// in sorted order +// +// Keyword args: +// out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be +// optionally given to be used as output buffers +// +// Example:: +// +// >>> x = torch.arange(1., 6.) +// >>> x +// tensor([ 1., 2., 3., 4., 5.]) +// >>> torch.topk(x, 3) +// torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2])) +// +// +//go:linkname Topk py.topk +func Topk(input *py.Object, k *py.Object, dim *py.Object, largest *py.Object, sorted *py.Object) *py.Object +// +// trace(input) -> Tensor +// +// Returns the sum of the elements of the diagonal of the input 2-D matrix. 
+// +// Example:: +// +// >>> x = torch.arange(1., 10.).view(3, 3) +// >>> x +// tensor([[ 1., 2., 3.], +// [ 4., 5., 6.], +// [ 7., 8., 9.]]) +// >>> torch.trace(x) +// tensor(15.) +// +// +//go:linkname Trace py.trace +func Trace(input *py.Object) *py.Object +// +// transpose(input, dim0, dim1) -> Tensor +// +// Returns a tensor that is a transposed version of :attr:`input`. +// The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. +// +// If :attr:`input` is a strided tensor then the resulting :attr:`out` +// tensor shares its underlying storage with the :attr:`input` tensor, so +// changing the content of one would change the content of the other. +// +// If :attr:`input` is a :ref:`sparse tensor ` then the +// resulting :attr:`out` tensor *does not* share the underlying storage +// with the :attr:`input` tensor. +// +// If :attr:`input` is a :ref:`sparse tensor ` with compressed +// layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments +// :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must +// both be sparse dimensions. The batch dimensions of a sparse tensor are the +// dimensions preceding the sparse dimensions. +// +// .. note:: +// Transpositions which interchange the sparse dimensions of a `SparseCSR` +// or `SparseCSC` layout tensor will result in the layout changing between +// the two options. Transposition of the sparse dimensions of a ` SparseBSR` +// or `SparseBSC` layout tensor will likewise generate a result with the +// opposite layout. +// +// +// Args: +// input (Tensor): the input tensor. +// dim0 (int): the first dimension to be transposed +// dim1 (int): the second dimension to be transposed +// +// Example:: +// +// >>> x = torch.randn(2, 3) +// >>> x +// tensor([[ 1.0028, -0.9893, 0.5809], +// [-0.1669, 0.7299, 0.4942]]) +// >>> torch.transpose(x, 0, 1) +// tensor([[ 1.0028, -0.1669], +// [-0.9893, 0.7299], +// [ 0.5809, 0.4942]]) +// +// See also :func:`torch.t`. 
+// +// +//go:linkname Transpose py.transpose +func Transpose(input *py.Object, dim0 *py.Object, dim1 *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.transpose`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname TransposeCopy py.transpose_copy +func TransposeCopy(__llgo_va_list ...interface{}) *py.Object +// +// trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor +// +// Computes the `trapezoidal rule `_ along +// :attr:`dim`. By default the spacing between elements is assumed to be 1, but +// :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be +// used to specify arbitrary spacing along :attr:`dim`. +// +// +// Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, +// the default computation is +// +// .. math:: +// \begin{aligned} +// \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) +// \end{aligned} +// +// When :attr:`dx` is specified the computation becomes +// +// .. math:: +// \begin{aligned} +// \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) +// \end{aligned} +// +// effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, +// assuming :attr:`x` is also a one-dimensional tensor with +// elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes +// +// .. math:: +// \begin{aligned} +// \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) +// \end{aligned} +// +// When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. +// The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` +// and :attr:`y`, the function computes the difference between consecutive elements along +// dimension :attr:`dim`. 
This effectively creates two tensors, `x_diff` and `y_diff`, that have +// the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. +// After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. +// See the examples below for details. +// +// .. note:: +// The trapezoidal rule is a technique for approximating the definite integral of a function +// by averaging its left and right Riemann sums. The approximation becomes more accurate as +// the resolution of the partition increases. +// +// Arguments: +// y (Tensor): Values to use when computing the trapezoidal rule. +// x (Tensor): If specified, defines spacing between values as specified above. +// +// Keyword arguments: +// dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` +// are specified then this defaults to 1. Effectively multiplies the result by its value. +// dim (int): The dimension along which to compute the trapezoidal rule. +// The last (inner-most) dimension by default. 
+// +// Examples:: +// +// >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1 +// >>> y = torch.tensor([1, 5, 10]) +// >>> torch.trapezoid(y) +// tensor(10.5) +// +// >>> # Computes the same trapezoidal rule directly to verify +// >>> (1 + 10 + 10) / 2 +// 10.5 +// +// >>> # Computes the trapezoidal rule in 1D with constant spacing of 2 +// >>> # NOTE: the result is the same as before, but multiplied by 2 +// >>> torch.trapezoid(y, dx=2) +// 21.0 +// +// >>> # Computes the trapezoidal rule in 1D with arbitrary spacing +// >>> x = torch.tensor([1, 3, 6]) +// >>> torch.trapezoid(y, x) +// 28.5 +// +// >>> # Computes the same trapezoidal rule directly to verify +// >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 +// 28.5 +// +// >>> # Computes the trapezoidal rule for each row of a 3x3 matrix +// >>> y = torch.arange(9).reshape(3, 3) +// tensor([[0, 1, 2], +// [3, 4, 5], +// [6, 7, 8]]) +// >>> torch.trapezoid(y) +// tensor([ 2., 8., 14.]) +// +// >>> # Computes the trapezoidal rule for each column of the matrix +// >>> torch.trapezoid(y, dim=0) +// tensor([ 6., 8., 10.]) +// +// >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix +// >>> # with the same arbitrary spacing +// >>> y = torch.ones(3, 3) +// >>> x = torch.tensor([1, 3, 6]) +// >>> torch.trapezoid(y, x) +// array([5., 5., 5.]) +// +// >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix +// >>> # with different arbitrary spacing per row +// >>> y = torch.ones(3, 3) +// >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) +// >>> torch.trapezoid(y, x) +// array([2., 4., 6.]) +// +// +//go:linkname Trapezoid py.trapezoid +func Trapezoid(y *py.Object, x *py.Object) *py.Object +// +// trapz(y, x, *, dim=-1) -> Tensor +// +// Alias for :func:`torch.trapezoid`. 
+// +// +//go:linkname Trapz py.trapz +func Trapz(y *py.Object, x *py.Object) *py.Object +// +// triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor) +// +// Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A` +// and multiple right-hand sides :math:`b`. +// +// In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular +// (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal. +// +// `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are +// batches of 2D matrices. If the inputs are batches, then returns +// batched outputs `X` +// +// If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and +// :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned, +// the result may contain `NaN` s. +// +// Supports input of float, double, cfloat and cdouble data types. +// +// .. warning:: +// +// :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular` +// and will be removed in a future PyTorch release. +// :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a +// copy of one of the inputs. +// +// ``X = torch.triangular_solve(B, A).solution`` should be replaced with +// +// .. code:: python +// +// X = torch.linalg.solve_triangular(A, B) +// +// Args: +// b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where +// :math:`*` is zero of more batch dimensions +// A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)` +// where :math:`*` is zero or more batch dimensions +// upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``. +// transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``, +// and `op(A) = A` if it is ``False``. Default: ``False``. 
+// unitriangular (bool, optional): whether :math:`A` is unit triangular. +// If True, the diagonal elements of :math:`A` are assumed to be +// 1 and not referenced from :math:`A`. Default: ``False``. +// +// Keyword args: +// out ((Tensor, Tensor), optional): tuple of two tensors to write +// the output to. Ignored if `None`. Default: `None`. +// +// Returns: +// A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient` +// is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b` +// (or whatever variant of the system of equations, depending on the keyword arguments.) +// +// Examples:: +// +// >>> A = torch.randn(2, 2).triu() +// >>> A +// tensor([[ 1.1527, -1.0753], +// [ 0.0000, 0.7986]]) +// >>> b = torch.randn(2, 3) +// >>> b +// tensor([[-0.0210, 2.3513, -1.5492], +// [ 1.5429, 0.7403, -1.0243]]) +// >>> torch.triangular_solve(b, A) +// torch.return_types.triangular_solve( +// solution=tensor([[ 1.7841, 2.9046, -2.5405], +// [ 1.9320, 0.9270, -1.2826]]), +// cloned_coefficient=tensor([[ 1.1527, -1.0753], +// [ 0.0000, 0.7986]])) +// +// +//go:linkname TriangularSolve py.triangular_solve +func TriangularSolve(b *py.Object, A *py.Object, upper *py.Object, transpose *py.Object, unitriangular *py.Object) *py.Object +// +// tril(input, diagonal=0, *, out=None) -> Tensor +// +// Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices +// :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. +// +// The lower triangular part of the matrix is defined as the elements on and +// below the diagonal. +// +// The argument :attr:`diagonal` controls which diagonal to consider. If +// :attr:`diagonal` = 0, all elements on and below the main diagonal are +// retained. A positive value includes just as many diagonals above the main +// diagonal, and similarly a negative value excludes just as many diagonals below +// the main diagonal. 
The main diagonal are the set of indices +// :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +// :math:`d_{1}, d_{2}` are the dimensions of the matrix. +// +// Args: +// input (Tensor): the input tensor. +// diagonal (int, optional): the diagonal to consider +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(3, 3) +// >>> a +// tensor([[-1.0813, -0.8619, 0.7105], +// [ 0.0935, 0.1380, 2.2112], +// [-0.3409, -0.9828, 0.0289]]) +// >>> torch.tril(a) +// tensor([[-1.0813, 0.0000, 0.0000], +// [ 0.0935, 0.1380, 0.0000], +// [-0.3409, -0.9828, 0.0289]]) +// +// >>> b = torch.randn(4, 6) +// >>> b +// tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461], +// [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145], +// [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864], +// [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]]) +// >>> torch.tril(b, diagonal=1) +// tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000], +// [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000], +// [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000], +// [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]]) +// >>> torch.tril(b, diagonal=-1) +// tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], +// [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], +// [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000], +// [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]]) +// +// +//go:linkname Tril py.tril +func Tril(input *py.Object, diagonal *py.Object) *py.Object +// +// tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor +// +// Returns the indices of the lower triangular part of a :attr:`row`-by- +// :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +// coordinates of all indices and the second row contains column coordinates. +// Indices are ordered based on rows and then columns. 
+// +// The lower triangular part of the matrix is defined as the elements on and +// below the diagonal. +// +// The argument :attr:`offset` controls which diagonal to consider. If +// :attr:`offset` = 0, all elements on and below the main diagonal are +// retained. A positive value includes just as many diagonals above the main +// diagonal, and similarly a negative value excludes just as many diagonals below +// the main diagonal. The main diagonal are the set of indices +// :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +// where :math:`d_{1}, d_{2}` are the dimensions of the matrix. +// +// .. note:: +// When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to +// prevent overflow during calculation. +// +// Args: +// row (``int``): number of rows in the 2-D matrix. +// col (``int``): number of columns in the 2-D matrix. +// offset (``int``): diagonal offset from the main diagonal. +// Default: if not provided, 0. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, ``torch.long``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. 
+// +// Example:: +// +// >>> a = torch.tril_indices(3, 3) +// >>> a +// tensor([[0, 1, 1, 2, 2, 2], +// [0, 0, 1, 0, 1, 2]]) +// +// >>> a = torch.tril_indices(4, 3, -1) +// >>> a +// tensor([[1, 2, 2, 3, 3, 3], +// [0, 0, 1, 0, 1, 2]]) +// +// >>> a = torch.tril_indices(4, 3, 1) +// >>> a +// tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], +// [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]]) +// +// +//go:linkname TrilIndices py.tril_indices +func TrilIndices(row *py.Object, col *py.Object, offset *py.Object) *py.Object +// None +// +//go:linkname TripletMarginLoss py.triplet_margin_loss +func TripletMarginLoss(__llgo_va_list ...interface{}) *py.Object +// +// triu(input, diagonal=0, *, out=None) -> Tensor +// +// Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices +// :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. +// +// The upper triangular part of the matrix is defined as the elements on and +// above the diagonal. +// +// The argument :attr:`diagonal` controls which diagonal to consider. If +// :attr:`diagonal` = 0, all elements on and above the main diagonal are +// retained. A positive value excludes just as many diagonals above the main +// diagonal, and similarly a negative value includes just as many diagonals below +// the main diagonal. The main diagonal are the set of indices +// :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +// :math:`d_{1}, d_{2}` are the dimensions of the matrix. +// +// Args: +// input (Tensor): the input tensor. +// diagonal (int, optional): the diagonal to consider +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.randn(3, 3) +// >>> a +// tensor([[ 0.2309, 0.5207, 2.0049], +// [ 0.2072, -1.0680, 0.6602], +// [ 0.3480, -0.5211, -0.4573]]) +// >>> torch.triu(a) +// tensor([[ 0.2309, 0.5207, 2.0049], +// [ 0.0000, -1.0680, 0.6602], +// [ 0.0000, 0.0000, -0.4573]]) +// >>> torch.triu(a, diagonal=1) +// tensor([[ 0.0000, 0.5207, 2.0049], +// [ 0.0000, 0.0000, 0.6602], +// [ 0.0000, 0.0000, 0.0000]]) +// >>> torch.triu(a, diagonal=-1) +// tensor([[ 0.2309, 0.5207, 2.0049], +// [ 0.2072, -1.0680, 0.6602], +// [ 0.0000, -0.5211, -0.4573]]) +// +// >>> b = torch.randn(4, 6) +// >>> b +// tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], +// [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], +// [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], +// [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]]) +// >>> torch.triu(b, diagonal=1) +// tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], +// [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857], +// [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410], +// [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]]) +// >>> torch.triu(b, diagonal=-1) +// tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], +// [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], +// [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], +// [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]]) +// +// +//go:linkname Triu py.triu +func Triu(input *py.Object, diagonal *py.Object) *py.Object +// +// triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor +// +// Returns the indices of the upper triangular part of a :attr:`row` by +// :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +// coordinates of all indices and the second row contains column coordinates. +// Indices are ordered based on rows and then columns. +// +// The upper triangular part of the matrix is defined as the elements on and +// above the diagonal. 
+// +// The argument :attr:`offset` controls which diagonal to consider. If +// :attr:`offset` = 0, all elements on and above the main diagonal are +// retained. A positive value excludes just as many diagonals above the main +// diagonal, and similarly a negative value includes just as many diagonals below +// the main diagonal. The main diagonal are the set of indices +// :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +// where :math:`d_{1}, d_{2}` are the dimensions of the matrix. +// +// .. note:: +// When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to +// prevent overflow during calculation. +// +// Args: +// row (``int``): number of rows in the 2-D matrix. +// col (``int``): number of columns in the 2-D matrix. +// offset (``int``): diagonal offset from the main diagonal. +// Default: if not provided, 0. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, ``torch.long``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. 
+// +// Example:: +// +// >>> a = torch.triu_indices(3, 3) +// >>> a +// tensor([[0, 0, 0, 1, 1, 2], +// [0, 1, 2, 1, 2, 2]]) +// +// >>> a = torch.triu_indices(4, 3, -1) +// >>> a +// tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3], +// [0, 1, 2, 0, 1, 2, 1, 2, 2]]) +// +// >>> a = torch.triu_indices(4, 3, 1) +// >>> a +// tensor([[0, 0, 1], +// [1, 2, 2]]) +// +// +//go:linkname TriuIndices py.triu_indices +func TriuIndices(row *py.Object, col *py.Object, offset *py.Object) *py.Object +// +// true_divide(dividend, divisor, *, out) -> Tensor +// +// Alias for :func:`torch.div` with ``rounding_mode=None``. +// +// +//go:linkname TrueDivide py.true_divide +func TrueDivide(dividend *py.Object, divisor *py.Object) *py.Object +// +// trunc(input, *, out=None) -> Tensor +// +// Returns a new tensor with the truncated integer values of +// the elements of :attr:`input`. +// +// For integer inputs, follows the array-api convention of returning a +// copy of the input tensor. +// +// Args: +// input (Tensor): the input tensor. +// +// Keyword args: +// out (Tensor, optional): the output tensor. +// +// Example:: +// +// >>> a = torch.randn(4) +// >>> a +// tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) +// >>> torch.trunc(a) +// tensor([ 3., 0., -0., -0.]) +// +// +//go:linkname Trunc py.trunc +func Trunc(input *py.Object) *py.Object +// None +// +//go:linkname Trunc_ py.trunc_ +func Trunc_(__llgo_va_list ...interface{}) *py.Object +// +// unbind(input, dim=0) -> seq +// +// Removes a tensor dimension. +// +// Returns a tuple of all slices along a given dimension, already without it. 
+// +// Arguments: +// input (Tensor): the tensor to unbind +// dim (int): dimension to remove +// +// Example:: +// +// >>> torch.unbind(torch.tensor([[1, 2, 3], +// >>> [4, 5, 6], +// >>> [7, 8, 9]])) +// (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) +// +// +//go:linkname Unbind py.unbind +func Unbind(input *py.Object, dim *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.unbind`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname UnbindCopy py.unbind_copy +func UnbindCopy(__llgo_va_list ...interface{}) *py.Object +// +// unflatten(input, dim, sizes) -> Tensor +// +// Expands a dimension of the input tensor over multiple dimensions. +// +// .. seealso:: +// +// :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): Dimension to be unflattened, specified as an index into +// ``input.shape``. +// sizes (Tuple[int]): New shape of the unflattened dimension. +// One of its elements can be `-1` in which case the corresponding output +// dimension is inferred. Otherwise, the product of ``sizes`` *must* +// equal ``input.shape[dim]``. +// +// Returns: +// A View of input with the specified dimension unflattened. +// +// Examples:: +// >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape +// torch.Size([3, 2, 2, 1]) +// >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape +// torch.Size([3, 2, 2, 1]) +// >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape +// torch.Size([5, 2, 2, 3, 1, 1, 3]) +// +// +//go:linkname Unflatten py.unflatten +func Unflatten(input *py.Object, dim *py.Object, sizes *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.unfold`, but all output tensors +// are freshly created instead of aliasing the input. 
+// +// +//go:linkname UnfoldCopy py.unfold_copy +func UnfoldCopy(__llgo_va_list ...interface{}) *py.Object +// Eliminates all but the first element from every consecutive group of equivalent elements. +// +// .. note:: This function is different from :func:`torch.unique` in the sense that this function +// only eliminates consecutive duplicate values. This semantics is similar to `std::unique` +// in C++. +// +// Args: +// input (Tensor): the input tensor +// return_inverse (bool): Whether to also return the indices for where +// elements in the original input ended up in the returned unique list. +// return_counts (bool): Whether to also return the counts for each unique +// element. +// dim (int): the dimension to apply unique. If ``None``, the unique of the +// flattened input is returned. default: ``None`` +// +// Returns: +// (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing +// +// - **output** (*Tensor*): the output list of unique scalar elements. +// - **inverse_indices** (*Tensor*): (optional) if +// :attr:`return_inverse` is True, there will be an additional +// returned tensor (same shape as input) representing the indices +// for where elements in the original input map to in the output; +// otherwise, this function will only return a single tensor. +// - **counts** (*Tensor*): (optional) if +// :attr:`return_counts` is True, there will be an additional +// returned tensor (same shape as output or output.size(dim), +// if dim was specified) representing the number of occurrences +// for each unique value or tensor. 
+// +// Example:: +// +// >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2]) +// >>> output = torch.unique_consecutive(x) +// >>> output +// tensor([1, 2, 3, 1, 2]) +// +// >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True) +// >>> output +// tensor([1, 2, 3, 1, 2]) +// >>> inverse_indices +// tensor([0, 0, 1, 1, 2, 3, 3, 4]) +// +// >>> output, counts = torch.unique_consecutive(x, return_counts=True) +// >>> output +// tensor([1, 2, 3, 1, 2]) +// >>> counts +// tensor([2, 2, 1, 2, 1]) +// +// +//go:linkname UniqueConsecutive py.unique_consecutive +func UniqueConsecutive(__llgo_va_list ...interface{}) *py.Object +// +// unsafe_chunk(input, chunks, dim=0) -> List of Tensors +// +// Works like :func:`torch.chunk` but without enforcing the autograd restrictions +// on inplace modification of the outputs. +// +// .. warning:: +// This function is safe to use as long as only the input, or only the outputs +// are modified inplace after calling this function. It is user's +// responsibility to ensure that is the case. If both the input and one or more +// of the outputs are modified inplace, gradients computed by autograd will be +// silently incorrect. +// +// +//go:linkname UnsafeChunk py.unsafe_chunk +func UnsafeChunk(input *py.Object, chunks *py.Object, dim *py.Object) *py.Object +// +// unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors +// +// Works like :func:`torch.split` but without enforcing the autograd restrictions +// on inplace modification of the outputs. +// +// .. warning:: +// This function is safe to use as long as only the input, or only the outputs +// are modified inplace after calling this function. It is user's +// responsibility to ensure that is the case. If both the input and one or more +// of the outputs are modified inplace, gradients computed by autograd will be +// silently incorrect. 
+// +// +//go:linkname UnsafeSplit py.unsafe_split +func UnsafeSplit(tensor *py.Object, splitSizeOrSections *py.Object, dim *py.Object) *py.Object +// None +// +//go:linkname UnsafeSplitWithSizes py.unsafe_split_with_sizes +func UnsafeSplitWithSizes(__llgo_va_list ...interface{}) *py.Object +// +// unsqueeze(input, dim) -> Tensor +// +// Returns a new tensor with a dimension of size one inserted at the +// specified position. +// +// The returned tensor shares the same underlying data with this tensor. +// +// A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)`` +// can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze` +// applied at :attr:`dim` = ``dim + input.dim() + 1``. +// +// Args: +// input (Tensor): the input tensor. +// dim (int): the index at which to insert the singleton dimension +// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3, 4]) +// >>> torch.unsqueeze(x, 0) +// tensor([[ 1, 2, 3, 4]]) +// >>> torch.unsqueeze(x, 1) +// tensor([[ 1], +// [ 2], +// [ 3], +// [ 4]]) +// +// +//go:linkname Unsqueeze py.unsqueeze +func Unsqueeze(input *py.Object, dim *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.unsqueeze`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname UnsqueezeCopy py.unsqueeze_copy +func UnsqueezeCopy(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.values`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname ValuesCopy py.values_copy +func ValuesCopy(__llgo_va_list ...interface{}) *py.Object +// +// vander(x, N=None, increasing=False) -> Tensor +// +// Generates a Vandermonde matrix. +// +// The columns of the output matrix are elementwise powers of the input vector :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`. +// If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{(N-1)}`. 
Such a +// matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. +// +// Arguments: +// x (Tensor): 1-D input tensor. +// N (int, optional): Number of columns in the output. If N is not specified, +// a square array is returned :math:`(N = len(x))`. +// increasing (bool, optional): Order of the powers of the columns. If True, +// the powers increase from left to right, if False (the default) they are reversed. +// +// Returns: +// Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`, +// the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns +// are :math:`x^0, x^1, ..., x^{(N-1)}`. +// +// Example:: +// +// >>> x = torch.tensor([1, 2, 3, 5]) +// >>> torch.vander(x) +// tensor([[ 1, 1, 1, 1], +// [ 8, 4, 2, 1], +// [ 27, 9, 3, 1], +// [125, 25, 5, 1]]) +// >>> torch.vander(x, N=3) +// tensor([[ 1, 1, 1], +// [ 4, 2, 1], +// [ 9, 3, 1], +// [25, 5, 1]]) +// >>> torch.vander(x, N=3, increasing=True) +// tensor([[ 1, 1, 1], +// [ 1, 2, 4], +// [ 1, 3, 9], +// [ 1, 5, 25]]) +// +// +// +//go:linkname Vander py.vander +func Vander(x *py.Object, N *py.Object, increasing *py.Object) *py.Object +// +// var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor +// +// Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` +// can be a single dimension, list of dimensions, or ``None`` to reduce over all +// dimensions. +// +// The variance (:math:`\sigma^2`) is calculated as +// +// .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 +// +// where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +// sample mean, :math:`N` is the number of samples and :math:`\delta N` is +// the :attr:`correction`. +// +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. 
+// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// +// Keyword args: +// correction (int): difference between the sample size and sample degrees of freedom. +// Defaults to `Bessel's correction`_, ``correction=1``. +// +// .. versionchanged:: 2.0 +// Previously this argument was called ``unbiased`` and was a boolean +// with ``True`` corresponding to ``correction=1`` and ``False`` being +// ``correction=0``. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// out (Tensor, optional): the output tensor. +// +// Example: +// +// >>> a = torch.tensor( +// ... [[ 0.2035, 1.2959, 1.8101, -0.4644], +// ... [ 1.5027, -0.3270, 0.5905, 0.6538], +// ... [-1.5745, 1.3330, -0.5596, -0.6548], +// ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) +// >>> torch.var(a, dim=1, keepdim=True) +// tensor([[1.0631], +// [0.5590], +// [1.4893], +// [0.8258]]) +// +// .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction +// +// +// +//go:linkname Var py.var +func Var(input *py.Object, dim *py.Object) *py.Object +// +// var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) +// +// Calculates the variance and mean over the dimensions specified by :attr:`dim`. +// :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +// reduce over all dimensions. +// +// The variance (:math:`\sigma^2`) is calculated as +// +// .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 +// +// where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +// sample mean, :math:`N` is the number of samples and :math:`\delta N` is +// the :attr:`correction`. 
+// +// +// +// If :attr:`keepdim` is ``True``, the output tensor is of the same size +// as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +// Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +// output tensor having 1 (or ``len(dim)``) fewer dimension(s). +// +// +// Args: +// input (Tensor): the input tensor. +// +// dim (int or tuple of ints, optional): the dimension or dimensions to reduce. +// If ``None``, all dimensions are reduced. +// +// +// Keyword args: +// correction (int): difference between the sample size and sample degrees of freedom. +// Defaults to `Bessel's correction`_, ``correction=1``. +// +// .. versionchanged:: 2.0 +// Previously this argument was called ``unbiased`` and was a boolean +// with ``True`` corresponding to ``correction=1`` and ``False`` being +// ``correction=0``. +// keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +// out (Tensor, optional): the output tensor. +// +// Returns: +// A tuple (var, mean) containing the variance and mean. +// +// Example: +// +// >>> a = torch.tensor( +// ... [[ 0.2035, 1.2959, 1.8101, -0.4644], +// ... [ 1.5027, -0.3270, 0.5905, 0.6538], +// ... [-1.5745, 1.3330, -0.5596, -0.6548], +// ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) +// >>> torch.var_mean(a, dim=0, keepdim=True) +// (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), +// tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) +// +// .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction +// +// +// +//go:linkname VarMean py.var_mean +func VarMean(input *py.Object, dim *py.Object) *py.Object +// +// vdot(input, other, *, out=None) -> Tensor +// +// Computes the dot product of two 1D vectors along a dimension. +// +// In symbols, this function computes +// +// .. math:: +// +// \sum_{i=1}^n \overline{x_i}y_i. +// +// where :math:`\overline{x_i}` denotes the conjugate for complex +// vectors, and it is the identity for real vectors. +// +// .. 
note:: +// +// Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product +// of two 1D tensors with the same number of elements. +// +// .. seealso:: +// +// :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension. +// +// Args: +// input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex. +// other (Tensor): second tensor in the dot product, must be 1D. +// +// Keyword args: +// +// .. note:: out (Tensor, optional): the output tensor. +// +// +// Example:: +// +// >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1])) +// tensor(7) +// >>> a = torch.tensor((1 +2j, 3 - 1j)) +// >>> b = torch.tensor((2 +1j, 4 - 0j)) +// >>> torch.vdot(a, b) +// tensor([16.+1.j]) +// >>> torch.vdot(b, a) +// tensor([16.-1.j]) +// +// +//go:linkname Vdot py.vdot +func Vdot(input *py.Object, other *py.Object) *py.Object +// +// view_as_complex(input) -> Tensor +// +// Returns a view of :attr:`input` as a complex tensor. For an input complex +// tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a +// new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last +// dimension of the input tensor is expected to represent the real and imaginary +// components of complex numbers. +// +// .. warning:: +// :func:`view_as_complex` is only supported for tensors with +// :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is +// expected to have the last dimension of :attr:`size` 2. In addition, the +// tensor must have a `stride` of 1 for its last dimension. The strides of all +// other dimensions must be even numbers. +// +// Args: +// input (Tensor): the input tensor. 
+// +// Example:: +// +// >>> x=torch.randn(4, 2) +// >>> x +// tensor([[ 1.6116, -0.5772], +// [-1.4606, -0.9120], +// [ 0.0786, -1.7497], +// [-0.6561, -1.6623]]) +// >>> torch.view_as_complex(x) +// tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)]) +// +// +//go:linkname ViewAsComplex py.view_as_complex +func ViewAsComplex(input *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.view_as_complex`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname ViewAsComplexCopy py.view_as_complex_copy +func ViewAsComplexCopy(__llgo_va_list ...interface{}) *py.Object +// +// view_as_real(input) -> Tensor +// +// Returns a view of :attr:`input` as a real tensor. For an input complex tensor of +// :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new +// real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2 +// represents the real and imaginary components of complex numbers. +// +// .. warning:: +// :func:`view_as_real` is only supported for tensors with ``complex dtypes``. +// +// Args: +// input (Tensor): the input tensor. +// +// Example:: +// +// >>> x=torch.randn(4, dtype=torch.cfloat) +// >>> x +// tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)]) +// >>> torch.view_as_real(x) +// tensor([[ 0.4737, -0.3839], +// [-0.2098, -0.6699], +// [ 0.3470, -0.9451], +// [-0.5174, -1.3136]]) +// +// +//go:linkname ViewAsReal py.view_as_real +func ViewAsReal(input *py.Object) *py.Object +// +// Performs the same operation as :func:`torch.view_as_real`, but all output tensors +// are freshly created instead of aliasing the input. +// +// +//go:linkname ViewAsRealCopy py.view_as_real_copy +func ViewAsRealCopy(__llgo_va_list ...interface{}) *py.Object +// +// Performs the same operation as :func:`torch.view`, but all output tensors +// are freshly created instead of aliasing the input. 
+// +// +//go:linkname ViewCopy py.view_copy +func ViewCopy(__llgo_va_list ...interface{}) *py.Object +// +// vsplit(input, indices_or_sections) -> List of Tensors +// +// Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors +// vertically according to :attr:`indices_or_sections`. Each split is a view of +// :attr:`input`. +// +// This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) +// (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer +// it must evenly divide the split dimension or a runtime error will be thrown. +// +// This function is based on NumPy's :func:`numpy.vsplit`. +// +// Args: +// input (Tensor): tensor to split. +// indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. +// +// Example:: +// >>> t = torch.arange(16.0).reshape(4,4) +// >>> t +// tensor([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.], +// [12., 13., 14., 15.]]) +// >>> torch.vsplit(t, 2) +// (tensor([[0., 1., 2., 3.], +// [4., 5., 6., 7.]]), +// tensor([[ 8., 9., 10., 11.], +// [12., 13., 14., 15.]])) +// >>> torch.vsplit(t, [3, 6]) +// (tensor([[ 0., 1., 2., 3.], +// [ 4., 5., 6., 7.], +// [ 8., 9., 10., 11.]]), +// tensor([[12., 13., 14., 15.]]), +// tensor([], size=(0, 4))) +// +// +// +//go:linkname Vsplit py.vsplit +func Vsplit(input *py.Object, indicesOrSections *py.Object) *py.Object +// +// vstack(tensors, *, out=None) -> Tensor +// +// Stack tensors in sequence vertically (row wise). +// +// This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`. +// +// Args: +// tensors (sequence of Tensors): sequence of tensors to concatenate +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Example:: +// +// >>> a = torch.tensor([1, 2, 3]) +// >>> b = torch.tensor([4, 5, 6]) +// >>> torch.vstack((a,b)) +// tensor([[1, 2, 3], +// [4, 5, 6]]) +// >>> a = torch.tensor([[1],[2],[3]]) +// >>> b = torch.tensor([[4],[5],[6]]) +// >>> torch.vstack((a,b)) +// tensor([[1], +// [2], +// [3], +// [4], +// [5], +// [6]]) +// +// +// +// +//go:linkname Vstack py.vstack +func Vstack(tensors *py.Object) *py.Object +// +// where(condition, input, other, *, out=None) -> Tensor +// +// Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. +// +// The operation is defined as: +// +// .. math:: +// \text{out}_i = \begin{cases} +// \text{input}_i & \text{if } \text{condition}_i \\ +// \text{other}_i & \text{otherwise} \\ +// \end{cases} +// +// .. note:: +// The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. +// +// Arguments: +// condition (BoolTensor): When True (nonzero), yield input, otherwise yield other +// input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices +// where :attr:`condition` is ``True`` +// other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices +// where :attr:`condition` is ``False`` +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// +// Returns: +// Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` +// +// Example:: +// +// >>> x = torch.randn(3, 2) +// >>> y = torch.ones(3, 2) +// >>> x +// tensor([[-0.4620, 0.3139], +// [ 0.3898, -0.7197], +// [ 0.0478, -0.1657]]) +// >>> torch.where(x > 0, 1.0, 0.0) +// tensor([[0., 1.], +// [1., 0.], +// [1., 0.]]) +// >>> torch.where(x > 0, x, y) +// tensor([[ 1.0000, 0.3139], +// [ 0.3898, 1.0000], +// [ 0.0478, 1.0000]]) +// >>> x = torch.randn(2, 2, dtype=torch.double) +// >>> x +// tensor([[ 1.0779, 0.0383], +// [-0.8785, -1.1089]], dtype=torch.float64) +// >>> torch.where(x > 0, x, 0.) +// tensor([[1.0779, 0.0383], +// [0.0000, 0.0000]], dtype=torch.float64) +// +// .. function:: where(condition) -> tuple of LongTensor +// :noindex: +// +// ``torch.where(condition)`` is identical to +// ``torch.nonzero(condition, as_tuple=True)``. +// +// .. note:: +// See also :func:`torch.nonzero`. +// +// +//go:linkname Where py.where +func Where(condition *py.Object, input *py.Object, other *py.Object) *py.Object +// +// xlogy(input, other, *, out=None) -> Tensor +// +// Alias for :func:`torch.special.xlogy`. +// +// +//go:linkname Xlogy py.xlogy +func Xlogy(input *py.Object, other *py.Object) *py.Object +// None +// +//go:linkname Xlogy_ py.xlogy_ +func Xlogy_(__llgo_va_list ...interface{}) *py.Object +// None +// +//go:linkname Zero_ py.zero_ +func Zero_(__llgo_va_list ...interface{}) *py.Object +// +// zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +// +// Returns a tensor filled with the scalar value `0`, with the shape defined +// by the variable argument :attr:`size`. +// +// Args: +// size (int...): a sequence of integers defining the shape of the output tensor. +// Can be a variable number of arguments or a collection like a list or tuple. +// +// Keyword args: +// out (Tensor, optional): the output tensor. 
+// dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. +// Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). +// layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. +// Default: ``torch.strided``. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, uses the current device for the default tensor type +// (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU +// for CPU tensor types and the current CUDA device for CUDA tensor types. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// +// Example:: +// +// >>> torch.zeros(2, 3) +// tensor([[ 0., 0., 0.], +// [ 0., 0., 0.]]) +// +// >>> torch.zeros(5) +// tensor([ 0., 0., 0., 0., 0.]) +// +// +//go:linkname Zeros py.zeros +func Zeros(__llgo_va_list ...interface{}) *py.Object +// +// zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor +// +// Returns a tensor filled with the scalar value `0`, with the same size as +// :attr:`input`. ``torch.zeros_like(input)`` is equivalent to +// ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. +// +// .. warning:: +// As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, +// the old ``torch.zeros_like(input, out=output)`` is equivalent to +// ``torch.zeros(input.size(), out=output)``. +// +// Args: +// input (Tensor): the size of :attr:`input` will determine size of the output tensor. +// +// Keyword args: +// dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. +// Default: if ``None``, defaults to the dtype of :attr:`input`. +// layout (:class:`torch.layout`, optional): the desired layout of returned tensor. 
+// Default: if ``None``, defaults to the layout of :attr:`input`. +// device (:class:`torch.device`, optional): the desired device of returned tensor. +// Default: if ``None``, defaults to the device of :attr:`input`. +// requires_grad (bool, optional): If autograd should record operations on the +// returned tensor. Default: ``False``. +// memory_format (:class:`torch.memory_format`, optional): the desired memory format of +// returned Tensor. Default: ``torch.preserve_format``. +// +// Example:: +// +// >>> input = torch.empty(2, 3) +// >>> torch.zeros_like(input) +// tensor([[ 0., 0., 0.], +// [ 0., 0., 0.]]) +// +// +//go:linkname ZerosLike py.zeros_like +func ZerosLike(input *py.Object) *py.Object +// broadcast_shapes(*shapes) -> Size +// +// Similar to :func:`broadcast_tensors` but for shapes. +// +// This is equivalent to +// ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape`` +// but avoids the need create to intermediate tensors. This is useful for +// broadcasting tensors of common batch shape but different rightmost shape, +// e.g. to broadcast mean vectors with covariance matrices. +// +// Example:: +// +// >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1)) +// torch.Size([1, 3, 2]) +// +// Args: +// \*shapes (torch.Size): Shapes of tensors. +// +// Returns: +// shape (torch.Size): A shape compatible with all input shapes. +// +// Raises: +// RuntimeError: If shapes are incompatible. +// +// +//go:linkname BroadcastShapes py.broadcast_shapes +func BroadcastShapes(__llgo_va_list ...interface{}) *py.Object +// Computes the LU factorization of a matrix or batches of matrices +// :attr:`A`. Returns a tuple containing the LU factorization and +// pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to +// ``True``. +// +// .. warning:: +// +// :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor` +// and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a +// future PyTorch release. 
+// ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with +// +// .. code:: python +// +// LU, pivots = torch.linalg.lu_factor(A, compute_pivots) +// +// ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with +// +// .. code:: python +// +// LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots) +// +// .. note:: +// * The returned permutation matrix for every matrix in the batch is +// represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``. +// ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm, +// the ``i``-th row was permuted with the ``j-1``-th row. +// * LU factorization with :attr:`pivot` = ``False`` is not available +// for CPU, and attempting to do so will throw an error. However, +// LU factorization with :attr:`pivot` = ``False`` is available for +// CUDA. +// * This function does not check if the factorization was successful +// or not if :attr:`get_infos` is ``True`` since the status of the +// factorization is present in the third element of the return tuple. +// * In the case of batches of square matrices with size less or equal +// to 32 on a CUDA device, the LU factorization is repeated for +// singular matrices due to the bug in the MAGMA library +// (see magma issue 13). +// * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. +// +// .. warning:: +// The gradients of this function will only be finite when :attr:`A` is full rank. +// This is because the LU decomposition is just differentiable at full rank matrices. +// Furthermore, if :attr:`A` is close to not being full rank, +// the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`. +// +// Args: +// A (Tensor): the tensor to factor of size :math:`(*, m, n)` +// pivot (bool, optional): controls whether pivoting is done. 
Default: ``True`` +// get_infos (bool, optional): if set to ``True``, returns an info IntTensor. +// Default: ``False`` +// out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, +// then the elements in the tuple are Tensor, IntTensor, +// and IntTensor. If :attr:`get_infos` is ``False``, then the +// elements in the tuple are Tensor, IntTensor. Default: ``None`` +// +// Returns: +// (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing +// +// - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)` +// +// - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`. +// ``pivots`` stores all the intermediate transpositions of rows. +// The final permutation ``perm`` could be reconstructed by +// applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``, +// where ``perm`` is initially the identity permutation of :math:`m` elements +// (essentially this is what :func:`torch.lu_unpack` is doing). +// +// - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of +// size :math:`(*)` where non-zero values indicate whether factorization for the matrix or +// each minibatch has succeeded or failed +// +// Example:: +// +// >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) +// >>> # xdoctest: +IGNORE_WANT("non-deterministic") +// >>> A = torch.randn(2, 3, 3) +// >>> A_LU, pivots = torch.lu(A) +// >>> A_LU +// tensor([[[ 1.3506, 2.5558, -0.0816], +// [ 0.1684, 1.1551, 0.1940], +// [ 0.1193, 0.6189, -0.5497]], +// +// [[ 0.4526, 1.2526, -0.3285], +// [-0.7988, 0.7175, -0.9701], +// [ 0.2634, -0.9255, -0.3459]]]) +// >>> pivots +// tensor([[ 3, 3, 3], +// [ 3, 3, 3]], dtype=torch.int32) +// >>> A_LU, pivots, info = torch.lu(A, get_infos=True) +// >>> if info.nonzero().size(0) == 0: +// ... print('LU factorization succeeded for all samples!') +// LU factorization succeeded for all samples! 
+// +// +//go:linkname Lu py.lu +func Lu(__llgo_va_list ...interface{}) *py.Object +// Performs linear Principal Component Analysis (PCA) on a low-rank +// matrix, batches of such matrices, or sparse matrix. +// +// This function returns a namedtuple ``(U, S, V)`` which is the +// nearly optimal approximation of a singular value decomposition of +// a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`. +// +// .. note:: The relation of ``(U, S, V)`` to PCA is as follows: +// +// - :math:`A` is a data matrix with ``m`` samples and +// ``n`` features +// +// - the :math:`V` columns represent the principal directions +// +// - :math:`S ** 2 / (m - 1)` contains the eigenvalues of +// :math:`A^T A / (m - 1)` which is the covariance of +// ``A`` when ``center=True`` is provided. +// +// - ``matmul(A, V[:, :k])`` projects data to the first k +// principal components +// +// .. note:: Different from the standard SVD, the size of returned +// matrices depend on the specified rank and q +// values as follows: +// +// - :math:`U` is m x q matrix +// +// - :math:`S` is q-vector +// +// - :math:`V` is n x q matrix +// +// .. note:: To obtain repeatable results, reset the seed for the +// pseudorandom number generator +// +// Args: +// +// A (Tensor): the input tensor of size :math:`(*, m, n)` +// +// q (int, optional): a slightly overestimated rank of +// :math:`A`. By default, ``q = min(6, m, +// n)``. +// +// center (bool, optional): if True, center the input tensor, +// otherwise, assume that the input is +// centered. +// +// niter (int, optional): the number of subspace iterations to +// conduct; niter must be a nonnegative +// integer, and defaults to 2. +// +// References:: +// +// - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding +// structure with randomness: probabilistic algorithms for +// constructing approximate matrix decompositions, +// arXiv:0909.4061 [math.NA; math.PR], 2009 (available at +// `arXiv `_). 
+// +// +// +//go:linkname PcaLowrank py.pca_lowrank +func PcaLowrank(A *py.Object, q *py.Object, center *py.Object, niter *py.Object) *py.Object +// Return the singular value decomposition ``(U, S, V)`` of a matrix, +// batches of matrices, or a sparse matrix :math:`A` such that +// :math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then +// SVD is computed for the matrix :math:`A - M`. +// +// .. note:: The implementation is based on the Algorithm 5.1 from +// Halko et al, 2009. +// +// .. note:: To obtain repeatable results, reset the seed for the +// pseudorandom number generator +// +// .. note:: The input is assumed to be a low-rank matrix. +// +// .. note:: In general, use the full-rank SVD implementation +// :func:`torch.linalg.svd` for dense matrices due to its 10-fold +// higher performance characteristics. The low-rank SVD +// will be useful for huge sparse matrices that +// :func:`torch.linalg.svd` cannot handle. +// +// Args:: +// A (Tensor): the input tensor of size :math:`(*, m, n)` +// +// q (int, optional): a slightly overestimated rank of A. +// +// niter (int, optional): the number of subspace iterations to +// conduct; niter must be a nonnegative +// integer, and defaults to 2 +// +// M (Tensor, optional): the input tensor's mean of size +// :math:`(*, 1, n)`. +// +// References:: +// - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding +// structure with randomness: probabilistic algorithms for +// constructing approximate matrix decompositions, +// arXiv:0909.4061 [math.NA; math.PR], 2009 (available at +// `arXiv `_). +// +// +// +//go:linkname SvdLowrank py.svd_lowrank +func SvdLowrank(A *py.Object, q *py.Object, niter *py.Object, M *py.Object) *py.Object +// unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor] +// +// Returns the unique elements of the input tensor. +// +// .. 
note:: This function is different from :func:`torch.unique_consecutive` in the sense that +// this function also eliminates non-consecutive duplicate values. +// +// .. note:: Currently in the CUDA implementation and the CPU implementation, +// `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument. +// Sorting could be slow, so if your input tensor is already sorted, it is recommended to use +// :func:`torch.unique_consecutive` which avoids the sorting. +// +// Args: +// input (Tensor): the input tensor +// sorted (bool): Whether to sort the unique elements in ascending order +// before returning as output. +// return_inverse (bool): Whether to also return the indices for where +// elements in the original input ended up in the returned unique list. +// return_counts (bool): Whether to also return the counts for each unique +// element. +// dim (int, optional): the dimension to operate upon. If ``None``, the +// unique of the flattened input is returned. Otherwise, each of the +// tensors indexed by the given dimension is treated as one of the +// elements to apply the unique operation upon. See examples for more +// details. Default: ``None`` +// +// Returns: +// (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing +// +// - **output** (*Tensor*): the output list of unique scalar elements. +// - **inverse_indices** (*Tensor*): (optional) if +// :attr:`return_inverse` is True, there will be an additional +// returned tensor (same shape as input) representing the indices +// for where elements in the original input map to in the output; +// otherwise, this function will only return a single tensor. +// - **counts** (*Tensor*): (optional) if +// :attr:`return_counts` is True, there will be an additional +// returned tensor (same shape as output or output.size(dim), +// if dim was specified) representing the number of occurrences +// for each unique value or tensor. 
+// +// Example:: +// +// >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long)) +// >>> output +// tensor([1, 2, 3]) +// +// >>> output, inverse_indices = torch.unique( +// ... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True) +// >>> output +// tensor([1, 2, 3]) +// >>> inverse_indices +// tensor([0, 2, 1, 2]) +// +// >>> output, inverse_indices = torch.unique( +// ... torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True) +// >>> output +// tensor([1, 2, 3]) +// >>> inverse_indices +// tensor([[0, 2], +// [1, 2]]) +// +// >>> a = torch.tensor([ +// ... [ +// ... [1, 1, 0, 0], +// ... [1, 1, 0, 0], +// ... [0, 0, 1, 1], +// ... ], +// ... [ +// ... [0, 0, 1, 1], +// ... [0, 0, 1, 1], +// ... [1, 1, 1, 1], +// ... ], +// ... [ +// ... [1, 1, 0, 0], +// ... [1, 1, 0, 0], +// ... [0, 0, 1, 1], +// ... ], +// ... ]) +// +// >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]` +// >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match +// >>> # each other, so one of them will be removed. +// >>> (a[0, :, :] == a[2, :, :]).all() +// tensor(True) +// >>> a_unique_dim0 = torch.unique(a, dim=0) +// >>> a_unique_dim0 +// tensor([[[0, 0, 1, 1], +// [0, 0, 1, 1], +// [1, 1, 1, 1]], +// [[1, 1, 0, 0], +// [1, 1, 0, 0], +// [0, 0, 1, 1]]]) +// +// >>> # Notice which sub-tensors from `a` match with the sub-tensors from +// >>> # `a_unique_dim0`: +// >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all() +// tensor(True) +// >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all() +// tensor(True) +// +// >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are +// >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of +// >>> # them will be removed. 
+// >>> (a[:, 0, :] == a[:, 1, :]).all() +// tensor(True) +// >>> torch.unique(a, dim=1) +// tensor([[[0, 0, 1, 1], +// [1, 1, 0, 0]], +// [[1, 1, 1, 1], +// [0, 0, 1, 1]], +// [[0, 0, 1, 1], +// [1, 1, 0, 0]]]) +// +// >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared. +// >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and +// >>> # `a[:, :, 3]` match each other as well. So in this case, two of the +// >>> # sub-tensors will be removed. +// >>> (a[:, :, 0] == a[:, :, 1]).all() +// tensor(True) +// >>> (a[:, :, 2] == a[:, :, 3]).all() +// tensor(True) +// >>> torch.unique(a, dim=2) +// tensor([[[0, 1], +// [0, 1], +// [1, 0]], +// [[1, 0], +// [1, 0], +// [1, 1]], +// [[0, 1], +// [0, 1], +// [1, 0]]]) +// +// +//go:linkname Unique py.unique +func Unique(input *py.Object, sorted *py.Object, returnInverse *py.Object, returnCounts *py.Object, dim *py.Object) *py.Object +// Converts a tensor of flat indices into a tuple of coordinate tensors that +// index into an arbitrary tensor of the specified shape. +// +// Args: +// indices (Tensor): An integer tensor containing indices into the +// flattened version of an arbitrary tensor of shape :attr:`shape`. +// All elements must be in the range ``[0, prod(shape) - 1]``. +// +// shape (int, sequence of ints, or torch.Size): The shape of the arbitrary +// tensor. All elements must be non-negative. +// +// Returns: +// tuple of Tensors: Each ``i``-th tensor in the ouput corresponds with +// dimension ``i`` of :attr:`shape`. Each tensor has the same shape as +// ``indices`` and contains one index into dimension ``i`` for each of the +// flat indices given by ``indices``. 
+// +// Example:: +// +// >>> import torch +// >>> torch.unravel_index(torch.tensor(4), (3, 2)) +// (tensor(2), +// tensor(0)) +// +// >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2)) +// (tensor([2, 0]), +// tensor([0, 1])) +// +// >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2)) +// (tensor([0, 0, 1, 1, 2, 2]), +// tensor([0, 1, 0, 1, 0, 1])) +// +// >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10)) +// (tensor([1, 5]), +// tensor([2, 6]), +// tensor([3, 7]), +// tensor([4, 8])) +// +// >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10)) +// (tensor([[1], [5]]), +// tensor([[2], [6]]), +// tensor([[3], [7]]), +// tensor([[4], [8]])) +// +// >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100)) +// (tensor([[12], [56]]), +// tensor([[34], [78]])) +// +// +//go:linkname UnravelIndex py.unravel_index +func UnravelIndex(indices *py.Object, shape *py.Object) *py.Object +// Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1 +// +//go:linkname CompiledWithCxx11Abi py.compiled_with_cxx11_abi +func CompiledWithCxx11Abi() *py.Object +// Find the k largest (or smallest) eigenvalues and the corresponding +// eigenvectors of a symmetric positive definite generalized +// eigenvalue problem using matrix-free LOBPCG methods. +// +// This function is a front-end to the following LOBPCG algorithms +// selectable via `method` argument: +// +// `method="basic"` - the LOBPCG method introduced by Andrew +// Knyazev, see [Knyazev2001]. A less robust method, may fail when +// Cholesky is applied to singular input. +// +// `method="ortho"` - the LOBPCG method with orthogonal basis +// selection [StathopoulosEtal2002]. A robust method. +// +// Supported inputs are dense, sparse, and batches of dense matrices. +// +// .. note:: In general, the basic method spends least time per +// iteration. However, the robust methods converge much faster and +// are more stable. 
So, the usage of the basic method is generally +// not recommended but there exist cases where the usage of the +// basic method may be preferred. +// +// .. warning:: The backward method does not support sparse and complex inputs. +// It works only when `B` is not provided (i.e. `B == None`). +// We are actively working on extensions, and the details of +// the algorithms are going to be published promptly. +// +// .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not. +// To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric +// in first-order optimization routines, prior to running `lobpcg` +// we do the following symmetrization map: `A -> (A + A.t()) / 2`. +// The map is performed only when the `A` requires gradients. +// +// Args: +// +// A (Tensor): the input tensor of size :math:`(*, m, m)` +// +// B (Tensor, optional): the input tensor of size :math:`(*, m, +// m)`. When not specified, `B` is interpreted as +// identity matrix. +// +// X (tensor, optional): the input tensor of size :math:`(*, m, n)` +// where `k <= n <= m`. When specified, it is used as +// initial approximation of eigenvectors. X must be a +// dense tensor. +// +// iK (tensor, optional): the input tensor of size :math:`(*, m, +// m)`. When specified, it will be used as preconditioner. +// +// k (integer, optional): the number of requested +// eigenpairs. Default is the number of :math:`X` +// columns (when specified) or `1`. +// +// n (integer, optional): if :math:`X` is not specified then `n` +// specifies the size of the generated random +// approximation of eigenvectors. Default value for `n` +// is `k`. If :math:`X` is specified, the value of `n` +// (when specified) must be the number of :math:`X` +// columns. +// +// tol (float, optional): residual tolerance for stopping +// criterion. Default is `feps ** 0.5` where `feps` is +// smallest non-zero floating-point number of the given +// input tensor `A` data type. 
+// +// largest (bool, optional): when True, solve the eigenproblem for +// the largest eigenvalues. Otherwise, solve the +// eigenproblem for smallest eigenvalues. Default is +// `True`. +// +// method (str, optional): select LOBPCG method. See the +// description of the function above. Default is +// "ortho". +// +// niter (int, optional): maximum number of iterations. When +// reached, the iteration process is hard-stopped and +// the current approximation of eigenpairs is returned. +// For infinite iteration but until convergence criteria +// is met, use `-1`. +// +// tracker (callable, optional) : a function for tracing the +// iteration process. When specified, it is called at +// each iteration step with LOBPCG instance as an +// argument. The LOBPCG instance holds the full state of +// the iteration process in the following attributes: +// +// `iparams`, `fparams`, `bparams` - dictionaries of +// integer, float, and boolean valued input +// parameters, respectively +// +// `ivars`, `fvars`, `bvars`, `tvars` - dictionaries +// of integer, float, boolean, and Tensor valued +// iteration variables, respectively. +// +// `A`, `B`, `iK` - input Tensor arguments. +// +// `E`, `X`, `S`, `R` - iteration Tensor variables. +// +// For instance: +// +// `ivars["istep"]` - the current iteration step +// `X` - the current approximation of eigenvectors +// `E` - the current approximation of eigenvalues +// `R` - the current residual +// `ivars["converged_count"]` - the current number of converged eigenpairs +// `tvars["rerr"]` - the current state of convergence criteria +// +// Note that when `tracker` stores Tensor objects from +// the LOBPCG instance, it must make copies of these. +// +// If `tracker` sets `bvars["force_stop"] = True`, the +// iteration process will be hard-stopped. +// +// ortho_iparams, ortho_fparams, ortho_bparams (dict, optional): +// various parameters to LOBPCG algorithm when using +// `method="ortho"`. 
+// +// Returns: +// +// E (Tensor): tensor of eigenvalues of size :math:`(*, k)` +// +// X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)` +// +// References: +// +// [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal +// Preconditioned Eigensolver: Locally Optimal Block Preconditioned +// Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2), +// 517-541. (25 pages) +// https://epubs.siam.org/doi/abs/10.1137/S1064827500366124 +// +// [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng +// Wu. (2002) A Block Orthogonalization Procedure with Constant +// Synchronization Requirements. SIAM J. Sci. Comput., 23(6), +// 2165-2182. (18 pages) +// https://epubs.siam.org/doi/10.1137/S1064827500370883 +// +// [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming +// Gu. (2018) A Robust and Efficient Implementation of LOBPCG. +// SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages) +// https://epubs.siam.org/doi/abs/10.1137/17M1129830 +// +// +// +//go:linkname Lobpcg py.lobpcg +func Lobpcg(A *py.Object, k *py.Object, B *py.Object, X *py.Object, n *py.Object, iK *py.Object, niter *py.Object, tol *py.Object, largest *py.Object, method *py.Object, tracker *py.Object, orthoIparams *py.Object, orthoFparams *py.Object, orthoBparams *py.Object) *py.Object +// from_dlpack(ext_tensor) -> Tensor +// +// Converts a tensor from an external library into a ``torch.Tensor``. +// +// The returned PyTorch tensor will share the memory with the input tensor +// (which may have come from another library). Note that in-place operations +// will therefore also affect the data of the input tensor. This may lead to +// unexpected issues (e.g., other libraries may have read-only flags or +// immutable data structures), so the user should only do this if they know +// for sure that this is fine. +// +// Args: +// ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule): +// The tensor or DLPack capsule to convert. 
+// +// If ``ext_tensor`` is a tensor (or ndarray) object, it must support +// the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__`` +// method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is +// an opaque ``PyCapsule`` instance, typically produced by a +// ``to_dlpack`` function or method. +// +// Examples:: +// +// >>> import torch.utils.dlpack +// >>> t = torch.arange(4) +// +// # Convert a tensor directly (supported in PyTorch >= 1.10) +// >>> t2 = torch.from_dlpack(t) +// >>> t2[:2] = -1 # show that memory is shared +// >>> t2 +// tensor([-1, -1, 2, 3]) +// >>> t +// tensor([-1, -1, 2, 3]) +// +// # The old-style DLPack usage, with an intermediate capsule object +// >>> capsule = torch.utils.dlpack.to_dlpack(t) +// >>> capsule +// +// >>> t3 = torch.from_dlpack(capsule) +// >>> t3 +// tensor([-1, -1, 2, 3]) +// >>> t3[0] = -9 # now we're sharing memory between 3 tensors +// >>> t3 +// tensor([-9, -1, 2, 3]) +// >>> t2 +// tensor([-9, -1, 2, 3]) +// >>> t +// tensor([-9, -1, 2, 3]) +// +// +// +//go:linkname FromDlpack py.from_dlpack +func FromDlpack(extTensor *py.Object) *py.Object +// to_dlpack(tensor) -> PyCapsule +// +// Returns an opaque object (a "DLPack capsule") representing the tensor. +// +// .. note:: +// ``to_dlpack`` is a legacy DLPack interface. The capsule it returns +// cannot be used for anything in Python other than use it as input to +// ``from_dlpack``. The more idiomatic use of DLPack is to call +// ``from_dlpack`` directly on the tensor object - this works when that +// object has a ``__dlpack__`` method, which PyTorch and most other +// libraries indeed have now. +// +// .. warning:: +// Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``. +// Behavior when a capsule is consumed multiple times is undefined. +// +// Args: +// tensor: a tensor to be exported +// +// The DLPack capsule shares the tensor's memory. 
+// +// +//go:linkname ToDlpack py.to_dlpack +func ToDlpack(tensor *py.Object) *py.Object +// None +// +//go:linkname MatrixRank py.matrix_rank +func MatrixRank(input *py.Object, tol *py.Object, symmetric *py.Object) *py.Object +// None +// +//go:linkname Eig py.eig +func Eig(self *py.Object, eigenvectors *py.Object) *py.Object +// None +// +//go:linkname Solve py.solve +func Solve(input *py.Object, A *py.Object) *py.Object +// None +// +//go:linkname Lstsq py.lstsq +func Lstsq(input *py.Object, A *py.Object) *py.Object +// None +// +//go:linkname Symeig py.symeig +func Symeig(input *py.Object, eigenvectors *py.Object, upper *py.Object) *py.Object +// +// Optimizes given model/function using TorchDynamo and specified backend. +// +// Concretely, for every frame executed within the compiled region, we will attempt +// to compile it and cache the compiled result on the code object for future +// use. A single frame may be compiled multiple times if previous compiled +// results are not applicable for subsequent calls (this is called a "guard +// failure), you can use TORCH_LOGS=guards to debug these situations. +// Multiple compiled results can be associated with a frame up to +// ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which +// point we will fall back to eager. Note that compile caches are per +// *code object*, not frame; if you dynamically create multiple copies of a +// function, they will all share the same code cache. +// +// Args: +// model (Callable): Module/function to optimize +// fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions +// in the function that it will optimize. If True, then we require that the entire function be +// capturable into a single graph. If this is not possible (that is, if there are graph breaks), +// then this will raise an error. +// dynamic (bool or None): Use dynamic shape tracing. 
When this is True, we will up-front attempt +// to generate a kernel that is as dynamic as possible to avoid recompilations when +// sizes change. This may not always work as some operations/optimizations will +// force specialization; use TORCH_LOGS=dynamic to debug overspecialization. +// When this is False, we will NEVER generate dynamic kernels, we will always specialize. +// By default (None), we automatically detect if dynamism has occurred and compile a more +// dynamic kernel upon recompile. +// backend (str or Callable): backend to be used +// +// - "inductor" is the default backend, which is a good balance between performance and overhead +// +// - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()` +// +// - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)` +// +// - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html +// mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs" +// +// - "default" is the default mode, which is a good balance between performance and overhead +// +// - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs, +// useful for small batches. Reduction of overhead can come at the cost of more memory +// usage, as we will cache the workspace memory required for the invocation so that we +// do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed +// to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs. +// There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints +// to debug. +// +// - "max-autotune" is a mode that leverages Triton based matrix multiplications and convolutions +// It enables CUDA graphs by default. 
+// +// - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs +// +// - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()` +// +// options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are +// +// - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set +// +// - `max_autotune` which will profile to pick the best matmul configuration +// +// - `fallback_random` which is useful when debugging accuracy issues +// +// - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores +// +// - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs +// +// - `trace.enabled` which is the most useful debugging flag to turn on +// +// - `trace.graph_diagram` which will show you a picture of your graph after fusion +// +// - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()` +// disable (bool): Turn torch.compile() into a no-op for testing +// +// Example:: +// +// @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True) +// def foo(x): +// return torch.sin(x) + torch.cos(x) +// +// +// +//go:linkname Compile py.compile +func Compile(model *py.Object) *py.Object +// +// Conditionally applies `true_fn` or `false_fn`. +// +// .. warning:: +// `torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types and +// doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch. +// Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype +// +// `cond` is structured control flow operator. 
That is, it is like a Python if-statement, +// but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be +// capturable using torch.compile and torch.export. +// +// Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following:: +// +// def cond(pred, true_branch, false_branch, operands): +// if pred: +// return true_branch(*operands) +// else: +// return false_branch(*operands) +// +// Args: +// pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element, +// indicating which branch function to apply. +// +// true_fn (Callable): A callable function (a -> b) that is within the +// scope that is being traced. +// +// false_fn (Callable): A callable function (a -> b) that is within the +// scope that is being traced. The true branch and false branch must +// have consistent input and outputs, meaning the inputs have to be +// the same, and the outputs have to be the same type and shape. +// +// operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the true/false functions. +// +// Example:: +// +// def true_fn(x: torch.Tensor): +// return x.cos() +// def false_fn(x: torch.Tensor): +// return x.sin() +// return cond(x.shape[0] > 4, true_fn, false_fn, (x,)) +// +// Restrictions: +// - The conditional statement (aka `pred`) must meet one of the following constraints: +// +// - It's a `torch.Tensor` with only one element, and torch.bool dtype +// +// - It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10` +// +// - The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints: +// +// - The function signature must match with operands. +// +// - The function must return a tensor with the same metadata, e.g. shape, +// dtype, etc. +// +// - The function cannot have in-place mutations on inputs or global variables. 
+// (Note: in-place tensor operations such as `add_` for intermediate results +// are allowed in a branch) +// +// .. warning:: +// Temporal Limitations: +// +// - `cond` only supports **inference** right now. Autograd will be supported in the future. +// +// - The **output** of branches must be a **single Tensor**. Pytree of tensors will be supported in the future. +// +// +// +//go:linkname Cond py.cond +func Cond(pred *py.Object, trueFn *py.Object, falseFn *py.Object, operands *py.Object) *py.Object +// +// vmap is the vectorizing map; ``vmap(func)`` returns a new function that +// maps ``func`` over some dimension of the inputs. Semantically, vmap +// pushes the map into PyTorch operations called by ``func``, effectively +// vectorizing those operations. +// +// vmap is useful for handling batch dimensions: one can write a function +// ``func`` that runs on examples and then lift it to a function that can +// take batches of examples with ``vmap(func)``. vmap can also be used to +// compute batched gradients when composed with autograd. +// +// .. note:: +// :func:`torch.vmap` is aliased to :func:`torch.func.vmap` for +// convenience. Use whichever one you'd like. +// +// Args: +// func (function): A Python function that takes one or more arguments. +// Must return one or more Tensors. +// in_dims (int or nested structure): Specifies which dimension of the +// inputs should be mapped over. ``in_dims`` should have a +// structure like the inputs. If the ``in_dim`` for a particular +// input is None, then that indicates there is no map dimension. +// Default: 0. +// out_dims (int or Tuple[int]): Specifies where the mapped dimension +// should appear in the outputs. If ``out_dims`` is a Tuple, then +// it should have one element per output. Default: 0. +// randomness (str): Specifies whether the randomness in this +// vmap should be the same or different across batches. If 'different', +// the randomness for each batch will be different. 
If 'same', the +// randomness will be the same across batches. If 'error', any calls to +// random functions will error. Default: 'error'. WARNING: this flag +// only applies to random PyTorch operations and does not apply to +// Python's random module or numpy randomness. +// chunk_size (None or int): If None (default), apply a single vmap over inputs. +// If not None, then compute the vmap :attr:`chunk_size` samples at a time. +// Note that :attr:`chunk_size=1` is equivalent to computing the vmap with a for-loop. +// If you run into memory issues computing the vmap, please try a non-None chunk_size. +// +// Returns: +// Returns a new "batched" function. It takes the same inputs as +// ``func``, except each input has an extra dimension at the index +// specified by ``in_dims``. It takes returns the same outputs as +// ``func``, except each output has an extra dimension at the index +// specified by ``out_dims``. +// +// .. warning: +// :func:`vmap` works best with functional-style code. Please do not +// perform any side-effects in ``func``, with the exception of +// in-place PyTorch operations. Examples of side-effects include mutating +// Python data structures and assigning values to variables not captured +// in ``func``. +// +// One example of using :func:`vmap` is to compute batched dot products. PyTorch +// doesn't provide a batched ``torch.dot`` API; instead of unsuccessfully +// rummaging through docs, use :func:`vmap` to construct a new function. +// +// >>> torch.dot # [D], [D] -> [] +// >>> batched_dot = torch.func.vmap(torch.dot) # [N, D], [N, D] -> [N] +// >>> x, y = torch.randn(2, 5), torch.randn(2, 5) +// >>> batched_dot(x, y) +// +// :func:`vmap` can be helpful in hiding batch dimensions, leading to a simpler +// model authoring experience. 
+// +// >>> batch_size, feature_size = 3, 5 +// >>> weights = torch.randn(feature_size, requires_grad=True) +// >>> +// >>> def model(feature_vec): +// >>> # Very simple linear model with activation +// >>> return feature_vec.dot(weights).relu() +// >>> +// >>> examples = torch.randn(batch_size, feature_size) +// >>> result = torch.vmap(model)(examples) +// +// :func:`vmap` can also help vectorize computations that were previously difficult +// or impossible to batch. One example is higher-order gradient computation. +// The PyTorch autograd engine computes vjps (vector-Jacobian products). +// Computing a full Jacobian matrix for some function f: R^N -> R^N usually +// requires N calls to ``autograd.grad``, one per Jacobian row. Using :func:`vmap`, +// we can vectorize the whole computation, computing the Jacobian in a single +// call to ``autograd.grad``. +// +// >>> # Setup +// >>> N = 5 +// >>> f = lambda x: x ** 2 +// >>> x = torch.randn(N, requires_grad=True) +// >>> y = f(x) +// >>> I_N = torch.eye(N) +// >>> +// >>> # Sequential approach +// >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0] +// >>> for v in I_N.unbind()] +// >>> jacobian = torch.stack(jacobian_rows) +// >>> +// >>> # vectorized gradient computation +// >>> def get_vjp(v): +// >>> return torch.autograd.grad(y, x, v) +// >>> jacobian = torch.vmap(get_vjp)(I_N) +// +// :func:`vmap` can also be nested, producing an output with multiple batched dimensions +// +// >>> torch.dot # [D], [D] -> [] +// >>> batched_dot = torch.vmap(torch.vmap(torch.dot)) # [N1, N0, D], [N1, N0, D] -> [N1, N0] +// >>> x, y = torch.randn(2, 3, 5), torch.randn(2, 3, 5) +// >>> batched_dot(x, y) # tensor of size [2, 3] +// +// If the inputs are not batched along the first dimension, ``in_dims`` specifies +// the dimension that each inputs are batched along as +// +// >>> torch.dot # [N], [N] -> [] +// >>> batched_dot = torch.vmap(torch.dot, in_dims=1) # [N, D], [N, D] -> [D] +// >>> x, y = 
torch.randn(2, 5), torch.randn(2, 5) +// >>> batched_dot(x, y) # output is [5] instead of [2] if batched along the 0th dimension +// +// If there are multiple inputs each of which is batched along different dimensions, +// ``in_dims`` must be a tuple with the batch dimension for each input as +// +// >>> torch.dot # [D], [D] -> [] +// >>> batched_dot = torch.vmap(torch.dot, in_dims=(0, None)) # [N, D], [D] -> [N] +// >>> x, y = torch.randn(2, 5), torch.randn(5) +// >>> batched_dot(x, y) # second arg doesn't have a batch dim because in_dim[1] was None +// +// If the input is a Python struct, ``in_dims`` must be a tuple containing a struct +// matching the shape of the input: +// +// >>> f = lambda dict: torch.dot(dict['x'], dict['y']) +// >>> x, y = torch.randn(2, 5), torch.randn(5) +// >>> input = {'x': x, 'y': y} +// >>> batched_dot = torch.vmap(f, in_dims=({'x': 0, 'y': None},)) +// >>> batched_dot(input) +// +// By default, the output is batched along the first dimension. However, it can be batched +// along any dimension by using ``out_dims`` +// +// >>> f = lambda x: x ** 2 +// >>> x = torch.randn(2, 5) +// >>> batched_pow = torch.vmap(f, out_dims=1) +// >>> batched_pow(x) # [5, 2] +// +// For any function that uses kwargs, the returned function will not batch the kwargs but will +// accept kwargs +// +// >>> x = torch.randn([2, 5]) +// >>> def fn(x, scale=4.): +// >>> return x * scale +// >>> +// >>> batched_pow = torch.vmap(fn) +// >>> assert torch.allclose(batched_pow(x), x * 4) +// >>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5] +// +// .. note:: +// vmap does not provide general autobatching or handle variable-length +// sequences out of the box. 
+// +// +//go:linkname Vmap py.vmap +func Vmap(func_ *py.Object, inDims *py.Object, outDims *py.Object, randomness *py.Object) *py.Object diff --git a/py/tqdm/go.mod b/py/tqdm/go.mod new file mode 100644 index 00000000..050bba12 --- /dev/null +++ b/py/tqdm/go.mod @@ -0,0 +1,5 @@ +module github.com/PengPengPeng717/llpkg/py/tqdm + +go 1.24.5 + +require github.com/goplus/lib v0.3.0 diff --git a/py/tqdm/go.sum b/py/tqdm/go.sum new file mode 100644 index 00000000..54e0f00c --- /dev/null +++ b/py/tqdm/go.sum @@ -0,0 +1,2 @@ +github.com/goplus/lib v0.3.0 h1:y0ZGb5Q/RikW1oMMB4Di7XIZIpuzh/7mlrR8HNbxXCA= +github.com/goplus/lib v0.3.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/py/tqdm/llpkg.cfg b/py/tqdm/llpkg.cfg new file mode 100644 index 00000000..dd54a827 --- /dev/null +++ b/py/tqdm/llpkg.cfg @@ -0,0 +1,17 @@ +{ + "type": "python", + "upstream": { + "installer": { + "name": "pip" + }, + "package": { + "name": "tqdm", + "version": "4.66.3" + } + }, + "llpyg": { + "output_dir": "./test", + "mod_name": "github.com/PengPengPeng717/llpkg/py/tqdm", + "mod_depth": 1 + } +} diff --git a/py/tqdm/llpyg.cfg b/py/tqdm/llpyg.cfg new file mode 100644 index 00000000..9481046e --- /dev/null +++ b/py/tqdm/llpyg.cfg @@ -0,0 +1,7 @@ +{ + "name": "tqdm", + "libName": "tqdm", + "modules": [ + "tqdm" + ] +} diff --git a/py/tqdm/tqdm.go b/py/tqdm/tqdm.go new file mode 100644 index 00000000..5188a2d8 --- /dev/null +++ b/py/tqdm/tqdm.go @@ -0,0 +1,40 @@ +package tqdm + +import ( + "github.com/goplus/lib/py" + _ "unsafe" +) + +const LLGoPackage = "py.tqdm" +// +// Registers the given `tqdm` instance with +// `pandas.core.groupby.DataFrameGroupBy.progress_apply`. 
+// +// +//go:linkname TqdmPandas py.tqdm_pandas +func TqdmPandas(tclass *py.Object) *py.Object +// +// Parameters (internal use only) +// --------- +// fp : file-like object for tqdm +// argv : list (default: sys.argv[1:]) +// +// +//go:linkname Main py.main +func Main(fp *py.Object, argv *py.Object) *py.Object +// Shortcut for `tqdm.gui.tqdm(range(*args), **kwargs)`. +// +//go:linkname Tgrange py.tgrange +func Tgrange(__llgo_va_list ...interface{}) *py.Object +// Shortcut for tqdm(range(*args), **kwargs). +// +//go:linkname Trange py.trange +func Trange(__llgo_va_list ...interface{}) *py.Object +// See tqdm.notebook.tqdm for full documentation +// +//go:linkname TqdmNotebook py.tqdm_notebook +func TqdmNotebook(__llgo_va_list ...interface{}) *py.Object +// Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`. +// +//go:linkname Tnrange py.tnrange +func Tnrange(__llgo_va_list ...interface{}) *py.Object diff --git a/zlib/_demo/crc32demo/demo.go b/zlib/_demo/crc32demo/demo.go deleted file mode 100644 index 7615486c..00000000 --- a/zlib/_demo/crc32demo/demo.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "fmt" - "unsafe" - - "github.com/goplus/llpkg/zlib" -) - -func main() { - ul := zlib.ULong(0) - data := "Hello world" - res := ul.Crc32Z( - (*zlib.Bytef)(unsafe.Pointer(unsafe.StringData(data))), - zlib.ZSizeT(uintptr(len(data))), - ) - fmt.Printf("%08x\n", res) -} diff --git a/zlib/_demo/efficiency/efficiency.go b/zlib/_demo/efficiency/efficiency.go deleted file mode 100644 index 9bd8a57d..00000000 --- a/zlib/_demo/efficiency/efficiency.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "unsafe" - - "github.com/goplus/llpkg/zlib" - - "github.com/goplus/lib/c" -) - -func main() { - txt := []byte("zlib is a software library used for data compression. It was created by Jean-loup Gailly and Mark Adler and first released in 1995. 
zlib is designed to be a free, legally unencumbered—that is, not covered by any patents—alternative to the proprietary DEFLATE compression algorithm, which is often used in software applications for data compression.The library provides functions to compress and decompress data using the DEFLATE algorithm, which is a combination of the LZ77 algorithm and Huffman coding. zlib is notable for its versatility; it can be used in a wide range of applications, from web servers and web clients compressing HTTP data, to the compression of data for storage or transmission in various file formats, such as PNG, ZIP, and GZIP.") - txtLen := zlib.ULong(len(txt)) - - for level := 0; level <= 9; level++ { - cmpSize := zlib.ULongf(zlib.CompressBound(txtLen)) - cmpData := make([]byte, int(cmpSize)) - data := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(cmpData))) - source := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(txt))) - res := zlib.Compress2(data, &cmpSize, source, txtLen, c.Int(level)) - if res != zlib.OK { - c.Printf(c.Str("\nCompression failed at level %d: %d\n"), level, res) - continue - } - - c.Printf(c.Str("Compression level %d: Text length = %d, Compressed size = %d\n"), level, txtLen, cmpSize) - - ucmpSize := zlib.ULongf(txtLen) - ucmp := make([]byte, int(ucmpSize)) - ucmpData := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(ucmp))) - cmpSource := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(cmpData))) - - unRes := zlib.Uncompress(ucmpData, &ucmpSize, cmpSource, zlib.ULong(cmpSize)) - if unRes != zlib.OK { - c.Printf(c.Str("\nDecompression failed at level %d: %d\n"), level, unRes) - continue - } - } -} diff --git a/zlib/_demo/normal/normal.go b/zlib/_demo/normal/normal.go deleted file mode 100644 index e69eb2cb..00000000 --- a/zlib/_demo/normal/normal.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "unsafe" - - "github.com/goplus/llpkg/zlib" - - "github.com/goplus/lib/c" -) - -func main() { - txt := []byte("zlib is a software library used for data 
compression. It was created by Jean-loup Gailly and Mark Adler and first released in 1995. zlib is designed to be a free, legally unencumbered—that is, not covered by any patents—alternative to the proprietary DEFLATE compression algorithm, which is often used in software applications for data compression.The library provides functions to compress and decompress data using the DEFLATE algorithm, which is a combination of the LZ77 algorithm and Huffman coding. zlib is notable for its versatility; it can be used in a wide range of applications, from web servers and web clients compressing HTTP data, to the compression of data for storage or transmission in various file formats, such as PNG, ZIP, and GZIP.") - txtLen := zlib.ULong(len(txt)) - - cmpSize := zlib.ULongf(zlib.CompressBound(txtLen)) - cmpData := make([]byte, int(cmpSize)) - data := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(cmpData))) - txtData := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(txt))) - - res := zlib.Compress(data, &cmpSize, txtData, txtLen) - if res != zlib.OK { - c.Printf(c.Str("\nCompression failed: %d\n"), res) - return - } - - c.Printf(c.Str("Text length = %d, Compressed size = %d\n"), txtLen, cmpSize) - - ucmpSize := zlib.ULongf(txtLen) - ucmp := make([]byte, int(ucmpSize)) - ucmpPtr := (*zlib.Bytef)(unsafe.Pointer(unsafe.SliceData(ucmp))) - - unRes := zlib.Uncompress(ucmpPtr, &ucmpSize, data, zlib.ULong(cmpSize)) - c.Printf(c.Str("Decompression result = %d, Decompressed size %d\n"), unRes, ucmpSize) - - if unRes != zlib.OK { - c.Printf(c.Str("\nDecompression failed: %d\n"), unRes) - return - } - - c.Printf(c.Str("Decompressed data: \n")) - for i := 0; i < int(ucmpSize); i++ { - c.Printf(c.Str("%c"), ucmp[i]) - } -} diff --git a/zlib/go.mod b/zlib/go.mod deleted file mode 100644 index 46b9aa65..00000000 --- a/zlib/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/goplus/llpkg/zlib - -go 1.20 - -require github.com/goplus/lib v0.2.0 diff --git a/zlib/go.sum b/zlib/go.sum deleted 
file mode 100644 index 512980a5..00000000 --- a/zlib/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/goplus/lib v0.2.0 h1:AjqkN1XK5H23wZMMlpaUYAMCDAdSBQ2NMFrLtSh7W4g= -github.com/goplus/lib v0.2.0/go.mod h1:SgJv3oPqLLHCu0gcL46ejOP3x7/2ry2Jtxu7ta32kp0= diff --git a/zlib/llcppg.cfg b/zlib/llcppg.cfg deleted file mode 100644 index acb218f3..00000000 --- a/zlib/llcppg.cfg +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "zlib", - "cflags": "$(pkg-config --cflags zlib)", - "libs": "$(pkg-config --libs zlib)", - "include": [ - "zlib.h", - "zconf.h" - ], - "trimPrefixes": ["Z_","ZLIB_", "zlib"], - "cplusplus": false, - "deps": ["c/os"], - "keepUnderScore": false, - "symMap":{ - "compress":"Compress", - "compress2":"Compress2", - "uncompress":"Uncompress", - "uncompress2":"Uncompress2", - "compressBound":"CompressBound" - } -} diff --git a/zlib/llcppg.pub b/zlib/llcppg.pub deleted file mode 100644 index a43151e2..00000000 --- a/zlib/llcppg.pub +++ /dev/null @@ -1,26 +0,0 @@ -Byte -Bytef -alloc_func AllocFunc -charf Charf -free_func FreeFunc -gzFile GzFile -gzFile_s GzFileS -gz_header GzHeader -gz_header_s GzHeaderS -gz_headerp GzHeaderp -in_func InFunc -internal_state InternalState -intf Intf -out_func OutFunc -uInt UInt -uIntf UIntf -uLong ULong -uLongf ULongf -voidp Voidp -voidpc Voidpc -voidpf Voidpf -z_crc_t ZCrcT -z_size_t ZSizeT -z_stream ZStream -z_stream_s ZStreamS -z_streamp ZStreamp \ No newline at end of file diff --git a/zlib/llpkg.cfg b/zlib/llpkg.cfg deleted file mode 100644 index dfdf09cc..00000000 --- a/zlib/llpkg.cfg +++ /dev/null @@ -1,8 +0,0 @@ -{ - "upstream": { - "package": { - "name": "zlib", - "version": "1.3.1" - } - } -} \ No newline at end of file diff --git a/zlib/zconf.go b/zlib/zconf.go deleted file mode 100644 index 1a863118..00000000 --- a/zlib/zconf.go +++ /dev/null @@ -1,23 +0,0 @@ -package zlib - -import ( - "github.com/goplus/lib/c" - _ "unsafe" -) - -const MAX_MEM_LEVEL = 9 -const MAX_WBITS = 15 - -type ZSizeT c.SizeT -type Byte c.Char -type 
UInt c.Uint -type ULong c.Ulong -type Bytef Byte -type Charf c.Char -type Intf c.Int -type UIntf UInt -type ULongf ULong -type Voidpc c.Pointer -type Voidpf c.Pointer -type Voidp c.Pointer -type ZCrcT c.Uint diff --git a/zlib/zlib.go b/zlib/zlib.go deleted file mode 100644 index 48a051de..00000000 --- a/zlib/zlib.go +++ /dev/null @@ -1,1518 +0,0 @@ -package zlib - -import ( - "github.com/goplus/lib/c" - "github.com/goplus/lib/c/os" - _ "unsafe" -) - -const VERSION = "1.3.1" -const VERNUM = 0x1310 -const VER_MAJOR = 1 -const VER_MINOR = 3 -const VER_REVISION = 1 -const VER_SUBREVISION = 0 -const NO_FLUSH = 0 -const PARTIAL_FLUSH = 1 -const SYNC_FLUSH = 2 -const FULL_FLUSH = 3 -const FINISH = 4 -const BLOCK = 5 -const TREES = 6 -const OK = 0 -const STREAM_END = 1 -const NEED_DICT = 2 -const NO_COMPRESSION = 0 -const BEST_SPEED = 1 -const BEST_COMPRESSION = 9 -const FILTERED = 1 -const HUFFMAN_ONLY = 2 -const RLE = 3 -const FIXED = 4 -const DEFAULT_STRATEGY = 0 -const BINARY = 0 -const TEXT = 1 -const UNKNOWN = 2 -const DEFLATED = 8 -const NULL = 0 - -// llgo:type C -type AllocFunc func(Voidpf, UInt, UInt) Voidpf - -// llgo:type C -type FreeFunc func(Voidpf, Voidpf) - -type InternalState struct { - Unused [8]uint8 -} - -type ZStreamS struct { - NextIn *Bytef - AvailIn UInt - TotalIn ULong - NextOut *Bytef - AvailOut UInt - TotalOut ULong - Msg *c.Char - State *InternalState - Zalloc AllocFunc - Zfree FreeFunc - Opaque Voidpf - DataType c.Int - Adler ULong - Reserved ULong -} -type ZStream ZStreamS -type ZStreamp *ZStream - -/* - gzip header information passed to and from zlib routines. See RFC 1952 - -for more details on the meanings of these fields. 
-*/ -type GzHeaderS struct { - Text c.Int - Time ULong - Xflags c.Int - Os c.Int - Extra *Bytef - ExtraLen UInt - ExtraMax UInt - Name *Bytef - NameMax UInt - Comment *Bytef - CommMax UInt - Hcrc c.Int - Done c.Int -} -type GzHeader GzHeaderS -type GzHeaderp *GzHeader - -/* basic functions */ -//go:linkname Version C.zlibVersion -func Version() *c.Char - -/* -ZEXTERN int ZEXPORT deflateInit(z_streamp strm, int level); - - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. If - zalloc and zfree are set to Z_NULL, deflateInit updates them to use default - allocation functions. total_in, total_out, adler, and msg are initialized. - - The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at all - (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION - requests a default compromise between speed and compression (currently - equivalent to level 6). - - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if level is not a valid compression level, or - Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). msg is set to null - if there is no error message. deflateInit does not perform any compression: - this will be done by deflate(). -*/ -//go:linkname Deflate C.deflate -func Deflate(strm ZStreamp, flush c.Int) c.Int - -/* - deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. deflate performs one or both of the - following actions: - - - Compress more input starting at next_in and update next_in and avail_in - accordingly. 
If not all input can be processed (because there is not - enough room in the output buffer), next_in and avail_in are updated and - processing will resume at this point for the next call of deflate(). - - - Generate more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. - Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary. Some output may be provided even if - flush is zero. - - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating avail_in or avail_out accordingly; avail_out should - never be zero before the call. The application can consume the compressed - output when it wants, for example when the output buffer is full (avail_out - == 0), or after each call of deflate(). If deflate returns Z_OK and with - zero avail_out, it must be called again after making room in the output - buffer because there might be more output pending. See deflatePending(), - which can be used if desired to determine whether or not there is more output - in that case. - - Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumulate before producing output, in order to - maximize compression. - - If the parameter flush is set to Z_SYNC_FLUSH, all pending output is - flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In - particular avail_in is zero after the call if enough output space has been - provided before the call.) Flushing may degrade compression for some - compression algorithms and so it should be used only when necessary. 
This - completes the current deflate block and follows it with an empty stored block - that is three bits plus filler bits to the next byte, followed by four bytes - (00 00 ff ff). - - If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the - output buffer, but the output is not aligned to a byte boundary. All of the - input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. - This completes the current deflate block and follows it with an empty fixed - codes block that is 10 bits long. This assures that enough bytes are output - in order for the decompressor to finish the block before the empty fixed - codes block. - - If flush is set to Z_BLOCK, a deflate block is completed and emitted, as - for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to - seven bits of the current block are held to be written as the next byte after - the next deflate block is completed. In this case, the decompressor may not - be provided enough bits at this point in order to complete decompression of - the data provided so far to the compressor. It may need to wait for the next - block to be emitted. This is for advanced applications that need to control - the emission of deflate blocks. - - If flush is set to Z_FULL_FLUSH, all output is flushed as with - Z_SYNC_FLUSH, and the compression state is reset so that decompression can - restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade - compression. - - If deflate returns with avail_out == 0, this function must be called again - with the same value of the flush parameter and more output space (updated - avail_out), until the flush is complete (deflate returns with non-zero - avail_out). 
In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that - avail_out is greater than six when the flush marker begins, in order to avoid - repeated flush markers upon calling deflate() again when avail_out == 0. - - If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there was - enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this - function must be called again with Z_FINISH and more output space (updated - avail_out) but no more input data, until it returns with Z_STREAM_END or an - error. After deflate has returned Z_STREAM_END, the only possible operations - on the stream are deflateReset or deflateEnd. - - Z_FINISH can be used in the first deflate call after deflateInit if all the - compression is to be done in a single step. In order to complete in one - call, avail_out must be at least the value returned by deflateBound (see - below). Then deflate is guaranteed to return Z_STREAM_END. If not enough - output space is provided, deflate will not return Z_STREAM_END, and it must - be called again as described above. - - deflate() sets strm->adler to the Adler-32 checksum of all input read - so far (that is, total_in bytes). If a gzip stream is being generated, then - strm->adler will be the CRC-32 checksum of the input read so far. (See - deflateInit2 below.) - - deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is - considered binary. This field is only for information purposes and does not - affect the compression algorithm in any manner. 
- - deflate() returns Z_OK if some progress has been made (more input - processed or more output produced), Z_STREAM_END if all input has been - consumed and all output has been produced (only when flush is set to - Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was Z_NULL or the state was inadvertently written over - by the application), or Z_BUF_ERROR if no progress is possible (for example - avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and - deflate() can be called again with more input and more output space to - continue compressing. -*/ -//go:linkname DeflateEnd C.deflateEnd -func DeflateEnd(strm ZStreamp) c.Int - -/* -ZEXTERN int ZEXPORT inflateInit(z_streamp strm); - - Initializes the internal stream state for decompression. The fields - next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. In the current version of inflate, the provided input is not - read or consumed. The allocation of a sliding window will be deferred to - the first call of inflate (if the decompression does not complete on the - first call). If zalloc and zfree are set to Z_NULL, inflateInit updates - them to use default allocation functions. total_in, total_out, adler, and - msg are initialized. - - inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. msg is set to null if - there is no error message. inflateInit does not perform any decompression. - Actual decompression will be done by inflate(). So next_in, and avail_in, - next_out, and avail_out are unused and unchanged. The current - implementation of inflateInit() does not process any header information -- - that is deferred until inflate() is called. 
-*/ -//go:linkname Inflate C.inflate -func Inflate(strm ZStreamp, flush c.Int) c.Int - -/* - inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. inflate performs one or both of the - following actions: - - - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), then next_in and avail_in are updated - accordingly, and processing will resume at this point for the next call of - inflate(). - - - Generate more output starting at next_out and update next_out and avail_out - accordingly. inflate() provides as much output as possible, until there is - no more input data or no more space in the output buffer (see below about - the flush parameter). - - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming more - output, and updating the next_* and avail_* values accordingly. If the - caller of inflate() does not provide both available input and available - output space, it is possible that there will be no progress made. The - application can consume the uncompressed output when it wants, for example - when the output buffer is full (avail_out == 0), or after each call of - inflate(). If inflate returns Z_OK and with zero avail_out, it must be - called again after making room in the output buffer because there might be - more output pending. - - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, - Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. 
Z_BLOCK requests that inflate() - stop if and when it gets to the next deflate block boundary. When decoding - the zlib or gzip format, this will cause inflate() to return immediately - after the header and before the first block. When doing a raw inflate, - inflate() will go ahead and process the first block, and will return when it - gets to the end of that block, or when it runs out of data. - - The Z_BLOCK option assists in appending to or combining deflate streams. - To assist in this, on return inflate() always sets strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 if - inflate() is currently decoding the last block in the deflate stream, plus - 128 if inflate() returned immediately after decoding an end-of-block code or - decoding the complete header up to just before the first byte of the deflate - stream. The end-of-block will not be indicated until all of the uncompressed - data from that block has been written to strm->next_out. The number of - unused bits may in general be greater than seven, except when bit 7 of - data_type is set, in which case the number of unused bits will be less than - eight. data_type is set as noted here every time inflate() returns for all - flush options, and so can be used to determine the amount of currently - consumed input in bits. - - The Z_TREES option behaves as Z_BLOCK does, but it also returns when the - end of each deflate block header is reached, before any actual data in that - block is decoded. This allows the caller to determine the length of the - deflate block header for later use in random access within a deflate block. - 256 is added to the value of strm->data_type when inflate() returns - immediately after reaching the end of the deflate block header. - - inflate() should normally be called until it returns Z_STREAM_END or an - error. 
However if all decompression is to be performed in a single step (a - single call of inflate), the parameter flush should be set to Z_FINISH. In - this case all pending input is processed and all pending output is flushed; - avail_out must be large enough to hold all of the uncompressed data for the - operation to complete. (The size of the uncompressed data may have been - saved by the compressor for this purpose.) The use of Z_FINISH is not - required to perform an inflation in one step. However it may be used to - inform inflate that a faster approach can be used for the single inflate() - call. Z_FINISH also informs inflate to not maintain a sliding window if the - stream completes, which reduces inflate's memory footprint. If the stream - does not complete, either because not all of the stream is provided or not - enough output space is provided, then a sliding window will be allocated and - inflate() can be called again to continue the operation as if Z_NO_FLUSH had - been used. - - In this implementation, inflate() always flushes as much output as - possible to the output buffer, and always uses the faster approach on the - first call. So the effects of the flush parameter in this implementation are - on the return value of inflate() as noted below, when inflate() returns early - when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of - memory for a sliding window when Z_FINISH is used. - - If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the Adler-32 checksum of the dictionary - chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the Adler-32 checksum of all output produced so far (that is, - total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. 
At the end of the stream, inflate() checks that its computed Adler-32 - checksum is equal to that saved by the compressor and returns Z_STREAM_END - only if the checksum is correct. - - inflate() can decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically, if requested when - initializing with inflateInit2(). Any information contained in the gzip - header is not retained unless inflateGetHeader() is used. When processing - gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output - produced so far. The CRC-32 is checked against the gzip trailer, as is the - uncompressed length, modulo 2^32. - - inflate() returns Z_OK if some progress has been made (more input processed - or more output produced), Z_STREAM_END if the end of the compressed data has - been reached and all uncompressed output has been produced, Z_NEED_DICT if a - preset dictionary is needed at this point, Z_DATA_ERROR if the input data was - corrupted (input stream not conforming to the zlib format or incorrect check - value, in which case strm->msg points to a string with a more specific - error), Z_STREAM_ERROR if the stream structure was inconsistent (for example - next_in or next_out was Z_NULL, or the state was inadvertently written over - by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR - if no progress was possible or if there was not enough room in the output - buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and - inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may - then call inflateSync() to look for a good compression block if a partial - recovery of the data is to be attempted. 
-*/ -//go:linkname InflateEnd C.inflateEnd -func InflateEnd(strm ZStreamp) c.Int - -/* -ZEXTERN int ZEXPORT deflateInit2(z_streamp strm, - int level, - int method, - int windowBits, - int memLevel, - int strategy); - - This is another version of deflateInit with more compression options. The - fields zalloc, zfree and opaque must be initialized before by the caller. - - The method parameter is the compression method. It must be Z_DEFLATED in - this version of the library. - - The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if - deflateInit is used instead. - - For the current implementation of deflate(), a windowBits value of 8 (a - window size of 256 bytes) is not supported. As a result, a request for 8 - will result in 9 (a 512-byte window). In that case, providing 8 to - inflateInit2() will result in an error when the zlib header with 9 is - checked against the initialization of inflate(). The remedy is to not use 8 - with deflateInit2() with this initialization, or at least in that case use 9 - with inflateInit2(). - - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. deflate() will then generate raw deflate data - with no zlib header or trailer, and will not compute a check value. - - windowBits can also be greater than 15 for optional gzip encoding. Add - 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), no - header crc, and the operating system will be set to the appropriate value, - if the operating system was determined at compile time. 
If a gzip stream is - being written, strm->adler is a CRC-32 instead of an Adler-32. - - For raw deflate or gzip encoding, a request for a 256-byte window is - rejected as invalid, since only the zlib header provides a means of - transmitting the window size to the decompressor. - - The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but is - slow and reduces compression ratio; memLevel=9 uses maximum memory for - optimal speed. The default value is 8. See zconf.h for total memory usage - as a function of windowBits and memLevel. - - The strategy parameter is used to tune the compression algorithm. Use the - value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a - filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no - string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman - coding and less string matching; it is somewhat intermediate between - Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as - fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The - strategy parameter only affects the compression ratio but not the - correctness of the compressed output even if it is not set appropriately. - Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler - decoder for special applications. - - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid - method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is - incompatible with the version assumed by the caller (ZLIB_VERSION). msg is - set to null if there is no error message. 
deflateInit2 does not perform any - compression: this will be done by deflate(). -*/ -//go:linkname DeflateSetDictionary C.deflateSetDictionary -func DeflateSetDictionary(strm ZStreamp, dictionary *Bytef, dictLength UInt) c.Int - -/* - Initializes the compression dictionary from the given byte sequence - without producing any compressed output. When using the zlib format, this - function must be called immediately after deflateInit, deflateInit2 or - deflateReset, and before any call of deflate. When doing raw deflate, this - function must be called either before any call of deflate, or immediately - after the completion of a deflate block, i.e. after all input has been - consumed and all output has been delivered when using any of the flush - options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The - compressor and decompressor must use exactly the same dictionary (see - inflateSetDictionary). - - The dictionary should consist of strings (byte sequences) that are likely - to be encountered later in the data to be compressed, with the most commonly - used strings preferably put towards the end of the dictionary. Using a - dictionary is most useful when the data to be compressed is short and can be - predicted with good accuracy; the data can then be compressed better than - with the default empty dictionary. - - Depending on the size of the compression data structures selected by - deflateInit or deflateInit2, a part of the dictionary may in effect be - discarded, for example if the dictionary is larger than the window size - provided in deflateInit or deflateInit2. Thus the strings most likely to be - useful should be put at the end of the dictionary, not at the front. In - addition, the current implementation of deflate will use at most the window - size minus 262 bytes of the provided dictionary. 
- - Upon return of this function, strm->adler is set to the Adler-32 value - of the dictionary; the decompressor may later use this value to determine - which dictionary has been used by the compressor. (The Adler-32 value - applies to the whole dictionary even if only a subset of the dictionary is - actually used by the compressor.) If a raw deflate was requested, then the - Adler-32 value is not computed and strm->adler is not set. - - deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is - inconsistent (for example if deflate has already been called for this stream - or if not at a block boundary for raw deflate). deflateSetDictionary does - not perform any compression: this will be done by deflate(). -*/ -//go:linkname DeflateGetDictionary C.deflateGetDictionary -func DeflateGetDictionary(strm ZStreamp, dictionary *Bytef, dictLength *UInt) c.Int - -/* - Returns the sliding dictionary being maintained by deflate. dictLength is - set to the number of bytes in the dictionary, and that many bytes are copied - to dictionary. dictionary must have enough space, where 32768 bytes is - always enough. If deflateGetDictionary() is called with dictionary equal to - Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similarly, if dictLength is Z_NULL, then it is not set. - - deflateGetDictionary() may return a length less than the window size, even - when more than the window size in input has been provided. It may return up - to 258 bytes less in that case, due to how zlib's implementation of deflate - manages the sliding window and lookahead for matches, where matches can be - up to 258 bytes long. If the application needs the last window-size bytes of - input, then that would need to be saved by the application outside of zlib. - - deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the - stream state is inconsistent. 
-*/ -//go:linkname DeflateCopy C.deflateCopy -func DeflateCopy(dest ZStreamp, source ZStreamp) c.Int - -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when several compression strategies will be - tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed - by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and can - consume lots of memory. - - deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). msg is left unchanged in both source and - destination. -*/ -//go:linkname DeflateReset C.deflateReset -func DeflateReset(strm ZStreamp) c.Int - -/* - This function is equivalent to deflateEnd followed by deflateInit, but - does not free and reallocate the internal compression state. The stream - will leave the compression level and any other attributes that may have been - set unchanged. total_in, total_out, adler, and msg are initialized. - - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ -//go:linkname DeflateParams C.deflateParams -func DeflateParams(strm ZStreamp, level c.Int, strategy c.Int) c.Int - -/* - Dynamically update the compression level and compression strategy. The - interpretation of level and strategy is as in deflateInit2(). This can be - used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different strategy. 
- If the compression approach (which is a function of the level) or the - strategy is changed, and if there have been any deflate() calls since the - state was initialized or reset, then the input available so far is - compressed with the old level and strategy using deflate(strm, Z_BLOCK). - There are three approaches for the compression levels 0, 1..3, and 4..9 - respectively. The new level and strategy will take effect at the next call - of deflate(). - - If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does - not have enough output space to complete, then the parameter change will not - take effect. In this case, deflateParams() can be called again with the - same parameters and more output space to try again. - - In order to assure a change in the parameters on the first try, the - deflate stream should be flushed using deflate() with Z_BLOCK or other flush - request until strm.avail_out is not zero, before calling deflateParams(). - Then no more input data should be provided before the deflateParams() call. - If this is done, the old level and strategy will be applied to the data - compressed before deflateParams(), and the new level and strategy will be - applied to the data compressed after deflateParams(). - - deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream - state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if - there was not enough output space to complete the compression of the - available input data before a change in the strategy or approach. Note that - in the case of a Z_BUF_ERROR, the parameters are not changed. A return - value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be - retried with more output space. -*/ -//go:linkname DeflateTune C.deflateTune -func DeflateTune(strm ZStreamp, good_length c.Int, max_lazy c.Int, nice_length c.Int, max_chain c.Int) c.Int - -/* - Fine tune deflate's internal compression parameters. 
This should only be - used by someone who understands the algorithm used by zlib's deflate for - searching for the best matching string, and even then only by the most - fanatic optimizer trying to squeeze out the last compressed bit for their - specific input data. Read the deflate.c source code for the meaning of the - max_lazy, good_length, nice_length, and max_chain parameters. - - deflateTune() can be called after deflateInit() or deflateInit2(), and - returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. -*/ -//go:linkname DeflateBound C.deflateBound -func DeflateBound(strm ZStreamp, sourceLen ULong) ULong - -/* - deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. It must be called after deflateInit() or - deflateInit2(), and after deflateSetHeader(), if used. This would be used - to allocate an output buffer for deflation in a single pass, and so would be - called before deflate(). If that first deflate() call is provided the - sourceLen input bytes, an output buffer allocated to the size returned by - deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed - to return Z_STREAM_END. Note that it is possible for the compressed size to - be larger than the value returned by deflateBound() if flush options other - than Z_FINISH or Z_NO_FLUSH are used. -*/ -//go:linkname DeflatePending C.deflatePending -func DeflatePending(strm ZStreamp, pending *c.Uint, bits *c.Int) c.Int - -/* - deflatePending() returns the number of bytes and bits of output that have - been generated, but not yet provided in the available output. The bytes not - provided would be due to the available output space having being consumed. - The number of bits of output not provided are between 0 and 7, where they - await more bits to join them in order to fill out a full byte. If pending - or bits are Z_NULL, then those values are not set. 
- - deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ -//go:linkname DeflatePrime C.deflatePrime -func DeflatePrime(strm ZStreamp, bits c.Int, value c.Int) c.Int - -/* - deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the bits - leftover from a previous deflate stream when appending to it. As such, this - function can only be used for raw deflate, and must be used before the first - deflate() call after a deflateInit2() or deflateReset(). bits must be less - than or equal to 16, and that many of the least significant bits of value - will be inserted in the output. - - deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough - room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the - source stream state was inconsistent. -*/ -//go:linkname DeflateSetHeader C.deflateSetHeader -func DeflateSetHeader(strm ZStreamp, head GzHeaderp) c.Int - -/* -ZEXTERN int ZEXPORT inflateInit2(z_streamp strm, - int windowBits); - - This is another version of inflateInit with an extra parameter. The - fields next_in, avail_in, zalloc, zfree and opaque must be initialized - before by the caller. - - The windowBits parameter is the base two logarithm of the maximum window - size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. windowBits must be greater than or equal to the windowBits value - provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window - size is given as input, inflate() will return with the error code - Z_DATA_ERROR instead of trying to allocate a larger window. - - windowBits can also be zero to request that inflate use the window size in - the zlib header of the compressed stream. 
- - windowBits can also be -8..-15 for raw inflate. In this case, -windowBits - determines the window size. inflate() will then process raw deflate data, - not looking for a zlib or gzip header, not generating a check value, and not - looking for any check values for comparison at the end of the stream. This - is for use with other formats that use the deflate compressed data format - such as zip. Those formats provide their own check values. If a custom - format is developed using the raw deflate format for compressed data, it is - recommended that a check value such as an Adler-32 or a CRC-32 be applied to - the uncompressed data as is done in the zlib, gzip, and zip formats. For - most applications, the zlib format should be used as is. Note that comments - above on the use in deflateInit2() applies to the magnitude of windowBits. - - windowBits can also be greater than 15 for optional gzip decoding. Add - 32 to windowBits to enable zlib and gzip decoding with automatic header - detection, or add 16 to decode only the gzip format (the zlib format will - return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a - CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see - below), inflate() will *not* automatically decode concatenated gzip members. - inflate() will return Z_STREAM_END at the end of the gzip member. The state - would need to be reset to continue decoding a subsequent gzip member. This - *must* be done if there is more data after a gzip member, in order for the - decompression to be compliant with the gzip standard (RFC 1952). - - inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller, or Z_STREAM_ERROR if the parameters are - invalid, such as a null pointer to the structure. msg is set to null if - there is no error message. 
inflateInit2 does not perform any decompression - apart from possibly reading the zlib header if present: actual decompression - will be done by inflate(). (So next_in and avail_in may be modified, but - next_out and avail_out are unused and unchanged.) The current implementation - of inflateInit2() does not process any header information -- that is - deferred until inflate() is called. -*/ -//go:linkname InflateSetDictionary C.inflateSetDictionary -func InflateSetDictionary(strm ZStreamp, dictionary *Bytef, dictLength UInt) c.Int - -/* - Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor - can be determined from the Adler-32 value returned by that call of inflate. - The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called at any - time to set the dictionary. If the provided dictionary is smaller than the - window and there is already data in the window, then the provided dictionary - will amend what's there. The application must insure that the dictionary - that was used for compression is provided. - - inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is - inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect Adler-32 value). inflateSetDictionary does not - perform any decompression: this will be done by subsequent calls of - inflate(). -*/ -//go:linkname InflateGetDictionary C.inflateGetDictionary -func InflateGetDictionary(strm ZStreamp, dictionary *Bytef, dictLength *UInt) c.Int - -/* - Returns the sliding dictionary being maintained by inflate. dictLength is - set to the number of bytes in the dictionary, and that many bytes are copied - to dictionary. 
dictionary must have enough space, where 32768 bytes is - always enough. If inflateGetDictionary() is called with dictionary equal to - Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similarly, if dictLength is Z_NULL, then it is not set. - - inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the - stream state is inconsistent. -*/ -//go:linkname InflateSync C.inflateSync -func InflateSync(strm ZStreamp) c.Int - -/* - Skips invalid compressed data until a possible full flush point (see above - for the description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. - - inflateSync searches for a 00 00 FF FF pattern in the compressed data. - All full flush points have this pattern, but not all occurrences of this - pattern are full flush points. - - inflateSync returns Z_OK if a possible full flush point has been found, - Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point - has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. - In the success case, the application may save the current value of total_in - which indicates where valid compressed data was found. In the error case, - the application may repeatedly call inflateSync, providing more input each - time, until success or end of the input data. -*/ -//go:linkname InflateCopy C.inflateCopy -func InflateCopy(dest ZStreamp, source ZStreamp) c.Int - -/* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when randomly accessing a large stream. The - first pass through the stream can periodically record the inflate state, - allowing restarting inflate at those points when randomly accessing the - stream. - - inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being Z_NULL). 
msg is left unchanged in both source and - destination. -*/ -//go:linkname InflateReset C.inflateReset -func InflateReset(strm ZStreamp) c.Int - -/* - This function is equivalent to inflateEnd followed by inflateInit, - but does not free and reallocate the internal decompression state. The - stream will keep attributes that may have been set by inflateInit2. - total_in, total_out, adler, and msg are initialized. - - inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL). -*/ -//go:linkname InflateReset2 C.inflateReset2 -func InflateReset2(strm ZStreamp, windowBits c.Int) c.Int - -/* - This function is the same as inflateReset, but it also permits changing - the wrap and window size requests. The windowBits parameter is interpreted - the same as it is for inflateInit2. If the window size is changed, then the - memory allocated for the window is freed, and the window will be reallocated - by inflate() if needed. - - inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being Z_NULL), or if - the windowBits parameter is invalid. -*/ -//go:linkname InflatePrime C.inflatePrime -func InflatePrime(strm ZStreamp, bits c.Int, value c.Int) c.Int - -/* - This function inserts bits in the inflate input stream. The intent is - that this function is used to start inflating at a bit position in the - middle of a byte. The provided bits will be used before any bytes are used - from next_in. This function should only be used with raw inflate, and - should be used before the first inflate() call after inflateInit2() or - inflateReset(). bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. - - If bits is negative, then the input stream bit buffer is emptied. Then - inflatePrime() can be called again to put bits in the buffer. 
This is used - to clear out bits leftover after feeding inflate a block description prior - to feeding inflate codes. - - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ -//go:linkname InflateMark C.inflateMark -func InflateMark(strm ZStreamp) c.Long - -/* - This function returns two values, one in the lower 16 bits of the return - value, and the other in the remaining upper bits, obtained by shifting the - return value down 16 bits. If the upper value is -1 and the lower value is - zero, then inflate() is currently decoding information outside of a block. - If the upper value is -1 and the lower value is non-zero, then inflate is in - the middle of a stored block, with the lower value equaling the number of - bytes from the input remaining to copy. If the upper value is not -1, then - it is the number of bits back from the current bit position in the input of - the code (literal or length/distance pair) currently being processed. In - that case the lower value is the number of bytes already emitted for that - code. - - A code is being processed if inflate is waiting for more input to complete - decoding of the code, or if it has completed decoding but is waiting for - more output space to write the literal or match data. - - inflateMark() is used to mark locations in the input data for random - access, which may be at bit positions, and to note those cases where the - output of a code may span boundaries of random access blocks. The current - location in the input stream can be determined from avail_in and data_type - as noted in the description for the Z_BLOCK flush parameter for inflate. - - inflateMark returns the value noted above, or -65536 if the provided - source stream state was inconsistent. 
-*/ -//go:linkname InflateGetHeader C.inflateGetHeader -func InflateGetHeader(strm ZStreamp, head GzHeaderp) c.Int - -// llgo:type C -type InFunc func(c.Pointer, **c.Char) c.Uint - -// llgo:type C -type OutFunc func(c.Pointer, *c.Char, c.Uint) c.Int - -//go:linkname InflateBack C.inflateBack -func InflateBack(strm ZStreamp, in InFunc, in_desc c.Pointer, out OutFunc, out_desc c.Pointer) c.Int - -/* - inflateBack() does a raw inflate with a single call using a call-back - interface for input and output. This is potentially more efficient than - inflate() for file i/o applications, in that it avoids copying between the - output and the sliding window by simply making the window itself the output - buffer. inflate() can be faster on modern CPUs when used with large - buffers. inflateBack() trusts the application to not change the output - buffer passed by the output function, at least until inflateBack() returns. - - inflateBackInit() must be called first to allocate the internal state - and to initialize the state with the user-provided window buffer. - inflateBack() may then be used multiple times to inflate a complete, raw - deflate stream with each call. inflateBackEnd() is then called to free the - allocated state. - - A raw deflate stream is one with no zlib or gzip header or trailer. - This routine would normally be used in a utility that reads zip or gzip - files and writes out uncompressed files. The utility would decode the - header and process the trailer on its own, hence this routine expects only - the raw deflate stream to decompress. This is different from the default - behavior of inflate(), which expects a zlib header and trailer around the - deflate stream. - - inflateBack() uses two subroutines supplied by the caller that are then - called by inflateBack() for input and output. inflateBack() calls those - routines until it reads a complete deflate stream and writes out all of the - uncompressed data, or until it encounters an error. 
The function's - parameters and return types are defined above in the in_func and out_func - typedefs. inflateBack() will call in(in_desc, &buf) which should return the - number of bytes of provided input, and a pointer to that input in buf. If - there is no input available, in() must return zero -- buf is ignored in that - case -- and inflateBack() will return a buffer error. inflateBack() will - call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. - out() should return zero on success, or non-zero on failure. If out() - returns non-zero, inflateBack() will return with an error. Neither in() nor - out() are permitted to change the contents of the window provided to - inflateBackInit(), which is also the buffer that out() uses to write from. - The length written by out() will be at most the window size. Any non-zero - amount of input may be provided by in(). - - For convenience, inflateBack() can be provided input on the first call by - setting strm->next_in and strm->avail_in. If that input is exhausted, then - in() will be called. Therefore strm->next_in must be initialized before - calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called - immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in - must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. - - The in_desc and out_desc parameters of inflateBack() is passed as the - first parameter of in() and out() respectively when they are called. These - descriptors can be optionally used to pass any information that the caller- - supplied in() and out() functions need to do their job. - - On return, inflateBack() will set strm->next_in and strm->avail_in to - pass back any unused input that was provided by the last in() call. 
The - return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format error - in the deflate stream (in which case strm->msg is set to indicate the nature - of the error), or Z_STREAM_ERROR if the stream was not properly initialized. - In the case of Z_BUF_ERROR, an input or output error can be distinguished - using strm->next_in which will be Z_NULL only if in() returned an error. If - strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning - non-zero. (in() will always be called before out(), so strm->next_in is - assured to be defined if out() returns non-zero.) Note that inflateBack() - cannot return Z_OK. -*/ -//go:linkname InflateBackEnd C.inflateBackEnd -func InflateBackEnd(strm ZStreamp) c.Int - -/* - All memory allocated by inflateBackInit() is freed. - - inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream - state was inconsistent. -*/ -//go:linkname CompileFlags C.zlibCompileFlags -func CompileFlags() ULong - -/* - The following utility functions are implemented on top of the basic - stream-oriented functions. To simplify the interface, some default options - are assumed (compression level and memory usage, standard memory allocation - functions). The source code of these utility functions can be modified if - you need special options. -*/ -//go:linkname Compress C.compress -func Compress(dest *Bytef, destLen *ULongf, source *Bytef, sourceLen ULong) c.Int - -/* - Compresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed data. compress() is equivalent to compress2() with a level - parameter of Z_DEFAULT_COMPRESSION. 
- - compress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer. -*/ -//go:linkname Compress2 C.compress2 -func Compress2(dest *Bytef, destLen *ULongf, source *Bytef, sourceLen ULong, level c.Int) c.Int - -/* - Compresses the source buffer into the destination buffer. The level - parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the - destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed data. - - compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_BUF_ERROR if there was not enough room in the output buffer, - Z_STREAM_ERROR if the level parameter is invalid. -*/ -//go:linkname CompressBound C.compressBound -func CompressBound(sourceLen ULong) ULong - -/* - compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before a - compress() or compress2() call to allocate the destination buffer. -*/ -//go:linkname Uncompress C.uncompress -func Uncompress(dest *Bytef, destLen *ULongf, source *Bytef, sourceLen ULong) c.Int - -/* - Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total size - of the destination buffer, which must be large enough to hold the entire - uncompressed data. (The size of the uncompressed data must have been saved - previously by the compressor and transmitted to the decompressor by some - mechanism outside the scope of this compression library.) Upon exit, destLen - is the actual size of the uncompressed data. 
- - uncompress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In - the case where there is not enough room, uncompress() will fill the output - buffer with the uncompressed data up to that point. -*/ -//go:linkname Uncompress2 C.uncompress2 -func Uncompress2(dest *Bytef, destLen *ULongf, source *Bytef, sourceLen *ULong) c.Int - -type GzFileS struct { - Have c.Uint - Next *c.Char - Pos os.OffT -} -type GzFile *GzFileS - -/* -ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode); - - Open the gzip (.gz) file at path for reading and decompressing, or - compressing and writing. The mode parameter is as in fopen ("rb" or "wb") - but can also include a compression level ("wb9") or a strategy: 'f' for - filtered data as in "wb6f", 'h' for Huffman-only compression as in "wb1h", - 'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression - as in "wb9F". (See the description of deflateInit2 for more information - about the strategy parameter.) 'T' will request transparent writing or - appending with no compression and not using the gzip format. - - "a" can be used instead of "w" to request that the gzip stream that will - be written be appended to the file. "+" will result in an error, since - reading and writing to the same gzip file is not supported. The addition of - "x" when writing will create the file exclusively, which fails if the file - already exists. On systems that support it, the addition of "e" when - reading or writing will set the flag to close the file on an execve() call. - - These functions, as well as gzip, will read and decode a sequence of gzip - streams in a file. The append function of gzopen() can be used to create - such a file. (Also see gzflush() for another way to do this.) 
When - appending, gzopen does not test whether the file begins with a gzip stream, - nor does it look for the end of the gzip streams to begin appending. gzopen - will simply append a gzip stream to the existing file. - - gzopen can be used to read a file which is not in gzip format; in this - case gzread will directly read from the file without decompression. When - reading, this will be detected automatically by looking for the magic two- - byte gzip header. - - gzopen returns NULL if the file could not be opened, if there was - insufficient memory to allocate the gzFile state, or if an invalid mode was - specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). - errno can be checked to determine if the reason gzopen failed was that the - file could not be opened. -*/ -//go:linkname Gzdopen C.gzdopen -func Gzdopen(fd c.Int, mode *c.Char) GzFile - -/* - Associate a gzFile with the file descriptor fd. File descriptors are - obtained from calls like open, dup, creat, pipe or fileno (if the file has - been previously opened with fopen). The mode parameter is as in gzopen. - - The next call of gzclose on the returned gzFile will also close the file - descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor - fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, - mode);. The duplicated descriptor should be saved to avoid a leak, since - gzdopen does not close fd if it fails. If you are using fileno() to get the - file descriptor from a FILE *, then you will have to use dup() to avoid - double-close()ing the file descriptor. Both gzclose() and fclose() will - close the associated file descriptor, so they need to have different file - descriptors. - - gzdopen returns NULL if there was insufficient memory to allocate the - gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not - provided, or '+' was provided), or if fd is -1. 
The file descriptor is not - used until the next gz* read, write, seek, or close operation, so gzdopen - will not detect if fd is invalid (unless fd is -1). -*/ -//go:linkname Gzbuffer C.gzbuffer -func Gzbuffer(file GzFile, size c.Uint) c.Int - -/* - Set the internal buffer size used by this library's functions for file to - size. The default buffer size is 8192 bytes. This function must be called - after gzopen() or gzdopen(), and before any other calls that read or write - the file. The buffer memory allocation is always deferred to the first read - or write. Three times that size in buffer space is allocated. A larger - buffer size of, for example, 64K or 128K bytes will noticeably increase the - speed of decompression (reading). - - The new buffer size also affects the maximum length for gzprintf(). - - gzbuffer() returns 0 on success, or -1 on failure, such as being called - too late. -*/ -//go:linkname Gzsetparams C.gzsetparams -func Gzsetparams(file GzFile, level c.Int, strategy c.Int) c.Int - -/* - Dynamically update the compression level and strategy for file. See the - description of deflateInit2 for the meaning of these parameters. Previously - provided data is flushed before applying the parameter changes. - - gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not - opened for writing, Z_ERRNO if there is an error writing the flushed data, - or Z_MEM_ERROR if there is a memory allocation error. -*/ -//go:linkname Gzread C.gzread -func Gzread(file GzFile, buf Voidp, len c.Uint) c.Int - -/* - Read and decompress up to len uncompressed bytes from file into buf. If - the input file is not in gzip format, gzread copies the given number of - bytes into the buffer directly from the file. - - After reaching the end of a gzip stream in the input, gzread will continue - to read, looking for another gzip stream. Any number of gzip streams may be - concatenated in the input file, and will all be decompressed by gzread(). 
- If something other than a gzip stream is encountered after a gzip stream, - that remaining trailing garbage is ignored (and no error is returned). - - gzread can be used to read a gzip file that is being concurrently written. - Upon reaching the end of the input, gzread will return with the available - data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then - gzclearerr can be used to clear the end of file indicator in order to permit - gzread to be tried again. Z_OK indicates that a gzip stream was completed - on the last gzread. Z_BUF_ERROR indicates that the input file ended in the - middle of a gzip stream. Note that gzread does not return -1 in the event - of an incomplete gzip stream. This error is deferred until gzclose(), which - will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip - stream. Alternatively, gzerror can be used before gzclose to detect this - case. - - gzread returns the number of uncompressed bytes actually read, less than - len for end of file, or -1 for error. If len is too large to fit in an int, - then nothing is read, -1 is returned, and the error state is set to - Z_STREAM_ERROR. -*/ -//go:linkname Gzfread C.gzfread -func Gzfread(buf Voidp, size ZSizeT, nitems ZSizeT, file GzFile) ZSizeT - -/* - Read and decompress up to nitems items of size size from file into buf, - otherwise operating as gzread() does. This duplicates the interface of - stdio's fread(), with size_t request and return types. If the library - defines size_t, then z_size_t is identical to size_t. If not, then z_size_t - is an unsigned integer type that can contain a pointer. - - gzfread() returns the number of full items read of size size, or zero if - the end of the file was reached and a full item could not be read, or if - there was an error. gzerror() must be consulted if zero is returned in - order to determine if there was an error. If the multiplication of size and - nitems overflows, i.e. 
the product does not fit in a z_size_t, then nothing - is read, zero is returned, and the error state is set to Z_STREAM_ERROR. - - In the event that the end of file is reached and only a partial item is - available at the end, i.e. the remaining uncompressed data length is not a - multiple of size, then the final partial item is nevertheless read into buf - and the end-of-file flag is set. The length of the partial item read is not - provided, but could be inferred from the result of gztell(). This behavior - is the same as the behavior of fread() implementations in common libraries, - but it prevents the direct use of gzfread() to read a concurrently written - file, resetting and retrying on end-of-file, when size is not 1. -*/ -//go:linkname Gzwrite C.gzwrite -func Gzwrite(file GzFile, buf Voidpc, len c.Uint) c.Int - -/* - Compress and write the len uncompressed bytes at buf to file. gzwrite - returns the number of uncompressed bytes written or 0 in case of error. -*/ -//go:linkname Gzfwrite C.gzfwrite -func Gzfwrite(buf Voidpc, size ZSizeT, nitems ZSizeT, file GzFile) ZSizeT - -/* - Compress and write nitems items of size size from buf to file, duplicating - the interface of stdio's fwrite(), with size_t request and return types. If - the library defines size_t, then z_size_t is identical to size_t. If not, - then z_size_t is an unsigned integer type that can contain a pointer. - - gzfwrite() returns the number of full items written of size size, or zero - if there was an error. If the multiplication of size and nitems overflows, - i.e. the product does not fit in a z_size_t, then nothing is written, zero - is returned, and the error state is set to Z_STREAM_ERROR. -*/ -//go:linkname Gzprintf C.gzprintf -func Gzprintf(file GzFile, format *c.Char, __llgo_va_list ...interface{}) c.Int - -/* - Convert, format, compress, and write the arguments (...) to file under - control of the string format, as in fprintf. 
gzprintf returns the number of - uncompressed bytes actually written, or a negative zlib error code in case - of error. The number of uncompressed bytes written is limited to 8191, or - one less than the buffer size given to gzbuffer(). The caller should assure - that this limit is not exceeded. If it is exceeded, then gzprintf() will - return an error (0) with nothing written. In this case, there may also be a - buffer overflow with unpredictable consequences, which is possible only if - zlib was compiled with the insecure functions sprintf() or vsprintf(), - because the secure snprintf() or vsnprintf() functions were not available. - This can be determined using zlibCompileFlags(). -*/ -//go:linkname Gzputs C.gzputs -func Gzputs(file GzFile, s *c.Char) c.Int - -/* - Compress and write the given null-terminated string s to file, excluding - the terminating null character. - - gzputs returns the number of characters written, or -1 in case of error. -*/ -//go:linkname Gzgets C.gzgets -func Gzgets(file GzFile, buf *c.Char, len c.Int) *c.Char - -/* - Read and decompress bytes from file into buf, until len-1 characters are - read, or until a newline character is read and transferred to buf, or an - end-of-file condition is encountered. If any characters are read or if len - is one, the string is terminated with a null character. If no characters - are read due to an end-of-file or len is less than one, then the buffer is - left untouched. - - gzgets returns buf which is a null-terminated string, or it returns NULL - for end-of-file or in case of error. If there was an error, the contents at - buf are indeterminate. -*/ -//go:linkname Gzputc C.gzputc -func Gzputc(file GzFile, c c.Int) c.Int - -/* - Compress and write c, converted to an unsigned char, into file. gzputc - returns the value that was written, or -1 in case of error. -*/ -//go:linkname Gzgetc C.gzgetc -func Gzgetc(file GzFile) c.Int - -/* - Read and decompress one byte from file. 
gzgetc returns this byte or -1 - in case of end of file or error. This is implemented as a macro for speed. - As such, it does not do all of the checking the other functions do. I.e. - it does not check to see if file is NULL, nor whether the structure file - points to has been clobbered or not. -*/ -//go:linkname Gzungetc C.gzungetc -func Gzungetc(c c.Int, file GzFile) c.Int - -/* - Push c back onto the stream for file to be read as the first character on - the next read. At least one character of push-back is always allowed. - gzungetc() returns the character pushed, or -1 on failure. gzungetc() will - fail if c is -1, and may fail if a character has been pushed but not read - yet. If gzungetc is used immediately after gzopen or gzdopen, at least the - output buffer size of pushed characters is allowed. (See gzbuffer above.) - The pushed character will be discarded if the stream is repositioned with - gzseek() or gzrewind(). -*/ -//go:linkname Gzflush C.gzflush -func Gzflush(file GzFile, flush c.Int) c.Int - -/* -ZEXTERN z_off_t ZEXPORT gzseek(gzFile file, - z_off_t offset, int whence); - - Set the starting position to offset relative to whence for the next gzread - or gzwrite on file. The offset represents a number of bytes in the - uncompressed data stream. The whence parameter is defined as in lseek(2); - the value SEEK_END is not supported. - - If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are - supported; gzseek then compresses a sequence of zeroes up to the new - starting position. - - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error, in - particular if the file is opened for writing and the new starting position - would be before the current position. 
-*/ -//go:linkname Gzrewind C.gzrewind -func Gzrewind(file GzFile) c.Int - -/* -ZEXTERN z_off_t ZEXPORT gzoffset(gzFile file); - - Return the current compressed (actual) read or write offset of file. This - offset includes the count of bytes that precede the gzip stream, for example - when appending or when using gzdopen() for reading. When reading, the - offset does not include as yet unused buffered input. This information can - be used for a progress indicator. On error, gzoffset() returns -1. -*/ -//go:linkname Gzeof C.gzeof -func Gzeof(file GzFile) c.Int - -/* - Return true (1) if the end-of-file indicator for file has been set while - reading, false (0) otherwise. Note that the end-of-file indicator is set - only if the read tried to go past the end of the input, but came up short. - Therefore, just like feof(), gzeof() may return false even if there is no - more data to read, in the event that the last read request was for the exact - number of bytes remaining in the input file. This will happen if the input - file size is an exact multiple of the buffer size. - - If gzeof() returns true, then the read functions will return no more data, - unless the end-of-file indicator is reset by gzclearerr() and the input file - has grown since the previous end of file was detected. -*/ -//go:linkname Gzdirect C.gzdirect -func Gzdirect(file GzFile) c.Int - -/* - Return true (1) if file is being copied directly while reading, or false - (0) if file is a gzip stream being decompressed. - - If the input file is empty, gzdirect() will return true, since the input - does not contain a gzip stream. - - If gzdirect() is used immediately after gzopen() or gzdopen() it will - cause buffers to be allocated to allow reading the file to determine if it - is a gzip file. Therefore if gzbuffer() is used, it should be called before - gzdirect(). - - When writing, gzdirect() returns true (1) if transparent writing was - requested ("wT" for the gzopen() mode), or false (0) otherwise. 
(Note: - gzdirect() is not needed when writing. Transparent writing must be - explicitly requested, so the application already knows the answer. When - linking statically, using gzdirect() will include all of the zlib code for - gzip file reading and decompression, which may not be desired.) -*/ -//go:linkname Gzclose C.gzclose -func Gzclose(file GzFile) c.Int - -/* - Flush all pending output for file, if necessary, close file and - deallocate the (de)compression state. Note that once file is closed, you - cannot call gzerror with file, since its structures have been deallocated. - gzclose must not be called more than once on the same file, just as free - must not be called more than once on the same allocation. - - gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a - file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the - last read ended in the middle of a gzip stream, or Z_OK on success. -*/ -//go:linkname GzcloseR C.gzclose_r -func GzcloseR(file GzFile) c.Int - -//go:linkname GzcloseW C.gzclose_w -func GzcloseW(file GzFile) c.Int - -/* - Same as gzclose(), but gzclose_r() is only for use when reading, and - gzclose_w() is only for use when writing or appending. The advantage to - using these instead of gzclose() is that they avoid linking in zlib - compression or decompression code that is not used when only reading or only - writing respectively. If gzclose() is used, then both compression and - decompression code will be included the application when linking to a static - zlib library. -*/ -//go:linkname Gzerror C.gzerror -func Gzerror(file GzFile, errnum *c.Int) *c.Char - -/* - Return the error message for the last error which occurred on file. - errnum is set to zlib error number. If an error occurred in the file system - and not in the compression library, errnum is set to Z_ERRNO and the - application may consult errno to get the exact error code. - - The application must not modify the returned string. 
Future calls to - this function may invalidate the previously returned string. If file is - closed, then the string previously returned by gzerror will no longer be - available. - - gzerror() should be used to distinguish errors from end-of-file for those - functions above that do not distinguish those cases in their return values. -*/ -//go:linkname Gzclearerr C.gzclearerr -func Gzclearerr(file GzFile) - -/* - These functions are not related to compression but are exported - anyway because they might be useful in applications using the compression - library. -*/ -// llgo:link ULong.Adler32 C.adler32 -func (recv_ ULong) Adler32(buf *Bytef, len UInt) ULong { - return 0 -} - -/* - Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. An Adler-32 value is in the range of a 32-bit - unsigned integer. If buf is Z_NULL, this function returns the required - initial value for the checksum. - - An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed - much faster. - - Usage example: - - uLong adler = adler32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); - } - if (adler != original_adler) error(); -*/ -// llgo:link ULong.Adler32Z C.adler32_z -func (recv_ ULong) Adler32Z(buf *Bytef, len ZSizeT) ULong { - return 0 -} - -/* -ZEXTERN uLong ZEXPORT adler32_combine(uLong adler1, uLong adler2, - z_off_t len2); - - Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 - and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for - each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of - seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note - that the z_off_t type (like off_t) is a signed integer. If len2 is - negative, the result has no meaning or utility. 
-*/ -// llgo:link ULong.Crc32 C.crc32 -func (recv_ ULong) Crc32(buf *Bytef, len UInt) ULong { - return 0 -} - -/* - Update a running CRC-32 with the bytes buf[0..len-1] and return the - updated CRC-32. A CRC-32 value is in the range of a 32-bit unsigned integer. - If buf is Z_NULL, this function returns the required initial value for the - crc. Pre- and post-conditioning (one's complement) is performed within this - function so it shouldn't be done by the application. - - Usage example: - - uLong crc = crc32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - crc = crc32(crc, buffer, length); - } - if (crc != original_crc) error(); -*/ -// llgo:link ULong.Crc32Z C.crc32_z -func (recv_ ULong) Crc32Z(buf *Bytef, len ZSizeT) ULong { - return 0 -} - -/* -ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t len2); - - Return the operator corresponding to length len2, to be used with - crc32_combine_op(). len2 must be non-negative. -*/ -// llgo:link ULong.Crc32CombineOp C.crc32_combine_op -func (recv_ ULong) Crc32CombineOp(crc2 ULong, op ULong) ULong { - return 0 -} - -/* deflateInit and inflateInit are macros to allow checking the zlib version - * and the compiler's view of z_stream: - */ -//go:linkname DeflateInit_ C.deflateInit_ -func DeflateInit_(strm ZStreamp, level c.Int, version *c.Char, stream_size c.Int) c.Int - -//go:linkname InflateInit_ C.inflateInit_ -func InflateInit_(strm ZStreamp, version *c.Char, stream_size c.Int) c.Int - -//go:linkname DeflateInit2_ C.deflateInit2_ -func DeflateInit2_(strm ZStreamp, level c.Int, method c.Int, windowBits c.Int, memLevel c.Int, strategy c.Int, version *c.Char, stream_size c.Int) c.Int - -//go:linkname InflateInit2_ C.inflateInit2_ -func InflateInit2_(strm ZStreamp, windowBits c.Int, version *c.Char, stream_size c.Int) c.Int - -//go:linkname InflateBackInit_ C.inflateBackInit_ -func InflateBackInit_(strm ZStreamp, windowBits c.Int, window *c.Char, version *c.Char, stream_size c.Int) c.Int - -//go:linkname 
Gzgetc_ C.gzgetc_ -func Gzgetc_(file GzFile) c.Int - -//go:linkname Gzopen C.gzopen -func Gzopen(*c.Char, *c.Char) GzFile - -//go:linkname Gzseek C.gzseek -func Gzseek(GzFile, os.OffT, c.Int) os.OffT - -//go:linkname Gztell C.gztell -func Gztell(GzFile) os.OffT - -//go:linkname Gzoffset C.gzoffset -func Gzoffset(GzFile) os.OffT - -// llgo:link ULong.Adler32Combine C.adler32_combine -func (recv_ ULong) Adler32Combine(ULong, os.OffT) ULong { - return 0 -} - -// llgo:link ULong.Crc32Combine C.crc32_combine -func (recv_ ULong) Crc32Combine(ULong, os.OffT) ULong { - return 0 -} - -//go:linkname Crc32CombineGen C.crc32_combine_gen -func Crc32CombineGen(os.OffT) ULong - -/* undocumented functions */ -//go:linkname ZError C.zError -func ZError(c.Int) *c.Char - -//go:linkname InflateSyncPoint C.inflateSyncPoint -func InflateSyncPoint(ZStreamp) c.Int - -//go:linkname GetCrcTable C.get_crc_table -func GetCrcTable() *ZCrcT - -//go:linkname InflateUndermine C.inflateUndermine -func InflateUndermine(ZStreamp, c.Int) c.Int - -//go:linkname InflateValidate C.inflateValidate -func InflateValidate(ZStreamp, c.Int) c.Int - -//go:linkname InflateCodesUsed C.inflateCodesUsed -func InflateCodesUsed(ZStreamp) c.Ulong - -//go:linkname InflateResetKeep C.inflateResetKeep -func InflateResetKeep(ZStreamp) c.Int - -//go:linkname DeflateResetKeep C.deflateResetKeep -func DeflateResetKeep(ZStreamp) c.Int - -//go:linkname Gzvprintf C.gzvprintf -func Gzvprintf(file GzFile, format *c.Char, va c.VaList) c.Int diff --git a/zlib/zlib_autogen_link.go b/zlib/zlib_autogen_link.go deleted file mode 100644 index d74d3ea0..00000000 --- a/zlib/zlib_autogen_link.go +++ /dev/null @@ -1,8 +0,0 @@ -package zlib - -import ( - _ "github.com/goplus/lib/c" - _ "github.com/goplus/lib/c/os" -) - -const LLGoPackage string = "link: $(pkg-config --libs zlib);"