diff --git a/.github/workflows/positron-python-ci.yml b/.github/workflows/positron-python-ci.yml new file mode 100644 index 00000000000..498137d59a6 --- /dev/null +++ b/.github/workflows/positron-python-ci.yml @@ -0,0 +1,441 @@ +name: 'Positron Python CI' + +on: + push: + branches: + - main + paths: + - 'extensions/positron-python/**' + pull_request: + branches: + - main + paths: + - 'extensions/positron-python/**' + + +defaults: + run: + working-directory: 'extensions/positron-python' + +env: + NODE_VERSION: '18.17.1' + PYTHON_VERSION: '3.10' + PROJECT_DIR: 'extensions/positron-python' + PYTHON_SRC_DIR: 'extensions/positron-python/pythonFiles' + # Force a path with spaces and to test extension works in these scenarios + # Unicode characters are causing 2.7 failures so skip that for now. + special-working-directory: './path with spaces' + special-working-directory-relative: 'path with spaces' + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Node ${{ env.NODE_VERSION }} + uses: actions/setup-node@v3 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'yarn' + + - name: Install Yarn + run: npm install -g yarn + + - name: Install Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' + + - name: Install Node dependencies + run: yarn install --immutable --network-timeout 120000 --prefer-offline + + - name: Run `gulp prePublishNonBundle` + run: yarn prePublish + + - name: Check dependencies + run: yarn checkDependencies + + - name: Lint TypeScript code + run: yarn lint + + - name: Check TypeScript format + run: yarn format-check + + - name: Check Python format + run: | + python -m pip install -U black + python -m black . --check + working-directory: ${{ env.PYTHON_SRC_DIR }} + + - name: Run Ruff + run: | + python -m pip install -U ruff + python -m ruff check . 
+ working-directory: ${{ env.PYTHON_SRC_DIR }} + + check-types: + name: Check Python types + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Use Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' + + - name: Install base Python requirements + run: 'python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/python --no-cache-dir --implementation py -r requirements.txt' + + - name: Install Positron IPyKernel requirements + run: python scripts/vendor.py + + - name: Install other Python requirements + run: | + python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy + python -m pip install --upgrade -r build/test-requirements.txt + python -m pip install --upgrade -r ./pythonFiles/positron/pinned-test-requirements.txt + + - name: Run Pyright + uses: jakebailey/pyright-action@v2 + with: + version: 1.1.308 + working-directory: ${{ env.PYTHON_SRC_DIR }} + + python-tests: + name: Python Tests + # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded. + runs-on: ${{ matrix.os }} + defaults: + run: + working-directory: ${{ env.special-working-directory }}/${{ env.PROJECT_DIR}} + strategy: + fail-fast: false + matrix: + # TODO: Decide whether we want to match upstream matrix. + # # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used, + # # macOS runners are expensive, and we assume that Ubuntu is enough to cover the Unix case. + # os: [ubuntu-latest, windows-latest] + # # Run the tests on the oldest and most recent versions of Python. 
+ # python: ['3.8', '3.x'] # run for 3 pytest versions, most recent stable, oldest version supported and pre-release + # pytest-version: ['pytest', 'pytest@pre-release', 'pytest==6.2.0'] + include: + - os: 'ubuntu-latest' + python: '3.8' + pytest-version: 'pytest<8.1.1' + - os: 'macos-latest' + python: '3.9' + pytest-version: 'pytest<8.1.1' + - os: 'windows-latest' + python: '3.10' + pytest-version: 'pytest<8.1.1' + - os: 'ubuntu-latest' + python: '3.11' + pytest-version: 'pytest<8.1.1' + - os: 'ubuntu-latest' + python: '3.12' + pytest-version: 'pytest<8.1.1' + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: ${{ env.special-working-directory-relative }} + + - name: Use Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install specific pytest version + if: matrix.pytest-version == 'pytest@pre-release' + run: | + python -m pip install --pre pytest + + - name: Install specific pytest version + if: matrix.pytest-version != 'pytest@pre-release' + run: | + python -m pip install "${{ matrix.pytest-version }}" + + - name: Install specific pytest version + run: python -m pytest --version + + - name: Install base Python requirements + run: 'python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/python --no-cache-dir --implementation py -r requirements.txt' + + - name: Install test requirements + run: python -m pip install -r build/test-requirements.txt + + - name: Run Python unit tests + run: python pythonFiles/tests/run_all.py + + positron-ipykernel-tests: + name: Test Positron IPyKernel + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: 'ubuntu-latest' + python: '3.8' + time-elapsed: '' + - os: 'macos-latest' + python: '3.9' + time-elapsed: '' + - os: 'windows-latest' + python: '3.10' + time-elapsed: '' + - os: 'ubuntu-latest' + python: '3.11' + time-elapsed: '' + - os: 'ubuntu-latest' + python: '3.12' + time-elapsed: '' + - os: 
'ubuntu-latest' + python: '3.10' + time-elapsed: '3 months' + - os: 'ubuntu-latest' + python: '3.10' + time-elapsed: '6 months' + - os: 'ubuntu-latest' + python: '3.10' + time-elapsed: '9 months' + - os: 'ubuntu-latest' + python: '3.10' + time-elapsed: '1 year' + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Use Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + cache: 'pip' + + - name: Install Positron IPyKernel requirements + run: python scripts/vendor.py + + - name: Install Positron IPyKernel test requirements + run: python -m pip install --prefer-binary --upgrade -r pythonFiles/positron/pinned-test-requirements.txt + + - name: Get date for older dependencies + if: ${{ matrix.time-elapsed != '' }} + run: | + echo "SNAPSHOT_DATE=$(date -d '-${{ matrix.time-elapsed }}' --iso-8601)" >> $GITHUB_ENV + + - name: Get older PyPI snapshot + if: ${{ matrix.time-elapsed != '' }} + run: | + python -m pip config set global.index-url https://packagemanager.posit.co/pypi/${{ env.SNAPSHOT_DATE }}/simple + python -m pip config set global.trusted-host packagemanager.posit.co + python -m pip install --prefer-binary --force-reinstall -r pythonFiles/positron/data-science-requirements.txt + + - name: Run Positron IPyKernel unit tests + run: pytest pythonFiles/positron + + typescript-tests: + name: Test TypeScript + runs-on: ${{ matrix.os }} + defaults: + run: + working-directory: ${{ env.special-working-directory }}/${{ env.PROJECT_DIR}} + strategy: + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python: ['3.x'] + test-suite: [ts-unit, venv, single-workspace, debugger, functional, smoke] + # TODO: Add integration tests on windows and ubuntu. This requires updating + # src/test/positron/testElectron.ts to support installing Positron on these platforms. 
+ exclude: + - os: windows-latest + test-suite: venv + - os: windows-latest + test-suite: debugger + - os: windows-latest + test-suite: single-workspace + - os: windows-latest + test-suite: smoke + - os: ubuntu-latest + test-suite: venv + - os: ubuntu-latest + test-suite: debugger + - os: ubuntu-latest + test-suite: single-workspace + - os: ubuntu-latest + test-suite: smoke + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: ${{ env.special-working-directory-relative }} + + - name: Install Node ${{ env.NODE_VERSION }} + uses: actions/setup-node@v3 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'yarn' + cache-dependency-path: ${{ env.special-working-directory-relative }}/${{ env.PROJECT_DIR }}/yarn.lock + + - name: Install Yarn + run: npm install -g yarn + + - name: Use Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + cache: 'pip' + + - name: Install Node dependencies + run: yarn install --immutable --network-timeout 120000 --prefer-offline + + - name: Run `gulp prePublishNonBundle` + run: yarn prePublish + + - name: Localization + run: npx @vscode/l10n-dev@latest export ./src + + - name: Install test requirements + run: python -m pip install --upgrade -r ./build/test-requirements.txt + + - name: Install functional test requirements + run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt + if: matrix.test-suite == 'functional' + + - name: Prepare pipenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install pipenv + python -m pipenv run python ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} pipenvPath + + - name: Prepare poetry for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install poetry + Move-Item -Path 
".\build\ci\pyproject.toml" -Destination . + poetry env use python + + - name: Prepare virtualenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install virtualenv + python -m virtualenv .virtualenv/ + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".virtualenv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } else { + & ".virtualenv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } + + - name: Prepare venv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' && startsWith(matrix.python, 3.) + run: | + python -m venv .venv + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".venv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } else { + & ".venv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } + + - name: Prepare conda for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + # 1. 
For `*.testvirtualenvs.test.ts` + if ('${{ matrix.os }}' -match 'windows-latest') { + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath python.exe + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath Scripts | Join-Path -ChildPath conda + } else{ + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath python + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath conda + } + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaExecPath $condaExecPath + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaPath + & $condaExecPath init --all + + - name: Prepare VSIX for smoke tests + run: yarn package --allow-star-activation + if: matrix.test-suite == 'smoke' + + - name: Set CI_PYTHON_PATH and CI_DISABLE_AUTO_SELECTION + run: | + echo "CI_PYTHON_PATH=$(which python)" >> $GITHUB_ENV + echo "CI_DISABLE_AUTO_SELECTION=1" >> $GITHUB_ENV + shell: bash + if: matrix.test-suite != 'ts-unit' + + # Run TypeScript unit tests only for Python 3.X. + - name: Run TypeScript unit tests + run: yarn test:unittests + if: matrix.test-suite == 'ts-unit' && startsWith(matrix.python, 3.) + + # The virtual environment based tests use the `testSingleWorkspace` set of tests + # with the environment variable `TEST_FILES_SUFFIX` set to `testvirtualenvs`, + # which is set in the "Prepare environment for venv tests" step. + # We also use a third-party GitHub Action to install xvfb on Linux, + # run tests and then clean up the process once the tests ran. 
+ # See https://github.com/GabrielBB/xvfb-action + - name: Run venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + CI_PYTHON_VERSION: ${{ matrix.python }} + POSITRON_GITHUB_PAT: ${{ secrets.POSITRON_GITHUB_PAT }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: yarn testSingleWorkspace + working-directory: ${{ env.special-working-directory }}/${{ env.PROJECT_DIR }} + if: matrix.test-suite == 'venv' + + - name: Run single-workspace tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + POSITRON_GITHUB_PAT: ${{ secrets.POSITRON_GITHUB_PAT }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: yarn testSingleWorkspace + working-directory: ${{ env.special-working-directory }}/${{ env.PROJECT_DIR }} + if: matrix.test-suite == 'single-workspace' + + - name: Run debugger tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + POSITRON_GITHUB_PAT: ${{ secrets.POSITRON_GITHUB_PAT }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: yarn testDebugger + working-directory: ${{ env.special-working-directory }}/${{ env.PROJECT_DIR }} + if: matrix.test-suite == 'debugger' + + - name: Run TypeScript functional tests + run: yarn test:functional + if: matrix.test-suite == 'functional' + + - name: Run smoke tests + env: + POSITRON_GITHUB_PAT: ${{ secrets.POSITRON_GITHUB_PAT }} + run: yarn tsc && node ./out/test/smokeTest.js + if: matrix.test-suite == 'smoke' diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index d8d43a72326..00000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "extensions/positron-python"] - path = extensions/positron-python - url = git@github.com:posit-dev/positron-python.git diff --git a/.vscode/settings.json b/.vscode/settings.json index d31c1786e5b..0f62a0752e2 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -122,6 +122,9 @@ "editor.defaultFormatter": "rust-lang.rust-analyzer", "editor.formatOnSave": true, }, + "[diff]": { + "files.trimTrailingWhitespace": false + }, "rust-analyzer.linkedProjects": [ 
"cli/Cargo.toml" ], diff --git a/build/npm/preinstall.js b/build/npm/preinstall.js index e1ea9dc7ebd..edf0d98c3d5 100644 --- a/build/npm/preinstall.js +++ b/build/npm/preinstall.js @@ -164,25 +164,3 @@ function getHeaderInfo(rcFile) { ? { disturl, target } : undefined; } - - -// --- Start Positron --- -console.log(`Updating positron built-in extensions...`); - -// For dev environments: if a local sync of a submodule already -// exists, "absorb" it as if it were originally added via submodule -if (fs.existsSync('extensions/positron-python/.git') && - !fs.existsSync('.git/modules/extensions/positron-python')) { - cp.execSync('git submodule absorbgitdirs extensions/positron-python'); - console.log(`Absorbed local sync of positron-python`); -} - -cp.execSync('git submodule init', {stdio: 'inherit'}); - -// For unattended builds: config with PAT -if (process.env['POSITRON_GITHUB_PAT']) { - cp.execSync(`git config submodule.extensions/positron-python.url https://${process.env['POSITRON_GITHUB_PAT']}@github.com/posit-dev/positron-python.git`, {stdio: 'inherit'}); -} -cp.execSync('git submodule update --init --recursive', {stdio: 'inherit'}); - -// --- End Positron --- diff --git a/extensions/positron-python b/extensions/positron-python deleted file mode 160000 index 2992e6ca30c..00000000000 --- a/extensions/positron-python +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 2992e6ca30c69613a9a2285050d13f0d5fbf55ca diff --git a/extensions/positron-python/.devcontainer/Dockerfile b/extensions/positron-python/.devcontainer/Dockerfile new file mode 100644 index 00000000000..3e7e9e9cf09 --- /dev/null +++ b/extensions/positron-python/.devcontainer/Dockerfile @@ -0,0 +1,18 @@ +FROM mcr.microsoft.com/devcontainers/typescript-node:18-bookworm + +RUN apt-get install -y wget bzip2 + +# Run in silent mode and save downloaded script as anaconda.sh. +# Run with /bin/bash and run in silent mode to /opt/conda. +# Also get rid of installation script after finishing. 
+RUN wget --quiet https://repo.anaconda.com/archive/Anaconda3-2023.07-1-Linux-x86_64.sh -O ~/anaconda.sh && \ + /bin/bash ~/anaconda.sh -b -p /opt/conda && \ + rm ~/anaconda.sh + +ENV PATH="/opt/conda/bin:$PATH" + +# Sudo apt update needs to run in order for installation of fish to work . +RUN sudo apt update && \ + sudo apt install fish -y + + diff --git a/extensions/positron-python/.devcontainer/devcontainer.json b/extensions/positron-python/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..fe15f35764e --- /dev/null +++ b/extensions/positron-python/.devcontainer/devcontainer.json @@ -0,0 +1,30 @@ +// For format details, see https://aka.ms/devcontainer.json. +{ + "name": "VS Code Python Dev Container", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "build": { + "dockerfile": "./Dockerfile", + "context": ".." + }, + "customizations": { + "vscode": { + "extensions": [ + "editorconfig.editorconfig", + "esbenp.prettier-vscode", + "dbaeumer.vscode-eslint", + "ms-python.python", + "ms-python.black-formatter", + "ms-python.vscode-pylance", + "charliermarsh.ruff" + ] + } + }, + // Commands to execute on container creation,start. 
+ "postCreateCommand": "bash scripts/postCreateCommand.sh", + "onCreateCommand": "bash scripts/onCreateCommand.sh", + + "containerEnv": { + "CI_PYTHON_PATH": "/workspaces/vscode-python/.venv/bin/python" + } + +} diff --git a/extensions/positron-python/.editorconfig b/extensions/positron-python/.editorconfig new file mode 100644 index 00000000000..f7dad5b8251 --- /dev/null +++ b/extensions/positron-python/.editorconfig @@ -0,0 +1,22 @@ +# EditorConfig is awesome: http://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Tab indentation +[*] +indent_style = space +indent_size = 4 +trim_trailing_whitespace = true +insert_final_newline = true + +# The indent size used in the `package.json` file cannot be changed +# https://github.com/npm/npm/pull/3180#issuecomment-16336516 +[{.travis.yml,npm-shrinkwrap.json,package.json}] +indent_style = space +indent_size = 4 + +# --- Start Positron --- +[*.patch] +trim_trailing_whitespace = false +# --- End Positron --- diff --git a/extensions/positron-python/.eslintignore b/extensions/positron-python/.eslintignore new file mode 100644 index 00000000000..7f6bb48d6c8 --- /dev/null +++ b/extensions/positron-python/.eslintignore @@ -0,0 +1,273 @@ +pythonExtensionApi/out/ + +# The following files were grandfathered out of eslint. They can be removed as time permits. 
+ +src/test/analysisEngineTest.ts +src/test/ciConstants.ts +src/test/common.ts +src/test/constants.ts +src/test/core.ts +src/test/extension-version.functional.test.ts +src/test/fixtures.ts +src/test/index.ts +src/test/initialize.ts +src/test/mockClasses.ts +src/test/performanceTest.ts +src/test/proc.ts +src/test/smokeTest.ts +src/test/standardTest.ts +src/test/startupTelemetry.unit.test.ts +src/test/sourceMapSupport.test.ts +src/test/sourceMapSupport.unit.test.ts +src/test/testBootstrap.ts +src/test/testLogger.ts +src/test/testRunner.ts +src/test/textUtils.ts +src/test/unittests.ts +src/test/vscode-mock.ts + +src/test/interpreters/mocks.ts +src/test/interpreters/virtualEnvs/condaInheritEnvPrompt.unit.test.ts +src/test/interpreters/pythonPathUpdaterFactory.unit.test.ts +src/test/interpreters/activation/service.unit.test.ts +src/test/interpreters/helpers.unit.test.ts +src/test/interpreters/display.unit.test.ts + +src/test/terminals/codeExecution/terminalCodeExec.unit.test.ts +src/test/terminals/codeExecution/codeExecutionManager.unit.test.ts +src/test/terminals/codeExecution/djangoShellCodeExect.unit.test.ts + +src/test/activation/activeResource.unit.test.ts +src/test/activation/extensionSurvey.unit.test.ts + +src/test/utils/fs.ts + +src/test/api.functional.test.ts + +src/test/testing/mocks.ts +src/test/testing/common/debugLauncher.unit.test.ts +src/test/testing/common/services/configSettingService.unit.test.ts + +src/test/common/exitCIAfterTestReporter.ts + + +src/test/common/terminals/activator/index.unit.test.ts +src/test/common/terminals/activator/base.unit.test.ts +src/test/common/terminals/shellDetector.unit.test.ts +src/test/common/terminals/service.unit.test.ts +src/test/common/terminals/helper.unit.test.ts +src/test/common/terminals/activation.unit.test.ts +src/test/common/terminals/shellDetectors/shellDetectors.unit.test.ts +src/test/common/terminals/environmentActivationProviders/terminalActivation.testvirtualenvs.ts + +src/test/common/socketStream.test.ts 
+ +src/test/common/configSettings.test.ts + +src/test/common/experiments/telemetry.unit.test.ts + +src/test/common/platform/filesystem.unit.test.ts +src/test/common/platform/errors.unit.test.ts +src/test/common/platform/utils.ts +src/test/common/platform/fs-temp.unit.test.ts +src/test/common/platform/fs-temp.functional.test.ts +src/test/common/platform/filesystem.functional.test.ts +src/test/common/platform/filesystem.test.ts + +src/test/common/utils/cacheUtils.unit.test.ts +src/test/common/utils/decorators.unit.test.ts +src/test/common/utils/version.unit.test.ts + +src/test/common/configSettings/configSettings.unit.test.ts +src/test/common/serviceRegistry.unit.test.ts +src/test/common/extensions.unit.test.ts +src/test/common/variables/envVarsService.unit.test.ts +src/test/common/helpers.test.ts +src/test/common/application/commands/reloadCommand.unit.test.ts + +src/test/common/installer/channelManager.unit.test.ts +src/test/common/installer/pipInstaller.unit.test.ts +src/test/common/installer/installer.invalidPath.unit.test.ts +src/test/common/installer/pipEnvInstaller.unit.test.ts +src/test/common/installer/productPath.unit.test.ts + +src/test/common/socketCallbackHandler.test.ts + +src/test/common/process/decoder.test.ts +src/test/common/process/processFactory.unit.test.ts +src/test/common/process/pythonToolService.unit.test.ts +src/test/common/process/proc.observable.test.ts +src/test/common/process/logger.unit.test.ts +src/test/common/process/proc.exec.test.ts +src/test/common/process/pythonProcess.unit.test.ts +src/test/common/process/proc.unit.test.ts + +src/test/common/interpreterPathService.unit.test.ts + + +src/test/pythonFiles/formatting/dummy.ts + +src/test/debugger/extension/adapter/adapter.test.ts +src/test/debugger/extension/adapter/outdatedDebuggerPrompt.unit.test.ts +src/test/debugger/extension/adapter/factory.unit.test.ts +src/test/debugger/extension/adapter/activator.unit.test.ts +src/test/debugger/extension/adapter/logging.unit.test.ts 
+src/test/debugger/extension/hooks/childProcessAttachHandler.unit.test.ts +src/test/debugger/extension/hooks/childProcessAttachService.unit.test.ts +src/test/debugger/utils.ts +src/test/debugger/common/protocolparser.test.ts +src/test/debugger/envVars.test.ts + +src/test/telemetry/index.unit.test.ts +src/test/telemetry/envFileTelemetry.unit.test.ts + +src/test/application/diagnostics/checks/macPythonInterpreter.unit.test.ts +src/test/application/diagnostics/checks/pythonInterpreter.unit.test.ts +src/test/application/diagnostics/checks/invalidLaunchJsonDebugger.unit.test.ts +src/test/application/diagnostics/checks/powerShellActivation.unit.test.ts +src/test/application/diagnostics/checks/invalidPythonPathInDebugger.unit.test.ts +src/test/application/diagnostics/checks/envPathVariable.unit.test.ts +src/test/application/diagnostics/applicationDiagnostics.unit.test.ts +src/test/application/diagnostics/promptHandler.unit.test.ts +src/test/application/diagnostics/sourceMapSupportService.unit.test.ts +src/test/application/diagnostics/commands/ignore.unit.test.ts + +src/test/performance/load.perf.test.ts + +src/client/interpreter/configuration/interpreterSelector/commands/base.ts +src/client/interpreter/configuration/interpreterSelector/commands/resetInterpreter.ts +src/client/interpreter/configuration/pythonPathUpdaterServiceFactory.ts +src/client/interpreter/configuration/services/globalUpdaterService.ts +src/client/interpreter/configuration/services/workspaceUpdaterService.ts +src/client/interpreter/configuration/services/workspaceFolderUpdaterService.ts +src/client/interpreter/helpers.ts +src/client/interpreter/virtualEnvs/condaInheritEnvPrompt.ts +src/client/interpreter/display/index.ts + +src/client/extension.ts +src/client/sourceMapSupport.ts +src/client/startupTelemetry.ts + +src/client/terminals/codeExecution/terminalCodeExecution.ts +src/client/terminals/codeExecution/codeExecutionManager.ts +src/client/terminals/codeExecution/djangoContext.ts + 
+src/client/activation/commands.ts +src/client/activation/progress.ts +src/client/activation/extensionSurvey.ts +src/client/activation/common/analysisOptions.ts +src/client/activation/languageClientMiddleware.ts + +src/client/formatters/serviceRegistry.ts +src/client/formatters/helper.ts +src/client/formatters/dummyFormatter.ts +src/client/formatters/baseFormatter.ts + +src/client/testing/serviceRegistry.ts +src/client/testing/main.ts +src/client/testing/configurationFactory.ts +src/client/testing/common/constants.ts +src/client/testing/common/testUtils.ts +src/client/testing/common/socketServer.ts +src/client/testing/common/runner.ts + +src/client/common/helpers.ts +src/client/common/net/browser.ts +src/client/common/net/socket/socketCallbackHandler.ts +src/client/common/net/socket/socketServer.ts +src/client/common/net/socket/SocketStream.ts +src/client/common/editor.ts +src/client/common/contextKey.ts +src/client/common/experiments/telemetry.ts +src/client/common/platform/serviceRegistry.ts +src/client/common/platform/errors.ts +src/client/common/platform/fs-temp.ts +src/client/common/platform/fs-paths.ts +src/client/common/platform/registry.ts +src/client/common/platform/pathUtils.ts +src/client/common/persistentState.ts +src/client/common/terminal/activator/base.ts +src/client/common/terminal/activator/powershellFailedHandler.ts +src/client/common/terminal/activator/index.ts +src/client/common/terminal/helper.ts +src/client/common/terminal/syncTerminalService.ts +src/client/common/terminal/factory.ts +src/client/common/terminal/commandPrompt.ts +src/client/common/terminal/service.ts +src/client/common/terminal/shellDetector.ts +src/client/common/terminal/shellDetectors/userEnvironmentShellDetector.ts +src/client/common/terminal/shellDetectors/vscEnvironmentShellDetector.ts +src/client/common/terminal/shellDetectors/terminalNameShellDetector.ts +src/client/common/terminal/shellDetectors/settingsShellDetector.ts 
+src/client/common/terminal/shellDetectors/baseShellDetector.ts +src/client/common/utils/decorators.ts +src/client/common/utils/enum.ts +src/client/common/utils/platform.ts +src/client/common/utils/stopWatch.ts +src/client/common/utils/random.ts +src/client/common/utils/sysTypes.ts +src/client/common/utils/misc.ts +src/client/common/utils/cacheUtils.ts +src/client/common/utils/workerPool.ts +src/client/common/extensions.ts +src/client/common/variables/serviceRegistry.ts +src/client/common/variables/environment.ts +src/client/common/variables/types.ts +src/client/common/variables/systemVariables.ts +src/client/common/cancellation.ts +src/client/common/interpreterPathService.ts +src/client/common/application/applicationShell.ts +src/client/common/application/languageService.ts +src/client/common/application/clipboard.ts +src/client/common/application/workspace.ts +src/client/common/application/debugSessionTelemetry.ts +src/client/common/application/documentManager.ts +src/client/common/application/debugService.ts +src/client/common/application/commands/reloadCommand.ts +src/client/common/application/terminalManager.ts +src/client/common/application/applicationEnvironment.ts +src/client/common/errors/errorUtils.ts +src/client/common/installer/serviceRegistry.ts +src/client/common/installer/channelManager.ts +src/client/common/installer/moduleInstaller.ts +src/client/common/installer/types.ts +src/client/common/installer/pipEnvInstaller.ts +src/client/common/installer/productService.ts +src/client/common/installer/pipInstaller.ts +src/client/common/installer/productPath.ts +src/client/common/process/currentProcess.ts +src/client/common/process/processFactory.ts +src/client/common/process/serviceRegistry.ts +src/client/common/process/pythonToolService.ts +src/client/common/process/internal/python.ts +src/client/common/process/internal/scripts/testing_tools.ts +src/client/common/process/types.ts +src/client/common/process/logger.ts 
+src/client/common/process/pythonProcess.ts +src/client/common/process/pythonEnvironment.ts +src/client/common/process/decoder.ts + + +src/client/debugger/extension/adapter/remoteLaunchers.ts +src/client/debugger/extension/adapter/outdatedDebuggerPrompt.ts +src/client/debugger/extension/adapter/factory.ts +src/client/debugger/extension/adapter/activator.ts +src/client/debugger/extension/adapter/logging.ts +src/client/debugger/extension/hooks/eventHandlerDispatcher.ts +src/client/debugger/extension/hooks/childProcessAttachService.ts +src/client/debugger/extension/attachQuickPick/wmicProcessParser.ts +src/client/debugger/extension/attachQuickPick/factory.ts +src/client/debugger/extension/attachQuickPick/psProcessParser.ts +src/client/debugger/extension/attachQuickPick/picker.ts + +src/client/application/serviceRegistry.ts +src/client/application/diagnostics/surceMapSupportService.ts +src/client/application/diagnostics/base.ts +src/client/application/diagnostics/applicationDiagnostics.ts +src/client/application/diagnostics/filter.ts +src/client/application/diagnostics/promptHandler.ts +src/client/application/diagnostics/commands/base.ts +src/client/application/diagnostics/commands/ignore.ts +src/client/application/diagnostics/commands/factory.ts +src/client/application/diagnostics/commands/execVSCCommand.ts +src/client/application/diagnostics/commands/launchBrowser.ts diff --git a/extensions/positron-python/.eslintrc b/extensions/positron-python/.eslintrc new file mode 100644 index 00000000000..4f1c07a4c23 --- /dev/null +++ b/extensions/positron-python/.eslintrc @@ -0,0 +1,113 @@ +{ + "root": true, + "env": { + "node": true, + "es6": true, + "mocha": true + }, + "parser": "@typescript-eslint/parser", + "plugins": ["@typescript-eslint"], + "extends": [ + "airbnb", + "plugin:@typescript-eslint/recommended", + "plugin:import/errors", + "plugin:import/warnings", + "plugin:import/typescript", + "prettier" + ], + "rules": { + // Overriding ESLint rules with 
Typescript-specific ones + "@typescript-eslint/ban-ts-comment": [ + "error", + { + "ts-ignore": "allow-with-description" + } + ], + "@typescript-eslint/explicit-module-boundary-types": "error", + "no-bitwise": "off", + "no-dupe-class-members": "off", + "@typescript-eslint/no-dupe-class-members": "error", + "no-empty-function": "off", + "@typescript-eslint/no-empty-function": ["error"], + "@typescript-eslint/no-empty-interface": "off", + "@typescript-eslint/no-explicit-any": "error", + "@typescript-eslint/no-non-null-assertion": "off", + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + "args": "after-used", + "argsIgnorePattern": "^_" + } + ], + "no-use-before-define": "off", + "@typescript-eslint/no-use-before-define": [ + "error", + { + "functions": false + } + ], + "no-useless-constructor": "off", + "@typescript-eslint/no-useless-constructor": "error", + "@typescript-eslint/no-var-requires": "off", + + // Other rules + "class-methods-use-this": ["error", {"exceptMethods": ["dispose"]}], + "func-names": "off", + "import/extensions": "off", + "import/namespace": "off", + "import/no-extraneous-dependencies": "off", + "import/no-unresolved": [ + "error", + { + "ignore": ["monaco-editor", "vscode"] + } + ], + "import/prefer-default-export": "off", + "linebreak-style": "off", + "no-await-in-loop": "off", + "no-console": "off", + "no-control-regex": "off", + "no-extend-native": "off", + "no-multi-str": "off", + "no-param-reassign": "off", + "no-prototype-builtins": "off", + "no-restricted-syntax": [ + "error", + { + "selector": "ForInStatement", + "message": "for..in loops iterate over the entire prototype chain, which is virtually never what you want. Use Object.{keys,values,entries}, and iterate over the resulting array." + }, + { + "selector": "LabeledStatement", + "message": "Labels are a form of GOTO; using them makes code confusing and hard to maintain and understand." 
+ }, + { + "selector": "WithStatement", + "message": "`with` is disallowed in strict mode because it makes code impossible to predict and optimize." + } + ], + "no-template-curly-in-string": "off", + "no-underscore-dangle": "off", + "no-useless-escape": "off", + "no-void": [ + "error", + { + "allowAsStatement": true + } + ], + "operator-assignment": "off", + "strict": "off" + }, + // --- Start Positron --- + "overrides": [ + { + // Disable no-explicit-any for jupyter-adapter.d.ts since we don't control that file. + "files": ["jupyter-adapter.d.ts"], + "rules": { + "@typescript-eslint/no-explicit-any": "off" + } + } + ] + // --- End Positron --- +} diff --git a/extensions/positron-python/.gitattributes b/extensions/positron-python/.gitattributes new file mode 100644 index 00000000000..e25c2877c07 --- /dev/null +++ b/extensions/positron-python/.gitattributes @@ -0,0 +1,3 @@ +package.json text eol=lf +package-lock.json text eol=lf +requirements.txt text eol=lf diff --git a/extensions/positron-python/.github/ISSUE_TEMPLATE/3_feature_request.md b/extensions/positron-python/.github/ISSUE_TEMPLATE/3_feature_request.md new file mode 100644 index 00000000000..d13a5e94e70 --- /dev/null +++ b/extensions/positron-python/.github/ISSUE_TEMPLATE/3_feature_request.md @@ -0,0 +1,7 @@ +--- +name: Feature request +about: Request for the Python extension, not supporting/sibling extensions +labels: classify, feature-request +--- + + diff --git a/extensions/positron-python/.github/ISSUE_TEMPLATE/config.yml b/extensions/positron-python/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000000..eaacc33b8d8 --- /dev/null +++ b/extensions/positron-python/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,17 @@ +blank_issues_enabled: false +contact_links: + - name: 'Bug 🐜' + url: https://aka.ms/pvsc-bug + about: 'Use the `Python: Report Issue...` command (follow the link for instructions)' + - name: 'Pylance' + url: https://github.com/microsoft/pylance-release/issues + about: 'For 
issues relating to the Pylance language server extension' + - name: 'Jupyter' + url: https://github.com/microsoft/vscode-jupyter/issues + about: 'For issues relating to the Jupyter extension (including the interactive window)' + - name: 'Debugpy' + url: https://github.com/microsoft/debugpy/issues + about: 'For issues relating to the debugpy debugger' + - name: Help/Support + url: https://github.com/microsoft/vscode-python/discussions/categories/q-a + about: 'Having trouble with the extension? Need help getting something to work?' diff --git a/extensions/positron-python/.github/actions/build-vsix/action.yml b/extensions/positron-python/.github/actions/build-vsix/action.yml new file mode 100644 index 00000000000..52d6d1cdbdd --- /dev/null +++ b/extensions/positron-python/.github/actions/build-vsix/action.yml @@ -0,0 +1,92 @@ +name: 'Build VSIX' +description: "Build the extension's VSIX" + +inputs: + node_version: + description: 'Version of Node to install' + required: true + vsix_name: + description: 'Name to give the final VSIX' + required: true + artifact_name: + description: 'Name to give the artifact containing the VSIX' + required: true + +runs: + using: 'composite' + steps: + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node_version }} + cache: 'npm' + + # Jedi LS depends on dataclasses which is not in the stdlib in Python 3.7. + - name: Use Python 3.8 for JediLSP + uses: actions/setup-python@v5 + with: + python-version: 3.8 + cache: 'pip' + cache-dependency-path: | + requirements.txt + build/build-install-requirements.txt + pythonFiles/jedilsp_requirements/requirements.txt + + - name: Upgrade Pip + run: python -m pip install -U pip + shell: bash + + # For faster/better builds of sdists. 
+ - name: Install build pre-requisite + run: python -m pip install wheel + shell: bash + + - name: Install Python dependencies + uses: brettcannon/pip-secure-install@v1 + with: + options: '-t ./pythonFiles/lib/python --implementation py' + + - name: Install debugpy and get-pip + run: | + python -m pip --disable-pip-version-check install packaging + python ./pythonFiles/install_debugpy.py + python ./pythonFiles/download_get_pip.py + shell: bash + + - name: Install Jedi LSP + uses: brettcannon/pip-secure-install@v1 + with: + requirements-file: './pythonFiles/jedilsp_requirements/requirements.txt' + options: '-t ./pythonFiles/lib/jedilsp --implementation py --platform any --abi none' + + - name: Run npm ci + run: npm ci --prefer-offline + shell: bash + + # Use the GITHUB_RUN_ID environment variable to update the build number. + # GITHUB_RUN_ID is a unique number for each run within a repository. + # This number does not change if you re-run the workflow run. + - name: Update extension build number + run: npm run updateBuildNumber -- --buildNumber $GITHUB_RUN_ID + shell: bash + + - name: Update optional extension dependencies + run: npm run addExtensionPackDependencies + shell: bash + + - name: Build VSIX + run: npm run package + shell: bash + + - name: Rename VSIX + # Move to a temp name in case the specified name happens to match the default name. 
+ run: mv ms-python-insiders.vsix ms-python-temp.vsix && mv ms-python-temp.vsix ${{ inputs.vsix_name }} + shell: bash + + - name: Upload VSIX + uses: actions/upload-artifact@v3 + with: + name: ${{ inputs.artifact_name }} + path: ${{ inputs.vsix_name }} + if-no-files-found: error + retention-days: 7 diff --git a/extensions/positron-python/.github/actions/lint/action.yml b/extensions/positron-python/.github/actions/lint/action.yml new file mode 100644 index 00000000000..47924c10815 --- /dev/null +++ b/extensions/positron-python/.github/actions/lint/action.yml @@ -0,0 +1,56 @@ +name: 'Lint' +description: 'Lint TypeScript and Python code' + +inputs: + node_version: + description: 'Version of Node to install' + required: true + +runs: + using: 'composite' + steps: + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node_version }} + cache: 'npm' + + - name: Install Node dependencies + run: npm ci --prefer-offline + shell: bash + + - name: Run `gulp prePublishNonBundle` + run: npx gulp prePublishNonBundle + shell: bash + + - name: Check dependencies + run: npm run checkDependencies + shell: bash + + - name: Lint TypeScript code + run: npm run lint + shell: bash + + - name: Check TypeScript format + run: npm run format-check + shell: bash + + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: '3.x' + cache: 'pip' + + - name: Check Python format + run: | + python -m pip install -U black + python -m black . --check + working-directory: pythonFiles + shell: bash + + - name: Run Ruff + run: | + python -m pip install -U ruff + python -m ruff check . 
+ working-directory: pythonFiles + shell: bash diff --git a/extensions/positron-python/.github/actions/smoke-tests/action.yml b/extensions/positron-python/.github/actions/smoke-tests/action.yml new file mode 100644 index 00000000000..b2d00205043 --- /dev/null +++ b/extensions/positron-python/.github/actions/smoke-tests/action.yml @@ -0,0 +1,67 @@ +name: 'Smoke tests' +description: 'Run smoke tests' + +inputs: + node_version: + description: 'Version of Node to install' + required: true + artifact_name: + description: 'Name of the artifact containing the VSIX' + required: true + +runs: + using: 'composite' + steps: + - name: Install Node + uses: actions/setup-node@v2 + with: + node-version: ${{ inputs.node_version }} + cache: 'npm' + + - name: Install Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + cache: 'pip' + cache-dependency-path: | + build/test-requirements.txt + requirements.txt + + - name: Install dependencies (npm ci) + run: npm ci --prefer-offline + shell: bash + + - name: Install Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + options: '-t ./pythonFiles/lib/python --implementation py' + + - name: pip install system test requirements + run: | + python -m pip install --upgrade -r build/test-requirements.txt + python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --implementation py --no-deps --upgrade --pre debugpy + shell: bash + + # Bits from the VSIX are reused by smokeTest.ts to speed things up. 
+        - name: Download VSIX
+          uses: actions/download-artifact@v2
+          with:
+              name: ${{ inputs.artifact_name }}
+
+        - name: Prepare for smoke tests
+          run: npx tsc -p ./
+          shell: bash
+
+        - name: Set CI_PYTHON_PATH and CI_DISABLE_AUTO_SELECTION
+          run: |
+              echo "CI_PYTHON_PATH=python" >> $GITHUB_ENV
+              echo "CI_DISABLE_AUTO_SELECTION=1" >> $GITHUB_ENV
+          shell: bash
+
+        - name: Run smoke tests
+          env:
+              DISPLAY: 10
+              INSTALL_JUPYTER_EXTENSION: true
+          uses: GabrielBB/xvfb-action@v1.5
+          with:
+              run: node --no-force-async-hooks-checks ./out/test/smokeTest.js
diff --git a/extensions/positron-python/.github/dependabot.yml b/extensions/positron-python/.github/dependabot.yml
new file mode 100644
index 00000000000..a005c17f8c3
--- /dev/null
+++ b/extensions/positron-python/.github/dependabot.yml
@@ -0,0 +1,49 @@
+version: 2
+updates:
+    - package-ecosystem: 'github-actions'
+      directory: /
+      schedule:
+          interval: weekly
+      labels:
+          - 'no-changelog'
+
+    - package-ecosystem: 'github-actions'
+      directory: .github/actions/build-vsix
+      schedule:
+          interval: weekly
+      labels:
+          - 'no-changelog'
+
+    - package-ecosystem: 'github-actions'
+      directory: .github/actions/lint
+      schedule:
+          interval: weekly
+      labels:
+          - 'no-changelog'
+
+    - package-ecosystem: 'github-actions'
+      directory: .github/actions/smoke-tests
+      schedule:
+          interval: weekly
+      labels:
+          - 'no-changelog'
+
+    # Not skipping the news for some Python dependencies in case it's actually useful to communicate to users.
+    - package-ecosystem: 'pip'
+      directory: /
+      schedule:
+          interval: weekly
+      ignore:
+          - dependency-name: prospector # Due to Python 2.7 and #14477.
+          - dependency-name: pytest # Due to Python 2.7 and #13776.
+          - dependency-name: py # Due to Python 2.7.
+          - dependency-name: jedi-language-server
+      labels:
+          - 'no-changelog'
+      # Activate when we feel ready to keep up with frequency.
+ # - package-ecosystem: 'npm' + # directory: / + # schedule: + # interval: daily + # default_labels: + # - "no-changelog" diff --git a/extensions/positron-python/.github/release.yml b/extensions/positron-python/.github/release.yml new file mode 100644 index 00000000000..0058580e92e --- /dev/null +++ b/extensions/positron-python/.github/release.yml @@ -0,0 +1,19 @@ +changelog: + exclude: + labels: + - 'no-changelog' + authors: + - 'dependabot' + + categories: + - title: Enhancements + labels: + - 'feature-request' + + - title: Bug Fixes + labels: + - 'bug' + + - title: Code Health + labels: + - 'debt' diff --git a/extensions/positron-python/.github/release_plan.md b/extensions/positron-python/.github/release_plan.md new file mode 100644 index 00000000000..71f8d8aa095 --- /dev/null +++ b/extensions/positron-python/.github/release_plan.md @@ -0,0 +1,119 @@ +All dates should align with VS Code's [iteration](https://github.com/microsoft/vscode/labels/iteration-plan) and [endgame](https://github.com/microsoft/vscode/labels/endgame-plan) plans. + +Feature freeze is Monday @ 17:00 America/Vancouver, XXX XX. At that point, commits to `main` should only be in response to bugs found during endgame testing until the release candidate is ready. + + +NOTE: the number of this release is in the issue title and can be substituted in wherever you see [YYYY.minor]. + + +# Release candidate (Monday, XXX XX) + +NOTE: Third Party Notices are automatically added by our build pipelines using https://tools.opensource.microsoft.com/notice. + +### Step 1: +##### Bump the version of `main` to be a release candidate (also updating debugpy dependences, third party notices, and package-lock.json).❄️ (steps with ❄️ will dictate this step happens while main is frozen 🥶) + +- [ ] checkout to `main` on your local machine and run `git fetch` to ensure your local is up to date with the remote repo. +- [ ] Create a new branch called **`bump-release-[YYYY.minor]`**. 
+- [ ] Change the version in `package.json` to the next **even** number and switch the `-dev` to `-rc`. (🤖) +- [ ] Run `npm install` to make sure `package-lock.json` is up-to-date _(you should now see changes to the `package.json` and `package-lock.json` at this point which update the version number **only**)_. (🤖) +- [ ] Check [debugpy on PyPI](https://pypi.org/project/debugpy/) for a new release and update the version of debugpy in [`install_debugpy.py`](https://github.com/microsoft/vscode-python/blob/main/pythonFiles/install_debugpy.py) if necessary. +- [ ] Update `ThirdPartyNotices-Repository.txt` as appropriate. You can check by looking at the [commit history](https://github.com/microsoft/vscode-python/commits/main) and scrolling through to see if there's anything listed there which might have pulled in some code directly into the repository from somewhere else. If you are still unsure you can check with the team. +- [ ] Create a PR from your branch **`bump-release-[YYYY.minor]`** to `main`. Add the `"no change-log"` tag to the PR so it does not show up on the release notes before merging it. + +NOTE: this PR will fail the test in our internal release pipeline called `VS Code (pre-release)` because the version specified in `main` is (temporarily) an invalid pre-release version. This is expected as this will be resolved below. + + +### Step 2: Creating your release branch ❄️ +- [ ] Create a release branch by creating a new branch called **`release/YYYY.minor`** branch from `main`. This branch is now the candidate for our release which will be the base from which we will release. + +NOTE: If there are release branches that are two versions old you can delete them at this time. + +### Step 3 Create a draft GitHub release for the release notes (🤖) ❄️ + +- [ ] Create a new [GitHub release](https://github.com/microsoft/vscode-python/releases/new). +- [ ] Specify a new tag called `YYYY.minor.0`. 
+- [ ] Have the `target` for the github release be your release branch called **`release/YYYY.minor`**. +- [ ] Create the release notes by specifying the previous tag for the last stable release and click `Generate release notes`. Quickly check that it only contain notes from what is new in this release. +- [ ] Click `Save draft`. + +### Step 4: Return `main` to dev and unfreeze (❄️ ➡ 💧) +NOTE: The purpose of this step is ensuring that main always is on a dev version number for every night's 🌃 pre-release. Therefore it is imperative that you do this directly after the previous steps to reset the version in main to a dev version **before** a pre-release goes out. +- [ ] Create a branch called **`bump-dev-version-YYYY.[minor+1]`**. +- [ ] Bump the minor version number in the `package.json` to the next `YYYY.[minor+1]` which will be an odd number, and switch the `-rc` to `-dev`.(🤖) +- [ ] Run `npm install` to make sure `package-lock.json` is up-to-date _(you should now see changes to the `package.json` and `package-lock.json` only relating to the new version number)_ . (🤖) +- [ ] Create a PR from this branch against `main` and merge it. + +NOTE: this PR should make all CI relating to `main` be passing again (such as the failures stemming from step 1). + +### Step 5: Notifications and Checks on External Release Factors +- [ ] Check [Component Governance](https://dev.azure.com/monacotools/Monaco/_componentGovernance/192726?_a=alerts&typeId=11825783&alerts-view-option=active) to make sure there are no active alerts. +- [ ] Manually add/fix any 3rd-party licenses as appropriate based on what the internal build pipeline detects. +- [ ] Open appropriate [documentation issues](https://github.com/microsoft/vscode-docs/issues?q=is%3Aissue+is%3Aopen+label%3Apython). +- [ ] Contact the PM team to begin drafting a blog post. +- [ ] Announce to the development team that `main` is open again. 
+ + +# Release (Wednesday, XXX XX) + +### Step 6: Take the release branch from a candidate to the finalized release +- [ ] Make sure the [appropriate pull requests](https://github.com/microsoft/vscode-docs/pulls) for the [documentation](https://code.visualstudio.com/docs/python/python-tutorial) -- including the [WOW](https://code.visualstudio.com/docs/languages/python) page -- are ready. +- [ ] Check to make sure any final updates to the **`release/YYYY.minor`** branch have been merged. +- [ ] Create a branch against **`release/YYYY.minor`** called **`finalized-release-[YYYY.minor]`**. +- [ ] Update the version in `package.json` to remove the `-rc` (🤖) from the version. +- [ ] Run `npm install` to make sure `package-lock.json` is up-to-date _(the only update should be the version number if `package-lock.json` has been kept up-to-date)_. (🤖) +- [ ] Update `ThirdPartyNotices-Repository.txt` manually if necessary. +- [ ] Create a PR from **`finalized-release-[YYYY.minor]`** against `release/YYYY.minor` and merge it. + + +### Step 7: Execute the Release +- [ ] Make sure CI is passing for **`release/YYYY.minor`** release branch (🤖). +- [ ] Run the [CD](https://dev.azure.com/monacotools/Monaco/_build?definitionId=299) pipeline on the **`release/YYYY.minor`** branch. + - [ ] Click `run pipeline`. + - [ ] for `branch/tag` select the release branch which is **`release/YYYY.minor`**. + - NOTE: Please opt to release the python extension close to when VS Code is released to align when release notes go out. When we bump the VS Code engine number, our extension will not go out to stable until the VS Code stable release but this only occurs when we bump the engine number. +- [ ] 🧍🧍 Get approval on the release on the [CD](https://dev.azure.com/monacotools/Monaco/_build?definitionId=299). +- [ ] Click "approve" in the publish step of [CD](https://dev.azure.com/monacotools/Monaco/_build?definitionId=299) to publish the release to the marketplace. 
🎉
+- [ ] Take the Github release out of draft.
+- [ ] Publish documentation changes.
+- [ ] Contact the PM team to publish the blog post.
+- [ ] Determine if a hotfix is needed.
+- [ ] Merge the release branch **`release/YYYY.minor`** back into `main`. (This step is only required if changes were merged into the release branch. If the only change made on the release branch is the version, this is not necessary. Overall you need to ensure you DO NOT overwrite the version on the `main` branch.)
+
+
+## Steps for Point Release (if necessary)
+- [ ] checkout to `main` on your local machine and run `git fetch` to ensure your local is up to date with the remote repo.
+- [ ] checkout to the `release/YYYY.minor` and check to make sure all necessary changes for the point release have been cherry-picked into the release branch. If not, contact the owner of the changes to do so.
+- [ ] Create a branch against **`release/YYYY.minor`** called **`release-[YYYY.minor.point]`**.
+- [ ] Bump the point version number in the `package.json` to the next `YYYY.minor.point`
+- [ ] Run `npm install` to make sure `package-lock.json` is up-to-date _(you should now see changes to the `package.json` and `package-lock.json` only relating to the new version number)_ . (🤖)
+- [ ] Create a PR from this branch against `release/YYYY.minor`
+- [ ] **Rebase** and merge this PR into the release branch
+- [ ] Create a draft GitHub release for the release notes (🤖) ❄️
+    - [ ] Create a new [GitHub release](https://github.com/microsoft/vscode-python/releases/new).
+    - [ ] Specify a new tag called `vYYYY.minor.point`.
+    - [ ] Have the `target` for the github release be your release branch called **`release/YYYY.minor`**.
+    - [ ] Create the release notes by specifying the previous tag as the previous version of stable, so the minor release **`vYYYY.minor`** for the last stable release and click `Generate release notes`.
+ - [ ] Check the generated notes to ensure that all PRs for the point release are included so users know these new changes. + - [ ] Click `Save draft`. +- [ ] Publish the point release + - [ ] Make sure CI is passing for **`release/YYYY.minor`** release branch (🤖). + - [ ] Run the [CD](https://dev.azure.com/monacotools/Monaco/_build?definitionId=299) pipeline on the **`release/YYYY.minor`** branch. + - [ ] Click `run pipeline`. + - [ ] for `branch/tag` select the release branch which is **`release/YYYY.minor`**. + - [ ] 🧍🧍 Get approval on the release on the [CD](https://dev.azure.com/monacotools/Monaco/_build?definitionId=299) and publish the release to the marketplace. 🎉 + - [ ] Take the Github release out of draft. + +## Steps for contributing to a point release +- [ ] Work with team to decide if point release is necessary +- [ ] Work with team or users to verify the fix is correct and solves the problem without creating any new ones +- [ ] Create PR/PRs and merge then each into main as usual +- [ ] Make sure to still mark if the change is "bug" or "no-changelog" +- [ ] Cherry-pick all PRs to the release branch and check that the changes are in before the package is bumped +- [ ] Notify the release champ that your changes are in so they can trigger a point-release + + +## Prep for the _next_ release + +- [ ] Create a new [release plan](https://raw.githubusercontent.com/microsoft/vscode-python/main/.github/release_plan.md). 
(🤖)
+- [ ] [(Un-)pin](https://help.github.com/en/articles/pinning-an-issue-to-your-repository) [release plan issues](https://github.com/Microsoft/vscode-python/labels/release%20plan) (🤖)
diff --git a/extensions/positron-python/.github/workflows/build.yml b/extensions/positron-python/.github/workflows/build.yml
new file mode 100644
index 00000000000..586fe619d5d
--- /dev/null
+++ b/extensions/positron-python/.github/workflows/build.yml
@@ -0,0 +1,373 @@
+name: Build
+
+on:
+    push:
+        branches:
+            - 'main'
+            - 'release'
+            - 'release/*'
+            - 'release-*'
+
+env:
+    NODE_VERSION: 18.17.1
+    PYTHON_VERSION: '3.10' # YML treats 3.10 the number as 3.1, so quotes around 3.10
+    # Force a path with spaces and to test extension works in these scenarios
+    # Unicode characters are causing 2.7 failures so skip that for now.
+    special-working-directory: './path with spaces'
+    special-working-directory-relative: 'path with spaces'
+    # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter).
+    # Also enables a reporter which exits the process running the tests if it hasn't already.
+ MOCHA_REPORTER_JUNIT: true + +jobs: + setup: + name: Set up + if: github.repository == 'microsoft/vscode-python' + runs-on: ubuntu-latest + defaults: + run: + shell: python + outputs: + vsix_name: ${{ steps.vsix_names.outputs.vsix_name }} + vsix_artifact_name: ${{ steps.vsix_names.outputs.vsix_artifact_name }} + steps: + - name: VSIX names + id: vsix_names + run: | + import os + if os.environ["GITHUB_REF"].endswith("/main"): + vsix_type = "insiders" + else: + vsix_type = "release" + print(f"::set-output name=vsix_name::ms-python-{vsix_type}.vsix") + print(f"::set-output name=vsix_artifact_name::ms-python-{vsix_type}-vsix") + + build-vsix: + name: Create VSIX + if: github.repository == 'microsoft/vscode-python' + needs: setup + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build VSIX + uses: ./.github/actions/build-vsix + with: + node_version: ${{ env.NODE_VERSION }} + vsix_name: ${{ needs.setup.outputs.vsix_name }} + artifact_name: ${{ needs.setup.outputs.vsix_artifact_name }} + + lint: + name: Lint + if: github.repository == 'microsoft/vscode-python' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Lint + uses: ./.github/actions/lint + with: + node_version: ${{ env.NODE_VERSION }} + + check-types: + name: Check Python types + if: github.repository == 'microsoft/vscode-python' + runs-on: ubuntu-latest + steps: + - name: Use Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Install core Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + options: '-t ./pythonFiles/lib/python --no-cache-dir --implementation py' + + - name: Install Jedi requirements + run: python scripts/vendor.py + + - name: Install other Python requirements + run: | + python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir 
--implementation py --no-deps --upgrade --pre debugpy + python -m pip install --upgrade -r build/test-requirements.txt + + - name: Run Pyright + uses: jakebailey/pyright-action@v2 + with: + version: 1.1.308 + working-directory: 'pythonFiles' + + python-tests: + name: Python Tests + # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded. + runs-on: ${{ matrix.os }} + defaults: + run: + working-directory: ${{ env.special-working-directory }} + strategy: + fail-fast: false + matrix: + # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used, + # macOS runners are expensive, and we assume that Ubuntu is enough to cover the Unix case. + os: [ubuntu-latest, windows-latest] + # Run the tests on the oldest and most recent versions of Python. + python: ['3.8', '3.x', '3.12-dev'] + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: ${{ env.special-working-directory-relative }} + + - name: Use Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install base Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + requirements-file: '"${{ env.special-working-directory-relative }}/requirements.txt"' + options: '-t "${{ env.special-working-directory-relative }}/pythonFiles/lib/python" --no-cache-dir --implementation py' + + - name: Install test requirements + run: python -m pip install --upgrade -r build/test-requirements.txt + + - name: Run Python unit tests + run: python pythonFiles/tests/run_all.py + + tests: + name: Tests + if: github.repository == 'microsoft/vscode-python' + runs-on: ${{ matrix.os }} + defaults: + run: + working-directory: ${{ env.special-working-directory }} + strategy: + fail-fast: false + matrix: + # We're not running CI on macOS for now because it's one less matrix + # entry to lower the number of runners used, macOS runners are 
expensive, + # and we assume that Ubuntu is enough to cover the UNIX case. + os: [ubuntu-latest, windows-latest] + python: ['3.x'] + test-suite: [ts-unit, venv, single-workspace, multi-workspace, debugger, functional] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: ${{ env.special-working-directory-relative }} + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'npm' + cache-dependency-path: ${{ env.special-working-directory-relative }}/package-lock.json + + - name: Install dependencies (npm ci) + run: npm ci + + - name: Compile + run: npx gulp prePublishNonBundle + + - name: Localization + run: npx @vscode/l10n-dev@latest export ./src + + - name: Install Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Download get-pip.py + run: | + python -m pip install wheel + python -m pip install -r build/build-install-requirements.txt + python ./pythonFiles/download_get_pip.py + shell: bash + + - name: Install debugpy + run: | + # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. + python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy + + - name: Install core Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + requirements-file: '"${{ env.special-working-directory-relative }}/requirements.txt"' + options: '-t "${{ env.special-working-directory-relative }}/pythonFiles/lib/python" --no-cache-dir --implementation py' + if: startsWith(matrix.python, 3.) + + - name: Install Jedi requirements + run: python scripts/vendor.py + if: startsWith(matrix.python, 3.) 
+ + - name: Install test requirements + run: python -m pip install --upgrade -r build/test-requirements.txt + + - name: Install debugpy wheels (Python ${{ matrix.python }}) + run: | + python -m pip install wheel + python -m pip install -r build/build-install-requirements.txt + python ./pythonFiles/install_debugpy.py + shell: bash + if: matrix.test-suite == 'debugger' + + - name: Install functional test requirements + run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt + if: matrix.test-suite == 'functional' + + - name: Prepare pipenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install pipenv + python -m pipenv run python ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} pipenvPath + + - name: Prepare poetry for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install poetry + Move-Item -Path ".\build\ci\pyproject.toml" -Destination . + poetry env use python + + - name: Prepare virtualenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install virtualenv + python -m virtualenv .virtualenv/ + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".virtualenv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } else { + & ".virtualenv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } + + - name: Prepare venv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' && startsWith(matrix.python, 3.) 
+ run: | + python -m venv .venv + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".venv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } else { + & ".venv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } + + - name: Prepare conda for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + # 1. For `*.testvirtualenvs.test.ts` + if ('${{ matrix.os }}' -match 'windows-latest') { + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath python.exe + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath Scripts | Join-Path -ChildPath conda + } else{ + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath python + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath conda + } + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaExecPath $condaExecPath + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaPath + & $condaExecPath init --all + + - name: Set CI_PYTHON_PATH and CI_DISABLE_AUTO_SELECTION + run: | + echo "CI_PYTHON_PATH=python" >> $GITHUB_ENV + echo "CI_DISABLE_AUTO_SELECTION=1" >> $GITHUB_ENV + shell: bash + if: matrix.test-suite != 'ts-unit' + + # Run TypeScript unit tests only for Python 3.X. + - name: Run TypeScript unit tests + run: npm run test:unittests + if: matrix.test-suite == 'ts-unit' && startsWith(matrix.python, '3.') + + # The virtual environment based tests use the `testSingleWorkspace` set of tests + # with the environment variable `TEST_FILES_SUFFIX` set to `testvirtualenvs`, + # which is set in the "Prepare environment for venv tests" step. + # We also use a third-party GitHub Action to install xvfb on Linux, + # run tests and then clean up the process once the tests ran. 
+ # See https://github.com/GabrielBB/xvfb-action + - name: Run venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'venv' && matrix.os == 'ubuntu-latest' + + - name: Run single-workspace tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'single-workspace' + + - name: Run multi-workspace tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testMultiWorkspace + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'multi-workspace' + + - name: Run debugger tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testDebugger + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'debugger' + + # Run TypeScript functional tests + - name: Run TypeScript functional tests + run: npm run test:functional + if: matrix.test-suite == 'functional' + + smoke-tests: + name: Smoke tests + if: github.repository == 'microsoft/vscode-python' + runs-on: ${{ matrix.os }} + needs: [setup, build-vsix] + strategy: + fail-fast: false + matrix: + # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used, + # macOS runners are expensive, and we assume that Ubuntu is enough to cover the UNIX case. 
+ os: [ubuntu-latest, windows-latest] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Smoke tests + uses: ./.github/actions/smoke-tests + with: + node_version: ${{ env.NODE_VERSION }} + artifact_name: ${{ needs.setup.outputs.vsix_artifact_name }} diff --git a/extensions/positron-python/.github/workflows/codeql-analysis.yml b/extensions/positron-python/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000000..d902a68878e --- /dev/null +++ b/extensions/positron-python/.github/workflows/codeql-analysis.yml @@ -0,0 +1,68 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +name: 'CodeQL' + +on: + push: + branches: + - main + - release-* + - release/* + pull_request: + # The branches below must be a subset of the branches above + branches: [main] + schedule: + - cron: '0 3 * * 0' + +permissions: + security-events: write + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + # Override automatic language detection by changing the below list + # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] + language: ['javascript', 'python'] + # Learn more... + # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. 
+ # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + #- name: Autobuild + # uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/extensions/positron-python/.github/workflows/community-feedback-auto-comment.yml b/extensions/positron-python/.github/workflows/community-feedback-auto-comment.yml new file mode 100644 index 00000000000..cf3c4f51fe6 --- /dev/null +++ b/extensions/positron-python/.github/workflows/community-feedback-auto-comment.yml @@ -0,0 +1,28 @@ +name: Community Feedback Auto Comment + +on: + issues: + types: + - labeled +jobs: + add-comment: + if: github.event.label.name == 'needs community feedback' + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Check For Existing Comment + uses: peter-evans/find-comment@v3 + id: finder + with: + issue-number: ${{ github.event.issue.number }} + comment-author: 'github-actions[bot]' + body-includes: 'Thanks for the feature request! We are going to give the community' + + - name: Add Community Feedback Comment + if: steps.finder.outputs.comment-id == '' + uses: peter-evans/create-or-update-comment@v4 + with: + issue-number: ${{ github.event.issue.number }} + body: | + Thanks for the feature request! 
We are going to give the community 60 days from when this issue was created to provide 7 👍 upvotes on the opening comment to gauge general interest in this idea. If there's enough upvotes then we will consider this feature request in our future planning. If there's unfortunately not enough upvotes then we will close this issue. diff --git a/extensions/positron-python/.github/workflows/info-needed-closer.yml b/extensions/positron-python/.github/workflows/info-needed-closer.yml new file mode 100644 index 00000000000..442799cd7a1 --- /dev/null +++ b/extensions/positron-python/.github/workflows/info-needed-closer.yml @@ -0,0 +1,28 @@ +name: Info-Needed Closer +on: + schedule: + - cron: 20 12 * * * # 5:20am Redmond + repository_dispatch: + types: [trigger-needs-more-info] + workflow_dispatch: + +jobs: + main: + runs-on: ubuntu-latest + steps: + - name: Checkout Actions + uses: actions/checkout@v4 + with: + repository: 'microsoft/vscode-github-triage-actions' + path: ./actions + ref: stable + - name: Install Actions + run: npm install --production --prefix ./actions + - name: Run info-needed Closer + uses: ./actions/needs-more-info-closer + with: + label: info-needed + closeDays: 30 + closeComment: "Because we have not heard back with the information we requested, we are closing this issue for now. If you are able to provide the info later on, then we will be happy to re-open this issue to pick up where we left off. \n\nHappy Coding!" + pingDays: 30 + pingComment: "Hey @${assignee}, this issue might need further attention.\n\n@${author}, you can help us out by closing this issue if the problem no longer exists, or adding more information." 
diff --git a/extensions/positron-python/.github/workflows/issue-labels.yml b/extensions/positron-python/.github/workflows/issue-labels.yml new file mode 100644 index 00000000000..8b084aef409 --- /dev/null +++ b/extensions/positron-python/.github/workflows/issue-labels.yml @@ -0,0 +1,33 @@ +name: Issue labels + +on: + issues: + types: [opened, reopened] + +env: + TRIAGERS: '["karrtikr","karthiknadig","paulacamargo25","eleanorjboyd","anthonykim1"]' + +permissions: + issues: write + +jobs: + # From https://github.com/marketplace/actions/github-script#apply-a-label-to-an-issue. + add-classify-label: + name: "Add 'triage-needed' and remove assignees" + runs-on: ubuntu-latest + steps: + - name: Checkout Actions + uses: actions/checkout@v4 + with: + repository: 'microsoft/vscode-github-triage-actions' + ref: stable + path: ./actions + + - name: Install Actions + run: npm install --production --prefix ./actions + + - name: "Add 'triage-needed' and remove assignees" + uses: ./actions/python-issue-labels + with: + triagers: ${{ env.TRIAGERS }} + token: ${{secrets.GITHUB_TOKEN}} diff --git a/extensions/positron-python/.github/workflows/lock-issues.yml b/extensions/positron-python/.github/workflows/lock-issues.yml new file mode 100644 index 00000000000..47f243d7197 --- /dev/null +++ b/extensions/positron-python/.github/workflows/lock-issues.yml @@ -0,0 +1,24 @@ +name: 'Lock Issues' + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +permissions: + issues: write + +concurrency: + group: lock + +jobs: + lock-issues: + runs-on: ubuntu-latest + steps: + - name: 'Lock Issues' + uses: dessant/lock-threads@v5 + with: + github-token: ${{ github.token }} + issue-inactive-days: '30' + process-only: 'issues' + log-output: true diff --git a/extensions/positron-python/.github/workflows/pr-check.yml b/extensions/positron-python/.github/workflows/pr-check.yml new file mode 100644 index 00000000000..9b2aaa09974 --- /dev/null +++ 
b/extensions/positron-python/.github/workflows/pr-check.yml @@ -0,0 +1,539 @@ +name: PR/CI Check + +on: + pull_request: + push: + branches-ignore: + - main + - release* + +env: + NODE_VERSION: 18.17.1 + PYTHON_VERSION: '3.10' # YML treats 3.10 the number as 3.1, so quotes around 3.10 + MOCHA_REPORTER_JUNIT: true # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter). Also enables a reporter which exits the process running the tests if it haven't already. + ARTIFACT_NAME_VSIX: ms-python-insiders-vsix + VSIX_NAME: ms-python-insiders.vsix + TEST_RESULTS_DIRECTORY: . + # Force a path with spaces and to test extension works in these scenarios + # Unicode characters are causing 2.7 failures so skip that for now. + special-working-directory: './path with spaces' + special-working-directory-relative: 'path with spaces' + +jobs: + build-vsix: + name: Create VSIX + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build VSIX + uses: ./.github/actions/build-vsix + with: + node_version: ${{ env.NODE_VERSION}} + vsix_name: ${{ env.VSIX_NAME }} + artifact_name: ${{ env.ARTIFACT_NAME_VSIX }} + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Lint + uses: ./.github/actions/lint + with: + node_version: ${{ env.NODE_VERSION }} + + check-types: + name: Check Python types + runs-on: ubuntu-latest + steps: + - name: Use Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Install base Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + options: '-t ./pythonFiles/lib/python --no-cache-dir --implementation py' + + - name: Install Jedi requirements + run: python scripts/vendor.py + + - name: Install other Python requirements + run: | + python -m pip --disable-pip-version-check install -t 
./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy
+          python -m pip install --upgrade -r build/test-requirements.txt
+
+      - name: Run Pyright
+        uses: jakebailey/pyright-action@v2
+        with:
+          version: 1.1.308
+          working-directory: 'pythonFiles'
+
+  python-tests:
+    name: Python Tests
+    # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded.
+    runs-on: ${{ matrix.os }}
+    defaults:
+      run:
+        working-directory: ${{ env.special-working-directory }}
+    strategy:
+      fail-fast: false
+      matrix:
+        # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used,
+        # macOS runners are expensive, and we assume that Ubuntu is enough to cover the Unix case.
+        os: [ubuntu-latest, windows-latest]
+        # Run the tests on the oldest and most recent versions of Python.
+        python: ['3.8', '3.x'] # run for 3 pytest versions, most recent stable, oldest version supported and pre-release
+        pytest-version: ['pytest', 'pytest@pre-release', 'pytest==6.2.0']
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          path: ${{ env.special-working-directory-relative }}
+
+      - name: Use Python ${{ matrix.python }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python }}
+
+      - name: Install specific pytest version
+        if: matrix.pytest-version == 'pytest@pre-release'
+        run: |
+          python -m pip install --pre pytest
+
+      - name: Install specific pytest version
+        if: matrix.pytest-version != 'pytest@pre-release'
+        run: |
+          python -m pip install "${{ matrix.pytest-version }}"
+
+      - name: Show pytest version
+        run: python -m pytest --version
+      - name: Install base Python requirements
+        uses: brettcannon/pip-secure-install@v1
+        with:
+          requirements-file: '"${{ env.special-working-directory-relative }}/requirements.txt"'
+          options: '-t "${{ env.special-working-directory-relative }}/pythonFiles/lib/python" --no-cache-dir
--implementation py' + + - name: Install test requirements + run: python -m pip install --upgrade -r build/test-requirements.txt + + - name: Run Python unit tests + run: python pythonFiles/tests/run_all.py + + tests: + name: Tests + # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded. + runs-on: ${{ matrix.os }} + defaults: + run: + working-directory: ${{ env.special-working-directory }} + strategy: + fail-fast: false + matrix: + # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used, + # macOS runners are expensive, and we assume that Ubuntu is enough to cover the Unix case. + os: [ubuntu-latest, windows-latest] + # Run the tests on the oldest and most recent versions of Python. + python: ['3.x'] + test-suite: [ts-unit, venv, single-workspace, debugger, functional] + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: ${{ env.special-working-directory-relative }} + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'npm' + cache-dependency-path: ${{ env.special-working-directory-relative }}/package-lock.json + + - name: Install dependencies (npm ci) + run: npm ci + + - name: Compile + run: npx gulp prePublishNonBundle + + - name: Localization + run: npx @vscode/l10n-dev@latest export ./src + + - name: Use Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install debugpy + run: | + # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. 
+ python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy + + - name: Download get-pip.py + run: | + python -m pip install wheel + python -m pip install -r build/build-install-requirements.txt + python ./pythonFiles/download_get_pip.py + shell: bash + + - name: Install base Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + requirements-file: '"${{ env.special-working-directory-relative }}/requirements.txt"' + options: '-t "${{ env.special-working-directory-relative }}/pythonFiles/lib/python" --no-cache-dir --implementation py' + + - name: Install Jedi requirements + run: python scripts/vendor.py + + - name: Install test requirements + run: python -m pip install --upgrade -r build/test-requirements.txt + + - name: Install debugpy wheels (Python ${{ matrix.python }}) + run: | + python -m pip install wheel + python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt + python ./pythonFiles/install_debugpy.py + shell: bash + if: matrix.test-suite == 'debugger' + + - name: Install functional test requirements + run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt + if: matrix.test-suite == 'functional' + + - name: Prepare pipenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install pipenv + python -m pipenv run python ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} pipenvPath + + - name: Prepare poetry for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install poetry + Move-Item -Path ".\build\ci\pyproject.toml" -Destination . 
+ poetry env use python + + - name: Prepare virtualenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + python -m pip install virtualenv + python -m virtualenv .virtualenv/ + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".virtualenv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } else { + & ".virtualenv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } + + - name: Prepare venv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' && startsWith(matrix.python, 3.) + run: | + python -m venv .venv + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".venv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } else { + & ".venv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } + + - name: Prepare conda for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + if: matrix.test-suite == 'venv' + run: | + # 1. 
For `*.testvirtualenvs.test.ts` + if ('${{ matrix.os }}' -match 'windows-latest') { + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath python.exe + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath Scripts | Join-Path -ChildPath conda + } else{ + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath python + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath conda + } + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaExecPath $condaExecPath + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaPath + & $condaExecPath init --all + + - name: Set CI_PYTHON_PATH and CI_DISABLE_AUTO_SELECTION + run: | + echo "CI_PYTHON_PATH=python" >> $GITHUB_ENV + echo "CI_DISABLE_AUTO_SELECTION=1" >> $GITHUB_ENV + shell: bash + if: matrix.test-suite != 'ts-unit' + + # Run TypeScript unit tests only for Python 3.X. + - name: Run TypeScript unit tests + run: npm run test:unittests + if: matrix.test-suite == 'ts-unit' && startsWith(matrix.python, 3.) + + # The virtual environment based tests use the `testSingleWorkspace` set of tests + # with the environment variable `TEST_FILES_SUFFIX` set to `testvirtualenvs`, + # which is set in the "Prepare environment for venv tests" step. + # We also use a third-party GitHub Action to install xvfb on Linux, + # run tests and then clean up the process once the tests ran. 
+ # See https://github.com/GabrielBB/xvfb-action + - name: Run venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'venv' + + - name: Run single-workspace tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'single-workspace' + + - name: Run debugger tests + env: + CI_PYTHON_VERSION: ${{ matrix.python }} + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testDebugger + working-directory: ${{ env.special-working-directory }} + if: matrix.test-suite == 'debugger' + + # Run TypeScript functional tests + - name: Run TypeScript functional tests + run: npm run test:functional + if: matrix.test-suite == 'functional' + + smoke-tests: + name: Smoke tests + # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded. + runs-on: ${{ matrix.os }} + needs: [build-vsix] + strategy: + fail-fast: false + matrix: + # We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used, + # macOS runners are expensive, and we assume that Ubuntu is enough to cover the UNIX case. + os: [ubuntu-latest, windows-latest] + steps: + # Need the source to have the tests available. + - name: Checkout + uses: actions/checkout@v4 + + - name: Smoke tests + uses: ./.github/actions/smoke-tests + with: + node_version: ${{ env.NODE_VERSION }} + artifact_name: ${{ env.ARTIFACT_NAME_VSIX }} + + ### Coverage run + coverage: + name: Coverage + # The value of runs-on is the OS of the current job (specified in the strategy matrix below) instead of being hardcoded. 
+ runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # Only run coverage on linux for PRs + os: [ubuntu-latest] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'npm' + + - name: Install dependencies (npm ci) + run: npm ci + + - name: Compile + run: npx gulp prePublishNonBundle + + - name: Localization + run: npx @vscode/l10n-dev@latest export ./src + + - name: Use Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' + cache-dependency-path: | + requirements.txt + pythonFiles/jedilsp_requirements/requirements.txt + build/test-requirements.txt + build/functional-test-requirements.txt + + - name: Install base Python requirements + uses: brettcannon/pip-secure-install@v1 + with: + options: '-t ./pythonFiles/lib/python --implementation py' + + - name: Install Jedi requirements + run: python scripts/vendor.py + + - name: Install debugpy + run: | + # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. 
+ python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --implementation py --no-deps --upgrade --pre debugpy + + - name: Install test requirements + run: python -m pip install --upgrade -r build/test-requirements.txt + + - name: Install functional test requirements + run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt + + - name: Prepare pipenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + run: | + python -m pip install pipenv + python -m pipenv run python ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} pipenvPath + + - name: Prepare poetry for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + shell: pwsh + run: | + python -m pip install poetry + Move-Item -Path ".\build\ci\pyproject.toml" -Destination . + poetry env use python + + - name: Prepare virtualenv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + run: | + python -m pip install virtualenv + python -m virtualenv .virtualenv/ + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".virtualenv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } else { + & ".virtualenv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} virtualEnvPath + } + + - name: Prepare venv for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + run: | + python -m venv .venv + if ('${{ matrix.os }}' -match 'windows-latest') { + & ".venv/Scripts/python.exe" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } else { + & ".venv/bin/python" ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} venvPath + } + + - name: Prepare conda for venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + 
PYTHON_VIRTUAL_ENVS_LOCATION: './src/tmp/envPaths.json' + shell: pwsh + run: | + # 1. For `*.testvirtualenvs.test.ts` + if ('${{ matrix.os }}' -match 'windows-latest') { + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath python.exe + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath Scripts | Join-Path -ChildPath conda + } else{ + $condaPythonPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath python + $condaExecPath = Join-Path -Path $Env:CONDA -ChildPath bin | Join-Path -ChildPath conda + } + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaExecPath $condaExecPath + & $condaPythonPath ./build/ci/addEnvPath.py ${{ env.PYTHON_VIRTUAL_ENVS_LOCATION }} condaPath + & $condaExecPath init --all + + - name: Run TypeScript unit tests + run: npm run test:unittests:cover + + - name: Run Python unit tests + run: | + python pythonFiles/tests/run_all.py + + # The virtual environment based tests use the `testSingleWorkspace` set of tests + # with the environment variable `TEST_FILES_SUFFIX` set to `testvirtualenvs`, + # which is set in the "Prepare environment for venv tests" step. + # We also use a third-party GitHub Action to install xvfb on Linux, + # run tests and then clean up the process once the tests ran. 
+ # See https://github.com/GabrielBB/xvfb-action + - name: Run venv tests + env: + TEST_FILES_SUFFIX: testvirtualenvs + CI_PYTHON_VERSION: ${{ env.PYTHON_VERSION }} + CI_DISABLE_AUTO_SELECTION: 1 + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace:cover + + - name: Run single-workspace tests + env: + CI_PYTHON_VERSION: ${{ env.PYTHON_VERSION }} + CI_DISABLE_AUTO_SELECTION: 1 + uses: GabrielBB/xvfb-action@v1.6 + with: + run: npm run testSingleWorkspace:cover + + # Enable these tests when coverage is setup for multiroot workspace tests + # - name: Run multi-workspace tests + # env: + # CI_PYTHON_VERSION: ${{ env.PYTHON_VERSION }} + # CI_DISABLE_AUTO_SELECTION: 1 + # uses: GabrielBB/xvfb-action@v1.6 + # with: + # run: npm run testMultiWorkspace:cover + + # Enable these tests when coverage is setup for debugger tests + # - name: Run debugger tests + # env: + # CI_PYTHON_VERSION: ${{ env.PYTHON_VERSION }} + # CI_DISABLE_AUTO_SELECTION: 1 + # uses: GabrielBB/xvfb-action@v1.6 + # with: + # run: npm run testDebugger:cover + + # Run TypeScript functional tests + - name: Run TypeScript functional tests + env: + CI_PYTHON_VERSION: ${{ env.PYTHON_VERSION }} + CI_DISABLE_AUTO_SELECTION: 1 + run: npm run test:functional:cover + + - name: Generate coverage reports + run: npm run test:cover:report + + - name: Upload HTML report + uses: actions/upload-artifact@v4 + with: + name: ${{ runner.os }}-coverage-report-html + path: ./coverage + retention-days: 1 diff --git a/extensions/positron-python/.github/workflows/pr-file-check.yml b/extensions/positron-python/.github/workflows/pr-file-check.yml new file mode 100644 index 00000000000..ba019c790e9 --- /dev/null +++ b/extensions/positron-python/.github/workflows/pr-file-check.yml @@ -0,0 +1,44 @@ +name: PR files + +on: + pull_request: + types: + # On by default if you specify no types. + - 'opened' + - 'reopened' + - 'synchronize' + # For `skip-label` only. 
+ - 'labeled' + - 'unlabeled' + +jobs: + changed-files-in-pr: + name: 'Check for changed files' + runs-on: ubuntu-latest + steps: + - name: 'package-lock.json matches package.json' + uses: brettcannon/check-for-changed-files@v1.2.0 + with: + prereq-pattern: 'package.json' + file-pattern: 'package-lock.json' + skip-label: 'skip package*.json' + failure-message: '${prereq-pattern} was edited but ${file-pattern} was not (the ${skip-label} label can be used to pass this check)' + + - name: 'package.json matches package-lock.json' + uses: brettcannon/check-for-changed-files@v1.2.0 + with: + prereq-pattern: 'package-lock.json' + file-pattern: 'package.json' + skip-label: 'skip package*.json' + failure-message: '${prereq-pattern} was edited but ${file-pattern} was not (the ${skip-label} label can be used to pass this check)' + + - name: 'Tests' + uses: brettcannon/check-for-changed-files@v1.2.0 + with: + prereq-pattern: src/**/*.ts + file-pattern: | + src/**/*.test.ts + src/**/*.testvirtualenvs.ts + .github/test_plan.md + skip-label: 'skip tests' + failure-message: 'TypeScript code was edited without also editing a ${file-pattern} file; see the Testing page in our wiki on testing guidelines (the ${skip-label} label can be used to pass this check)' diff --git a/extensions/positron-python/.github/workflows/pr-labels.yml b/extensions/positron-python/.github/workflows/pr-labels.yml new file mode 100644 index 00000000000..730b8e5c583 --- /dev/null +++ b/extensions/positron-python/.github/workflows/pr-labels.yml @@ -0,0 +1,21 @@ +name: 'PR labels' +on: + pull_request: + types: + - 'opened' + - 'reopened' + - 'labeled' + - 'unlabeled' + - 'synchronize' + +jobs: + classify: + name: 'Classify PR' + runs-on: ubuntu-latest + steps: + - name: 'PR impact specified' + uses: mheap/github-action-required-labels@v5 + with: + mode: exactly + count: 1 + labels: 'bug, debt, feature-request, no-changelog' diff --git a/extensions/positron-python/.github/workflows/python27-issue-response.yml 
b/extensions/positron-python/.github/workflows/python27-issue-response.yml
new file mode 100644
index 00000000000..4d51e9921ab
--- /dev/null
+++ b/extensions/positron-python/.github/workflows/python27-issue-response.yml
@@ -0,0 +1,17 @@
+on:
+  issues:
+    types: [opened]
+
+jobs:
+  python27-issue-response:
+    runs-on: ubuntu-latest
+    if: "contains(github.event.issue.body, 'Python version (& distribution if applicable, e.g. Anaconda): 2.7')"
+    steps:
+      - name: Check for Python 2.7 string
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GH_REPO: ${{ github.repository }}
+        run: |
+          response="We're sorry, but we no longer support Python 2.7. If you need to work with Python 2.7, you will have to pin to 2022.2.* version of the extension, which was the last version that had the debugger (debugpy) with support for python 2.7, and was tested with \`2.7\`. Thank you for your understanding! \n ![https://user-images.githubusercontent.com/51720070/80000627-39dacc00-8472-11ea-9755-ac7ba0acbb70.gif](https://user-images.githubusercontent.com/51720070/80000627-39dacc00-8472-11ea-9755-ac7ba0acbb70.gif)"
+          gh issue comment ${{ github.event.issue.number }} --body "$response"
+          gh issue close ${{ github.event.issue.number }}
diff --git a/extensions/positron-python/.github/workflows/remove-needs-labels.yml b/extensions/positron-python/.github/workflows/remove-needs-labels.yml
new file mode 100644
index 00000000000..3d218e297a1
--- /dev/null
+++ b/extensions/positron-python/.github/workflows/remove-needs-labels.yml
@@ -0,0 +1,18 @@
+name: 'Remove Needs Label'
+on:
+  issues:
+    types: [closed]
+
+jobs:
+  classify:
+    name: 'Remove needs labels on issue closing'
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Removes needs labels on issue close'
+        uses: actions-ecosystem/action-remove-labels@v1
+        with:
+          labels: |
+            needs PR
+            needs spike
+            needs community feedback
+            needs proposal
diff --git a/extensions/positron-python/.github/workflows/test-plan-item-validator.yml b/extensions/positron-python/.github/workflows/test-plan-item-validator.yml
new file mode 100644
index
00000000000..17f1740345f --- /dev/null +++ b/extensions/positron-python/.github/workflows/test-plan-item-validator.yml @@ -0,0 +1,29 @@ +name: Test Plan Item Validator +on: + issues: + types: [edited, labeled] + +permissions: + issues: write + +jobs: + main: + runs-on: ubuntu-latest + if: contains(github.event.issue.labels.*.name, 'testplan-item') || contains(github.event.issue.labels.*.name, 'invalid-testplan-item') + steps: + - name: Checkout Actions + uses: actions/checkout@v4 + with: + repository: 'microsoft/vscode-github-triage-actions' + path: ./actions + ref: stable + + - name: Install Actions + run: npm install --production --prefix ./actions + + - name: Run Test Plan Item Validator + uses: ./actions/test-plan-item-validator + with: + label: testplan-item + invalidLabel: invalid-testplan-item + comment: Invalid test plan item. See errors below and the [test plan item spec](https://github.com/microsoft/vscode/wiki/Writing-Test-Plan-Items) for more information. This comment will go away when the issues are resolved. 
diff --git a/extensions/positron-python/.github/workflows/triage-info-needed.yml b/extensions/positron-python/.github/workflows/triage-info-needed.yml new file mode 100644 index 00000000000..1ded54ea3f5 --- /dev/null +++ b/extensions/positron-python/.github/workflows/triage-info-needed.yml @@ -0,0 +1,54 @@ +name: Triage "info-needed" label + +on: + issue_comment: + types: [created] + +env: + TRIAGERS: '["karrtikr","karthiknadig","paulacamargo25","eleanorjboyd", "brettcannon","anthonykim1"]' + +permissions: + issues: write + +jobs: + add_label: + runs-on: ubuntu-latest + if: contains(github.event.issue.labels.*.name, 'triage-needed') && !contains(github.event.issue.labels.*.name, 'info-needed') + steps: + - name: Checkout Actions + uses: actions/checkout@v4 + with: + repository: 'microsoft/vscode-github-triage-actions' + ref: stable + path: ./actions + + - name: Install Actions + run: npm install --production --prefix ./actions + + - name: Add "info-needed" label + uses: ./actions/python-triage-info-needed + with: + triagers: ${{ env.TRIAGERS }} + action: 'add' + token: ${{secrets.GITHUB_TOKEN}} + + remove_label: + if: contains(github.event.issue.labels.*.name, 'info-needed') && contains(github.event.issue.labels.*.name, 'triage-needed') + runs-on: ubuntu-latest + steps: + - name: Checkout Actions + uses: actions/checkout@v4 + with: + repository: 'microsoft/vscode-github-triage-actions' + ref: stable + path: ./actions + + - name: Install Actions + run: npm install --production --prefix ./actions + + - name: Remove "info-needed" label + uses: ./actions/python-triage-info-needed + with: + triagers: ${{ env.TRIAGERS }} + action: 'remove' + token: ${{secrets.GITHUB_TOKEN}} diff --git a/extensions/positron-python/.gitignore b/extensions/positron-python/.gitignore new file mode 100644 index 00000000000..dceec679bc2 --- /dev/null +++ b/extensions/positron-python/.gitignore @@ -0,0 +1,57 @@ +.DS_Store +.huskyrc.json +out +log.log +**/node_modules +*.pyc +*.vsix +envVars.txt 
+**/.vscode/.ropeproject/** +**/testFiles/**/.cache/** +*.noseids +.nyc_output +.vscode-test +__pycache__ +npm-debug.log +**/.mypy_cache/** +!yarn.lock +coverage/ +cucumber-report.json +**/.vscode-test/** +**/.vscode test/** +**/.vscode-smoke/** +**/.venv*/ +port.txt +precommit.hook +pythonFiles/lib/** +pythonFiles/get-pip.py +pythonFiles/get_pip.py +debug_coverage*/** +languageServer/** +languageServer.*/** +bin/** +obj/** +.pytest_cache +tmp/** +.python-version +.vs/ +test-results*.xml +xunit-test-results.xml +build/ci/performance/performance-results.json +!build/ +debug*.log +debugpy*.log +pydevd*.log +nodeLanguageServer/** +nodeLanguageServer.*/** +dist/** +# translation files +*.xlf +*.nls.*.json +*.i18n.json +l10n/ +tags +# --- Start Positron --- +pythonFiles/positron/positron_ipykernel/tests/images +pythonFiles/positron/positron_ipykernel/_vendor/** +# --- End Positron --- diff --git a/extensions/positron-python/.npmrc b/extensions/positron-python/.npmrc new file mode 100644 index 00000000000..16cc2ccdf1e --- /dev/null +++ b/extensions/positron-python/.npmrc @@ -0,0 +1 @@ +@types:registry=https://registry.npmjs.org diff --git a/extensions/positron-python/.nvmrc b/extensions/positron-python/.nvmrc new file mode 100644 index 00000000000..860cc5000ae --- /dev/null +++ b/extensions/positron-python/.nvmrc @@ -0,0 +1 @@ +v18.17.1 diff --git a/extensions/positron-python/.pre-commit-config.yaml b/extensions/positron-python/.pre-commit-config.yaml new file mode 100644 index 00000000000..25e2507b3ad --- /dev/null +++ b/extensions/positron-python/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +exclude: "(.*\\.csv)|(^build/)|(^src/)|(^resources/)|(^pythonFiles/tests/unittestadapter)|(^pythonFiles/tests/testing_tools)" +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.4.0 + hooks: + - id: flake8 + # line too long and line before binary operator (black is ok with these) + types: + - python + args: + - "--max-line-length=100" + - "--ignore=E203" + - id: 
trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + args: ["--unsafe"] + - id: check-added-large-files + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black diff --git a/extensions/positron-python/.prettierrc.js b/extensions/positron-python/.prettierrc.js new file mode 100644 index 00000000000..87a94b7bf46 --- /dev/null +++ b/extensions/positron-python/.prettierrc.js @@ -0,0 +1,15 @@ +module.exports = { + singleQuote: true, + printWidth: 120, + tabWidth: 4, + endOfLine: 'auto', + trailingComma: 'all', + overrides: [ + { + files: ['*.yml', '*.yaml'], + options: { + tabWidth: 2 + } + } + ] +}; diff --git a/extensions/positron-python/.sonarcloud.properties b/extensions/positron-python/.sonarcloud.properties new file mode 100644 index 00000000000..9e466689a90 --- /dev/null +++ b/extensions/positron-python/.sonarcloud.properties @@ -0,0 +1,4 @@ +sonar.sources=src/client +sonar.tests=src/test +sonar.cfamily.build-wrapper-output.bypass=true +sonar.cpd.exclusions=src/client/activation/**/*.ts diff --git a/extensions/positron-python/.vscode/extensions.json b/extensions/positron-python/.vscode/extensions.json new file mode 100644 index 00000000000..93a73827e7a --- /dev/null +++ b/extensions/positron-python/.vscode/extensions.json @@ -0,0 +1,14 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=827846 + // for the documentation about the extensions.json format + "recommendations": [ + "editorconfig.editorconfig", + "esbenp.prettier-vscode", + "dbaeumer.vscode-eslint", + "ms-python.python", + "ms-python.black-formatter", + "ms-python.vscode-pylance", + "ms-python.isort", + "ms-python.flake8" + ] +} diff --git a/extensions/positron-python/.vscode/launch.json b/extensions/positron-python/.vscode/launch.json new file mode 100644 index 00000000000..bfb336d767b --- /dev/null +++ b/extensions/positron-python/.vscode/launch.json @@ -0,0 +1,292 @@ +// A launch configuration that compiles the extension and then opens it inside a new window +{ + 
"version": "0.1.0", + "configurations": [ + // --- Start Positron --- + { + "name": "Positron: Remote Attach", + "type": "python", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5678 + }, + "subProcess": false, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "." + } + ], + "justMyCode": false + }, + // --- End Positron --- + { + "name": "Extension", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": ["--extensionDevelopmentPath=${workspaceFolder}"], + "stopOnEntry": false, + "smartStep": true, + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/out/**/*", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"], + "env": { + // Enable this to turn on redux logging during debugging + "XVSC_PYTHON_FORCE_LOGGING": "1", + // Enable this to try out new experiments locally + "VSC_PYTHON_LOAD_EXPERIMENTS_FROM_FILE": "1", + // Enable this to log telemetry to the output during debugging + "XVSC_PYTHON_LOG_TELEMETRY": "1", + // Enable this to log debugger output. 
Directory must exist ahead of time + "XDEBUGPY_LOG_DIR": "${workspaceRoot}/tmp/Debug_Output_Ex" + } + }, + { + "name": "Extension inside container", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": ["--extensionDevelopmentPath=${workspaceFolder}", "${workspaceFolder}/data"], + "stopOnEntry": false, + "smartStep": true, + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/out/**/*", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile" + }, + { + "name": "Python: Current File", + "type": "python", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Tests (Debugger, VS Code, *.test.ts)", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": [ + "${workspaceFolder}/src/testMultiRootWkspc/multi.code-workspace", + "--disable-extensions", + "--extensionDevelopmentPath=${workspaceFolder}", + "--extensionTestsPath=${workspaceFolder}/out/test" + ], + "stopOnEntry": false, + "sourceMaps": true, + "smartStep": true, + "outFiles": ["${workspaceFolder}/out/**/*", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "env": { + "IS_CI_SERVER_TEST_DEBUGGER": "1" + }, + "skipFiles": ["/**"] + }, + { + // Note, for the smoke test you want to debug, you may need to copy the file, + // rename it and remove a check for only smoke tests. 
+ "name": "Tests (Smoke, VS Code, *.test.ts)", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": [ + "${workspaceFolder}/src/testMultiRootWkspc/smokeTests", + "--disable-extensions", + "--extensionDevelopmentPath=${workspaceFolder}", + "--extensionTestsPath=${workspaceFolder}/out/test" + ], + "env": { + "VSC_PYTHON_CI_TEST_GREP": "Smoke Test", + "VSC_PYTHON_SMOKE_TEST": "1", + "TEST_FILES_SUFFIX": "smoke.test" + }, + "stopOnEntry": false, + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "name": "Tests (Single Workspace, VS Code, *.test.ts)", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": [ + "${workspaceFolder}/src/test", + "--disable-extensions", + "--extensionDevelopmentPath=${workspaceFolder}", + "--extensionTestsPath=${workspaceFolder}/out/test" + ], + "env": { + "VSC_PYTHON_CI_TEST_GREP": "" // Modify this to run a subset of the single workspace tests + }, + "stopOnEntry": false, + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "name": "Jedi LSP tests", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": [ + "${workspaceFolder}/src/test", + "--disable-extensions", + "--extensionDevelopmentPath=${workspaceFolder}", + "--extensionTestsPath=${workspaceFolder}/out/test" + ], + "env": { + "VSC_PYTHON_CI_TEST_GREP": "Language Server:" + }, + "stopOnEntry": false, + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "preTestJediLSP", + "skipFiles": ["/**"] + }, + { + "name": "Tests (Multiroot, VS Code, *.test.ts)", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": 
"${execPath}", + "args": [ + "${workspaceFolder}/src/testMultiRootWkspc/multi.code-workspace", + "--disable-extensions", + "--extensionDevelopmentPath=${workspaceFolder}", + "--extensionTestsPath=${workspaceFolder}/out/test" + ], + "stopOnEntry": false, + "sourceMaps": true, + "smartStep": true, + "outFiles": ["${workspaceFolder}/out/**/*", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "name": "Unit Tests (without VS Code, *.unit.test.ts)", + "type": "node", + "request": "launch", + "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", + "stopOnEntry": false, + "sourceMaps": true, + "args": [ + "./out/test/**/*.unit.test.js", + "--require=out/test/unittests.js", + "--ui=tdd", + "--recursive", + "--colors", + //"--grep", "", + "--timeout=300000" + ], + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "name": "Unit Tests (fast, without VS Code and without react/monaco, *.unit.test.ts)", + "type": "node", + "request": "launch", + "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", + "stopOnEntry": false, + "sourceMaps": true, + "args": [ + "./out/test/**/*.unit.test.js", + "--require=out/test/unittests.js", + "--ui=tdd", + "--recursive", + "--colors", + // "--grep", "", + "--timeout=300000", + "--fast" + ], + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "name": "Functional Tests (without VS Code, *.functional.test.ts)", + "type": "node", + "request": "launch", + "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", + "stopOnEntry": false, + "sourceMaps": true, + "args": [ + "./out/test/**/*.functional.test.js", + "--require=out/test/unittests.js", + "--ui=tdd", + "--recursive", + "--colors", + // "--grep", "", + "--timeout=300000", + "--exit" + ], + "env": { + // 
Remove `X` prefix to test with real browser to host DS ui (for DS functional tests). + "XVSC_PYTHON_DS_UI_BROWSER": "1", + // Remove `X` prefix to test with real python (for DS functional tests). + "XVSCODE_PYTHON_ROLLING": "1", + // Remove 'X' to turn on all logging in the debug output + "XVSC_PYTHON_FORCE_LOGGING": "1", + // Remove `X` prefix and update path to test with real python interpreter (for DS functional tests). + "XCI_PYTHON_PATH": "", + // Remove 'X' prefix to dump output for debugger. Directory has to exist prior to launch + "XDEBUGPY_LOG_DIR": "${workspaceRoot}/tmp/Debug_Output", + // Remove 'X' prefix to dump webview redux action log + "XVSC_PYTHON_WEBVIEW_LOG_FILE": "${workspaceRoot}/test-webview.log" + }, + "outFiles": ["${workspaceFolder}/out/**/*.js", "!${workspaceFolder}/**/node_modules**/*"], + "preLaunchTask": "Compile", + "skipFiles": ["/**"] + }, + { + "type": "node", + "request": "launch", + "name": "Gulp tasks (helpful for debugging gulpfile.js)", + "program": "${workspaceFolder}/node_modules/gulp/bin/gulp.js", + "args": ["watch"], + "skipFiles": ["/**"] + }, + { + "name": "Node: Current File", + "program": "${file}", + "request": "launch", + "skipFiles": ["/**"], + "type": "pwa-node" + }, + { + "name": "Python: Current File", + "type": "python", + "justMyCode": true, + "request": "launch", + "program": "${file}", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}" + }, + { + "name": "Listen", + "type": "python", + "request": "attach", + "listen": { "host": "localhost", "port": 5678 }, + "justMyCode": true + }, + { + "name": "Debug pytest plugin tests", + + "type": "python", + "request": "launch", + "module": "pytest", + "args": ["${workspaceFolder}/pythonFiles/tests/pytestadapter"], + "justMyCode": true + } + ], + "compounds": [ + { + "name": "Debug Test Discovery", + "configurations": ["Listen", "Extension"] + } + ] +} diff --git a/extensions/positron-python/.vscode/settings.json 
b/extensions/positron-python/.vscode/settings.json new file mode 100644 index 00000000000..72f4133a4a9 --- /dev/null +++ b/extensions/positron-python/.vscode/settings.json @@ -0,0 +1,75 @@ +// Place your settings in this file to overwrite default and user settings. +{ + "files.exclude": { + "out": true, // set this to true to hide the "out" folder with the compiled JS files + "dist": true, + "**/*.pyc": true, + ".nyc_output": true, + "obj": true, + "bin": true, + "**/__pycache__": true, + "**/node_modules": true, + ".vscode-test": false, + ".vscode test": false, + "**/.mypy_cache/**": true + }, + "search.exclude": { + "out": true, // set this to false to include "out" folder in search results + "dist": true, + "**/node_modules": true, + "coverage": true, + "languageServer*/**": true, + ".vscode-test": true, + ".vscode test": true + }, + "[python]": { + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.organizeImports": "explicit" + }, + "editor.defaultFormatter": "ms-python.black-formatter" + }, + "[typescript]": { + "editor.defaultFormatter": "esbenp.prettier-vscode", + "editor.formatOnSave": true + }, + "[javascript]": { + "editor.defaultFormatter": "esbenp.prettier-vscode", + "editor.formatOnSave": true + }, + "[JSON]": { + "editor.defaultFormatter": "esbenp.prettier-vscode", + "editor.formatOnSave": true + }, + "[YAML]": { + "editor.defaultFormatter": "esbenp.prettier-vscode", + "editor.formatOnSave": true + }, + "typescript.tsdk": "./node_modules/typescript/lib", // we want to use the TS server from our node_modules folder to control its version + "typescript.preferences.quoteStyle": "single", + "javascript.preferences.quoteStyle": "single", + "prettier.printWidth": 120, + "prettier.singleQuote": true, + "editor.codeActionsOnSave": { + "source.fixAll.eslint": "explicit" + }, + "editor.rulers": [ + 100 + ], + "python.languageServer": "Jedi", + "flake8.args": ["--max-line-length=100", "--ignore=E203"], + 
"cucumberautocomplete.skipDocStringsFormat": true, + "typescript.preferences.importModuleSpecifier": "relative", + // Branch name suggestion. + "git.branchProtectionPrompt": "alwaysCommitToNewBranch", + "git.branchRandomName.enable": true, + "git.branchProtection": ["main", "release/*"], + "git.pullBeforeCheckout": true, + // Open merge editor for resolving conflicts. + "git.mergeEditor": true, + "python.testing.pytestArgs": [ + "pythonFiles/tests" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true +} diff --git a/extensions/positron-python/.vscode/tasks.json b/extensions/positron-python/.vscode/tasks.json new file mode 100644 index 00000000000..e1468bdfc2a --- /dev/null +++ b/extensions/positron-python/.vscode/tasks.json @@ -0,0 +1,39 @@ +{ + "version": "2.0.0", + "presentation": { + "echo": true, + "reveal": "always", + "focus": false, + "panel": "shared" + }, + "tasks": [ + { + "label": "Compile", + "type": "npm", + "script": "compile", + "isBackground": true, + "problemMatcher": [ + "$tsc-watch" + ], + "group": { + "kind": "build", + "isDefault": true + } + }, + { + "label": "Run Unit Tests", + "type": "npm", + "script": "test:unittests", + "group": { + "kind": "test", + "isDefault": true + } + }, + { + "type": "npm", + "script": "preTestJediLSP", + "problemMatcher": [], + "label": "preTestJediLSP" + } + ] +} diff --git a/extensions/positron-python/.vscodeignore b/extensions/positron-python/.vscodeignore new file mode 100644 index 00000000000..6788f9b6d8e --- /dev/null +++ b/extensions/positron-python/.vscodeignore @@ -0,0 +1,69 @@ +**/*.map +**/*.analyzer.html +*.vsix +.editorconfig +.env +.eslintrc +.gitattributes +.gitignore +.gitmodules +.npmrc +.nvmrc +.nycrc +CODE_OF_CONDUCT.md +CODING_STANDARDS.md +CONTRIBUTING.md +gulpfile.js +package-lock.json +requirements.in +sprint-planning.github-issues +test.ipynb +tsconfig*.json +tsfmt.json +vscode-python-signing.* + +.github/** +.mocha-reporter/** +.nvm/** +.nyc_output 
+.prettierrc.js +.sonarcloud.properties +.venv/** +.vscode/** +.vscode-test/** +.vscode test/** +languageServer/** +languageServer.*/** +nodeLanguageServer/** +nodeLanguageServer.*/** +bin/** +build/** +BuildOutput/** +coverage/** +data/** +debug_coverage*/** +images/**/*.gif +images/**/*.png +ipywidgets/** +i18n/** +node_modules/** +obj/** +out/**/*.stats.json +out/client/**/*.analyzer.html +out/coverconfig.json +out/pythonFiles/** +out/src/** +out/test/** +out/testMultiRootWkspc/** +precommit.hook +pythonFiles/**/*.pyc +pythonFiles/lib/**/*.egg-info/** +pythonFiles/lib/python/bin/** +pythonFiles/jedilsp_requirements/** +pythonFiles/tests/** +scripts/** +src/** +test/** +tmp/** +typings/** +types/** diff --git a/extensions/positron-python/CHANGELOG.md b/extensions/positron-python/CHANGELOG.md new file mode 100644 index 00000000000..4a84aaeac20 --- /dev/null +++ b/extensions/positron-python/CHANGELOG.md @@ -0,0 +1,3 @@ +# Changelog + +**Please see https://github.com/posit-dev/positron/releases for the latest release notes.** diff --git a/extensions/positron-python/CODE_OF_CONDUCT.md b/extensions/positron-python/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..f9ba8cf65f3 --- /dev/null +++ b/extensions/positron-python/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+ +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/extensions/positron-python/CONTRIBUTING.md b/extensions/positron-python/CONTRIBUTING.md new file mode 100644 index 00000000000..c6c0998395b --- /dev/null +++ b/extensions/positron-python/CONTRIBUTING.md @@ -0,0 +1 @@ +Please see [our wiki](https://github.com/microsoft/vscode-python/wiki) on how to contribute to this project. diff --git a/extensions/positron-python/LICENSE b/extensions/positron-python/LICENSE new file mode 100644 index 00000000000..8cb179cdb69 --- /dev/null +++ b/extensions/positron-python/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/extensions/positron-python/README.md b/extensions/positron-python/README.md new file mode 100644 index 00000000000..718f4b48857 --- /dev/null +++ b/extensions/positron-python/README.md @@ -0,0 +1,57 @@ +# Positron Python Extension + +The Python Extension to the [Positron IDE](https://github.com/rstudio/positron). + +You can read more about Positron IDE development on the [Positron Wiki](https://connect.rstudioservices.com/positron-wiki). + +## About + +The extension is a fork of [Microsoft's Python VSCode extension](https://github.com/microsoft/vscode-python). The main TypeScript functionality (mostly UI) is implemented in [`src`](src) and calls out to Python scripts in [`pythonFiles`](pythonFiles). + +We provide a custom Positron Python Kernel based on the following open-source Python projects: + +- [**IPyKernel**](https://github.com/ipython/ipykernel), a Jupyter kernel for the Python programming language written in Python +- [**Jedi Language Server**](https://github.com/pappasam/jedi-language-server), a language server built on the [pygls](https://github.com/openlawlibrary/pygls) (Python Generic Language Server Framework) using the [Jedi](https://github.com/davidhalter/jedi) library for autocompletion, static analysis, and refactoring + +The entrypoint to our kernel is the [`positron_language_server.py`](pythonFiles/positron/positron_language_server.py) script. 
The core functionality of the kernel can be found in the [`positron_ipykernel`](pythonFiles/positron/positron_ipykernel/) package, which consists of these services: + +- [`positron_ipkernel`](pythonFiles/positron/positron_ipykernel/positron_ipkernel.py), the Positron Python Kernel +- [`positron_jedilsp`](pythonFiles/positron/positron_ipykernel/positron_jedilsp.py), the Positron Python Language Server +- [`variables`](pythonFiles/positron/positron_ipykernel/variables.py), manages Positron's Variables pane +- [`ui`](pythonFiles/positron/positron_ipykernel/ui.py), manages Positron's Frontend comm channel (a global channel for communication unscoped to any particular view) +- [`help`](pythonFiles/positron/positron_ipykernel/help.py), manages Positron's Help pane +- [`lsp`](pythonFiles/positron/positron_ipykernel/lsp.py), manages the language server +- [`plots`](pythonFiles/positron/positron_ipykernel/plots.py), a custom [IPython display publisher](https://github.com/ipython/ipython/blob/main/IPython/core/displaypub.py) that displays to Positron's Plots pane +- [`data_explorer`](pythonFiles/positron/positron_ipykernel/data_explorer.py), manages Positron's Data Viewer + +The various Positron services communicate with the front end via Jupyter's [comms](https://connect.rstudioservices.com/content/59a1f153-dcd8-44ac-849b-3371829b7002/positron-architecture.html#comms-and-ui-bindings) messaging protocol. + +## Python development + +When editing the Python source, **open a new workspace at the root `positron-python` folder** to use the settings for the various tools (linters, testers, etc) to match the CI workflows. + +From the `positron-python/pythonFiles` folder, you can run the following commands. + +Format source files with [Black](https://github.com/psf/black): + +```sh +black . 
+``` + +Type-check with [pyright](https://github.com/microsoft/pyright): + +```sh +pyright +``` + +Install the test requirements that are used in CI: + +```sh +pip install -r ../build/pinned-test-requirements.txt +``` + +Run Positron's unit tests with [pytest](https://docs.pytest.org/en/8.0.x/): + +```sh +pytest pythonFiles/positron/ +``` diff --git a/extensions/positron-python/SECURITY.md b/extensions/positron-python/SECURITY.md new file mode 100644 index 00000000000..1ceb287afaf --- /dev/null +++ b/extensions/positron-python/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. 
Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + +- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) +- Full paths of source file(s) related to the manifestation of the issue +- The location of the affected source code (tag/branch/commit or direct URL) +- Any special configuration required to reproduce the issue +- Step-by-step instructions to reproduce the issue +- Proof-of-concept or exploit code (if possible) +- Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). + + diff --git a/extensions/positron-python/SUPPORT.md b/extensions/positron-python/SUPPORT.md new file mode 100644 index 00000000000..b1afe54cc55 --- /dev/null +++ b/extensions/positron-python/SUPPORT.md @@ -0,0 +1,11 @@ +# Support + +## How to file issues and get help + +This project uses GitHub Issues to track bugs and feature requests. Please search the [existing issues](https://github.com/microsoft/vscode-python/issues) before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue. 
+ +For help and questions about using this project, please see the [`python`+`visual-studio-code` labels on Stack Overflow](https://stackoverflow.com/questions/tagged/visual-studio-code+python) or the `#vscode` channel on the [`microsoft-python` server on Discord](https://aka.ms/python-discord-invite). + +## Microsoft Support Policy + +Support for this project is limited to the resources listed above. diff --git a/extensions/positron-python/ThirdPartyNotices-Repository.txt b/extensions/positron-python/ThirdPartyNotices-Repository.txt new file mode 100644 index 00000000000..bbb00d523f9 --- /dev/null +++ b/extensions/positron-python/ThirdPartyNotices-Repository.txt @@ -0,0 +1,1063 @@ + +THIRD-PARTY SOFTWARE NOTICES AND INFORMATION +Do Not Translate or Localize + +Microsoft Python extension for Visual Studio Code incorporates third party material from the projects listed below. The original copyright notice and the license under which Microsoft received such third party material are set forth below. Microsoft reserves all other rights not expressly granted, whether by implication, estoppel or otherwise. + +1. Go for Visual Studio Code (https://github.com/Microsoft/vscode-go) +2. Files from the Python Project (https://www.python.org/) +3. omnisharp-vscode (https://github.com/OmniSharp/omnisharp-vscode) +4. PTVS (https://github.com/Microsoft/PTVS) +5. Python documentation (https://docs.python.org/) +6. python-functools32 (https://github.com/MiCHiLU/python-functools32/blob/master/functools32/functools32.py) +7. pythonVSCode (https://github.com/DonJayamanne/pythonVSCode) +8. Sphinx (http://sphinx-doc.org/) +9. nteract (https://github.com/nteract/nteract) +10. less-plugin-inline-urls (https://github.com/less/less-plugin-inline-urls/) +11. vscode-cpptools (https://github.com/microsoft/vscode-cpptools) +12. mocha (https://github.com/mochajs/mocha) +13. get-pip (https://github.com/pypa/get-pip) +14. 
vscode-js-debug (https://github.com/microsoft/vscode-js-debug) + +%% +Go for Visual Studio Code NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +The MIT License (MIT) + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +========================================= +END OF +Go for Visual Studio Code NOTICES, INFORMATION, AND LICENSE + +%% Files from the Python Project NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights +Reserved" are retained in Python alone or in any derivative version prepared by +Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. 
This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. 
Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. 
+Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (C) 2006-2010 Python Software Foundation + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +========================================= +END OF Files from the Python Project NOTICES, INFORMATION, AND LICENSE + +%% omnisharp-vscode NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Copyright (c) Microsoft Corporation + +All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+========================================= +END OF omnisharp-vscode NOTICES, INFORMATION, AND LICENSE + +%% PTVS NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Copyright (c) Microsoft Corporation. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +========================================= +END OF PTVS NOTICES, INFORMATION, AND LICENSE + +%% Python documentation NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Terms and conditions for accessing or otherwise using Python +PSF LICENSE AGREEMENT FOR PYTHON 2.7.13 +1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and + the Individual or Organization ("Licensee") accessing and otherwise using Python + 2.7.13 software in source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby + grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative works, + distribute, and otherwise use Python 2.7.13 alone or in any derivative + version, provided, however, that PSF's License Agreement and PSF's notice of + copyright, i.e., "Copyright © 2001-2017 Python Software Foundation; All Rights + Reserved" are retained in Python 2.7.13 alone or in any derivative version + prepared by Licensee. + +3. 
In the event Licensee prepares a derivative work that is based on or + incorporates Python 2.7.13 or any part thereof, and wants to make the + derivative work available to others as provided herein, then Licensee hereby + agrees to include in any such work a brief summary of the changes made to Python + 2.7.13. + +4. PSF is making Python 2.7.13 available to Licensee on an "AS IS" basis. + PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF + EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR + WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE + USE OF PYTHON 2.7.13 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.13 + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF + MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.13, OR ANY DERIVATIVE + THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of + its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any relationship + of agency, partnership, or joint venture between PSF and Licensee. This License + Agreement does not grant permission to use PSF trademarks or trade name in a + trademark sense to endorse or promote products or services of Licensee, or any + third party. + +8. By copying, installing or otherwise using Python 2.7.13, Licensee agrees + to be bound by the terms and conditions of this License Agreement. +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office at + 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or Organization + ("Licensee") accessing and otherwise using this software in source or binary + form and its associated documentation ("the Software"). + +2. 
Subject to the terms and conditions of this BeOpen Python License Agreement, + BeOpen hereby grants Licensee a non-exclusive, royalty-free, world-wide license + to reproduce, analyze, test, perform and/or display publicly, prepare derivative + works, distribute, and otherwise use the Software alone or in any derivative + version, provided, however, that the BeOpen Python License is retained in the + Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" basis. + BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF + EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY REPRESENTATION OR + WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE + USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE FOR + ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF USING, + MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, EVEN IF + ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material breach of + its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all respects + by the law of the State of California, excluding conflict of law provisions. + Nothing in this License Agreement shall be deemed to create any relationship of + agency, partnership, or joint venture between BeOpen and Licensee. This License + Agreement does not grant permission to use BeOpen trademarks or trade names in a + trademark sense to endorse or promote products or services of Licensee, or any + third party. As an exception, the "BeOpen Python" logos available at + http://www.pythonlabs.com/logos.html may be used according to the permissions + granted on that web page. + +7. 
By copying, installing or otherwise using the software, Licensee agrees to be + bound by the terms and conditions of this License Agreement. +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +1. This LICENSE AGREEMENT is between the Corporation for National Research + Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191 + ("CNRI"), and the Individual or Organization ("Licensee") accessing and + otherwise using Python 1.6.1 software in source or binary form and its + associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI hereby + grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative works, + distribute, and otherwise use Python 1.6.1 alone or in any derivative version, + provided, however, that CNRI's License Agreement and CNRI's notice of copyright, + i.e., "Copyright © 1995-2001 Corporation for National Research Initiatives; All + Rights Reserved" are retained in Python 1.6.1 alone or in any derivative version + prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, + Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 + is made available subject to the terms and conditions in CNRI's License + Agreement. This Agreement together with Python 1.6.1 may be located on the + Internet using the following unique, persistent identifier (known as a handle): + 1895.22/1013. This Agreement may also be obtained from a proxy server on the + Internet using the following URL: http://hdl.handle.net/1895.22/1013." + +3. In the event Licensee prepares a derivative work that is based on or + incorporates Python 1.6.1 or any part thereof, and wants to make the derivative + work available to others as provided herein, then Licensee hereby agrees to + include in any such work a brief summary of the changes made to Python 1.6.1. + +4. 
CNRI is making Python 1.6.1 available to Licensee on an "AS IS" basis. CNRI + MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, + BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY + OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF + PYTHON 1.6.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 1.6.1 FOR + ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF + MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, OR ANY DERIVATIVE + THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of + its terms and conditions. + +7. This License Agreement shall be governed by the federal intellectual property + law of the United States, including without limitation the federal copyright + law, and, to the extent such U.S. federal law does not apply, by the law of the + Commonwealth of Virginia, excluding Virginia's conflict of law provisions. + Notwithstanding the foregoing, with regard to derivative works based on Python + 1.6.1 that incorporate non-separable material that was previously distributed + under the GNU General Public License (GPL), the law of the Commonwealth of + Virginia shall govern this License Agreement only as to issues arising under or + with respect to Paragraphs 4, 5, and 7 of this License Agreement. Nothing in + this License Agreement shall be deemed to create any relationship of agency, + partnership, or joint venture between CNRI and Licensee. This License Agreement + does not grant permission to use CNRI trademarks or trade name in a trademark + sense to endorse or promote products or services of Licensee, or any third + party. + +8. 
By clicking on the "ACCEPT" button where indicated, or by copying, installing + or otherwise using Python 1.6.1, Licensee agrees to be bound by the terms and + conditions of this License Agreement. +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +Copyright © 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The +Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of Stichting Mathematisch Centrum or CWI not be used in advertising or +publicity pertaining to distribution of the software without specific, written +prior permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT +OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. +========================================= +END OF Python documentation NOTICES, INFORMATION, AND LICENSE + +%% python-functools32 NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. 
Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. 
This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. 
+Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +========================================= +END OF python-functools32 NOTICES, INFORMATION, AND LICENSE + +%% pythonVSCode NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +The MIT License (MIT) + +Copyright (c) 2015 DonJayamanne + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+========================================= +END OF pythonVSCode NOTICES, INFORMATION, AND LICENSE + +%% Sphinx NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Copyright (c) 2007-2017 by the Sphinx team (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +========================================= +END OF Sphinx NOTICES, INFORMATION, AND LICENSE + + +%% nteract NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Copyright (c) 2016, nteract contributors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of nteract nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +========================================= +END OF nteract NOTICES, INFORMATION, AND LICENSE + +%% less-plugin-inline-urls NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +========================================= +END OF less-plugin-inline-urls NOTICES, INFORMATION, AND LICENSE + +%% vscode-cpptools NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= +vscode-cpptools + +Copyright (c) Microsoft Corporation + +All rights reserved. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the Software), to deal in the +Software without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT + +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +========================================= +END OF vscode-cpptools NOTICES, INFORMATION, AND LICENSE + +%% mocha NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= + +(The MIT License) + +Copyright (c) 2011-2020 OpenJS Foundation and contributors, https://openjsf.org + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +========================================= +END OF mocha NOTICES, INFORMATION, AND LICENSE + + +%% get-pip NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= + +Copyright (c) 2008-2019 The pip developers + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +========================================= +END OF get-pip NOTICES, INFORMATION, AND LICENSE + + +%% vscode-js-debug NOTICES, INFORMATION, AND LICENSE BEGIN HERE +========================================= + +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +========================================= +END OF vscode-js-debug NOTICES, INFORMATION, AND LICENSE diff --git a/extensions/positron-python/build/.mocha-multi-reporters.config b/extensions/positron-python/build/.mocha-multi-reporters.config new file mode 100644 index 00000000000..abe46f117f5 --- /dev/null +++ b/extensions/positron-python/build/.mocha-multi-reporters.config @@ -0,0 +1,3 @@ +{ + "reporterEnabled": "./build/ci/scripts/spec_with_pid,mocha-junit-reporter" +} diff --git a/extensions/positron-python/build/.mocha.functional.json b/extensions/positron-python/build/.mocha.functional.json new file mode 100644 index 00000000000..71998902e98 --- /dev/null +++ b/extensions/positron-python/build/.mocha.functional.json @@ -0,0 +1,14 @@ +{ + "spec": "./out/test/**/*.functional.test.js", + "require": [ + "out/test/unittests.js" + ], + "exclude": "out/**/*.jsx", + "reporter": "mocha-multi-reporters", + "reporter-option": "configFile=./build/.mocha-multi-reporters.config", + "ui": "tdd", + "recursive": true, + "colors": true, + "exit": true, + "timeout": 180000 +} diff --git a/extensions/positron-python/build/.mocha.functional.perf.json b/extensions/positron-python/build/.mocha.functional.perf.json new file mode 100644 index 00000000000..d67cbb73e8f --- /dev/null +++ b/extensions/positron-python/build/.mocha.functional.perf.json @@ -0,0 +1,11 @@ +{ + "spec": "./out/test/**/*.functional.test.js", + "exclude-out": "out/**/*.jsx", + "require": ["out/test/unittests.js"], + "reporter": "spec", + "ui": "tdd", + "recursive": true, + "colors": true, + "exit": true, + "timeout": 180000 +} diff --git a/extensions/positron-python/build/.mocha.perf.config b/extensions/positron-python/build/.mocha.perf.config new file mode 
100644 index 00000000000..50ae73444d0 --- /dev/null +++ b/extensions/positron-python/build/.mocha.perf.config @@ -0,0 +1,6 @@ +{ + "reporterEnabled": "spec,xunit", + "xunitReporterOptions": { + "output": "xunit-test-results.xml" + } +} diff --git a/extensions/positron-python/build/.mocha.performance.json b/extensions/positron-python/build/.mocha.performance.json new file mode 100644 index 00000000000..84dc3952cc8 --- /dev/null +++ b/extensions/positron-python/build/.mocha.performance.json @@ -0,0 +1,11 @@ +{ + "spec": "./out/test/**/*.functional.test.js", + "require": ["out/test/unittests.js"], + "reporter": "mocha-multi-reporters", + "reporter-option": "configFile=build/.mocha.perf.config", + "ui": "tdd", + "recursive": true, + "colors": true, + "exit": true, + "timeout": 30000 +} diff --git a/extensions/positron-python/build/.mocha.unittests.js.json b/extensions/positron-python/build/.mocha.unittests.js.json new file mode 100644 index 00000000000..a0bc134c7dc --- /dev/null +++ b/extensions/positron-python/build/.mocha.unittests.js.json @@ -0,0 +1,9 @@ +{ + "spec": "./out/test/**/*.unit.test.js", + "require": ["source-map-support/register", "out/test/unittests.js"], + "reporter": "mocha-multi-reporters", + "reporter-option": "configFile=build/.mocha-multi-reporters.config", + "ui": "tdd", + "recursive": true, + "colors": true +} diff --git a/extensions/positron-python/build/.mocha.unittests.json b/extensions/positron-python/build/.mocha.unittests.json new file mode 100644 index 00000000000..cb6bff95949 --- /dev/null +++ b/extensions/positron-python/build/.mocha.unittests.json @@ -0,0 +1,13 @@ +{ + "spec": "./out/test/**/*.unit.test.js", + "require": [ + "out/test/unittests.js" + ], + "exclude": "out/**/*.jsx", + "reporter": "mocha-multi-reporters", + "reporter-option": "configFile=./build/.mocha-multi-reporters.config", + "ui": "tdd", + "recursive": true, + "colors": true, + "timeout": 180000 +} diff --git 
a/extensions/positron-python/build/.mocha.unittests.ts.json b/extensions/positron-python/build/.mocha.unittests.ts.json new file mode 100644 index 00000000000..b20e02bfa96 --- /dev/null +++ b/extensions/positron-python/build/.mocha.unittests.ts.json @@ -0,0 +1,9 @@ +{ + "spec": "./src/test/**/*.unit.test.ts", + "require": ["ts-node/register", "out/test/unittests.js"], + "reporter": "mocha-multi-reporters", + "reporter-option": "configFile=build/.mocha-multi-reporters.config", + "ui": "tdd", + "recursive": true, + "colors": true +} diff --git a/extensions/positron-python/build/.nycrc b/extensions/positron-python/build/.nycrc new file mode 100644 index 00000000000..b92a4f36785 --- /dev/null +++ b/extensions/positron-python/build/.nycrc @@ -0,0 +1,9 @@ +{ + "extends": "@istanbuljs/nyc-config-typescript", + "all": true, + "include": [ + "src/client/**/*.ts", "out/client/**/*.js" + ], + "exclude": ["src/test/**/*.ts", "out/test/**/*.js"], + "exclude-node-modules": true +} diff --git a/extensions/positron-python/build/azure-pipeline.pre-release.yml b/extensions/positron-python/build/azure-pipeline.pre-release.yml new file mode 100644 index 00000000000..bb52f983d02 --- /dev/null +++ b/extensions/positron-python/build/azure-pipeline.pre-release.yml @@ -0,0 +1,81 @@ +# Run on a schedule +trigger: none +pr: none + +schedules: + - cron: '0 10 * * 1-5' # 10AM UTC (2AM PDT) MON-FRI (VS Code Pre-release builds at 9PM PDT) + displayName: Nightly Pre-Release Schedule + always: false # only run if there are source code changes + branches: + include: + - main + +resources: + repositories: + - repository: templates + type: github + name: microsoft/vscode-engineering + ref: main + endpoint: Monaco + +parameters: + - name: publishExtension + displayName: 🚀 Publish Extension + type: boolean + default: false + +extends: + template: azure-pipelines/extension/pre-release.yml@templates + parameters: + publishExtension: ${{ parameters.publishExtension }} + ghCreateTag: false + 
l10nSourcePaths: ./src/client + buildSteps: + - task: NodeTool@0 + inputs: + versionSpec: '18.17.1' + displayName: Select Node version + + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.8' + addToPath: true + architecture: 'x64' + displayName: Select Python version + + - script: npm ci + displayName: Install NPM dependencies + + - script: python -m pip install -U pip + displayName: Upgrade pip + + - script: python -m pip install wheel + displayName: Install wheel + + - script: | + python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt + python ./pythonFiles/install_debugpy.py + python ./pythonFiles/download_get_pip.py + displayName: Install debugpy and get-pip.py + + - script: | + python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/python --implementation py -r ./requirements.txt + displayName: Install Python dependencies + + - script: | + python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/jedilsp --implementation py --platform any --abi none -r ./pythonFiles/jedilsp_requirements/requirements.txt + displayName: Install Jedi Language Server + + - script: | + python ./build/update_ext_version.py --for-publishing + displayName: Update build number + + - script: | + python ./build/update_package_file.py + displayName: Update telemetry in package.json + + - script: npm run addExtensionPackDependencies + displayName: Update optional extension dependencies + + - script: gulp prePublishBundle + displayName: Build diff --git a/extensions/positron-python/build/azure-pipeline.stable.yml b/extensions/positron-python/build/azure-pipeline.stable.yml new file mode 100644 index 00000000000..02f8bd38cf8 --- /dev/null +++ b/extensions/positron-python/build/azure-pipeline.stable.yml @@ -0,0 +1,89 @@ +trigger: none +# branches: +# include: +# - release* +# tags: +# include: ['*'] +pr: none + +resources: + repositories: + - repository: templates + type: github + name: 
microsoft/vscode-engineering + ref: main + endpoint: Monaco + +parameters: + - name: publishExtension + displayName: 🚀 Publish Extension + type: boolean + default: false + +extends: + template: azure-pipelines/extension/stable.yml@templates + parameters: + publishExtension: ${{ parameters.publishExtension }} + l10nSourcePaths: ./src/client + buildSteps: + - task: NodeTool@0 + inputs: + versionSpec: '18.17.1' + displayName: Select Node version + + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.8' + addToPath: true + architecture: 'x64' + displayName: Select Python version + + - script: npm ci + displayName: Install NPM dependencies + + - script: python -m pip install -U pip + displayName: Upgrade pip + + - script: python -m pip install wheel + displayName: Install wheel + + - script: | + python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt + python ./pythonFiles/install_debugpy.py + python ./pythonFiles/download_get_pip.py + displayName: Install debugpy and get-pip.py + + - script: | + python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/python --implementation py -r ./requirements.txt + displayName: Install Python dependencies + + - script: | + python -m pip install --no-deps --require-hashes --only-binary :all: -t ./pythonFiles/lib/jedilsp --implementation py --platform any --abi none -r ./pythonFiles/jedilsp_requirements/requirements.txt + displayName: Install Jedi Language Server + + - script: | + python ./build/update_ext_version.py --release --for-publishing + displayName: Update build number + + - script: | + python ./build/update_package_file.py + displayName: Update telemetry in package.json + + - script: npm run addExtensionPackDependencies + displayName: Update optional extension dependencies + + - script: gulp prePublishBundle + displayName: Build + tsa: + enabled: true + options: + codebaseName: 'devdiv_$(Build.Repository.Name)' + serviceTreeID: 
'6e6194bc-7baa-4486-86d0-9f5419626d46' + instanceUrl: 'https://devdiv.visualstudio.com/defaultcollection' + projectName: 'DevDiv' + areaPath: "DevDiv\\VS Code (compliance tracking only)\\Visual Studio Code Python Extensions" + notificationAliases: + - 'stbatt@microsoft.com' + - 'lszomoru@microsoft.com' + - 'brcan@microsoft.com' + - 'kanadig@microsoft.com' diff --git a/extensions/positron-python/build/azure-pipelines/pipeline.yml b/extensions/positron-python/build/azure-pipelines/pipeline.yml new file mode 100644 index 00000000000..adb2fa5d1c3 --- /dev/null +++ b/extensions/positron-python/build/azure-pipelines/pipeline.yml @@ -0,0 +1,58 @@ +############################################################################################### +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +############################################################################################### +name: $(Date:yyyyMMdd)$(Rev:.r) + +trigger: none + +pr: none + +resources: + repositories: + - repository: templates + type: github + name: microsoft/vscode-engineering + ref: main + endpoint: Monaco + +parameters: + - name: quality + displayName: Quality + type: string + default: latest + values: + - latest + - next + - name: publishPythonApi + displayName: 🚀 Publish pythonExtensionApi + type: boolean + default: false + +extends: + template: azure-pipelines/npm-package/pipeline.yml@templates + parameters: + npmPackages: + - name: pythonExtensionApi + testPlatforms: + - name: Linux + nodeVersions: + - 18.17.1 + - name: MacOS + nodeVersions: + - 18.17.1 + - name: Windows + nodeVersions: + - 18.17.1 + testSteps: + - template: /build/azure-pipelines/templates/test-steps.yml@self + parameters: + package: pythonExtensionApi + buildSteps: + - template: /build/azure-pipelines/templates/pack-steps.yml@self + parameters: + package: pythonExtensionApi + ghTagPrefix: release/pythonExtensionApi/ 
+ tag: ${{ parameters.quality }} + publishPackage: ${{ parameters.publishPythonApi }} + workingDirectory: $(Build.SourcesDirectory)/pythonExtensionApi diff --git a/extensions/positron-python/build/azure-pipelines/templates/pack-steps.yml b/extensions/positron-python/build/azure-pipelines/templates/pack-steps.yml new file mode 100644 index 00000000000..97037efb59b --- /dev/null +++ b/extensions/positron-python/build/azure-pipelines/templates/pack-steps.yml @@ -0,0 +1,14 @@ +############################################################################################### +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +############################################################################################### +parameters: +- name: package + +steps: + - script: npm install --root-only + workingDirectory: $(Build.SourcesDirectory) + displayName: Install root dependencies + - script: npm install + workingDirectory: $(Build.SourcesDirectory)/${{ parameters.package }} + displayName: Install package dependencies diff --git a/extensions/positron-python/build/azure-pipelines/templates/test-steps.yml b/extensions/positron-python/build/azure-pipelines/templates/test-steps.yml new file mode 100644 index 00000000000..15eb3db6384 --- /dev/null +++ b/extensions/positron-python/build/azure-pipelines/templates/test-steps.yml @@ -0,0 +1,23 @@ +############################################################################################### +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+############################################################################################### +parameters: +- name: package + type: string +- name: script + type: string + default: 'all:publish' + +steps: + - script: npm install --root-only + workingDirectory: $(Build.SourcesDirectory) + displayName: Install root dependencies + - bash: | + /usr/bin/Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 & + echo ">>> Started xvfb" + displayName: Start xvfb + condition: eq(variables['Agent.OS'], 'Linux') + - script: npm run ${{ parameters.script }} + workingDirectory: $(Build.SourcesDirectory)/${{ parameters.package }} + displayName: Verify package diff --git a/extensions/positron-python/build/build-install-requirements.txt b/extensions/positron-python/build/build-install-requirements.txt new file mode 100644 index 00000000000..8baaa59ded6 --- /dev/null +++ b/extensions/positron-python/build/build-install-requirements.txt @@ -0,0 +1,2 @@ +# Requirements needed to run install_debugpy.py and download_get_pip.py +packaging diff --git a/extensions/positron-python/build/ci/addEnvPath.py b/extensions/positron-python/build/ci/addEnvPath.py new file mode 100644 index 00000000000..abad9ec3b5c --- /dev/null +++ b/extensions/positron-python/build/ci/addEnvPath.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +#Adds the virtual environment's executable path to json file + +import json,sys +import os.path +jsonPath = sys.argv[1] +key = sys.argv[2] + +if os.path.isfile(jsonPath): + with open(jsonPath, 'r') as read_file: + data = json.load(read_file) +else: + directory = os.path.dirname(jsonPath) + if not os.path.exists(directory): + os.makedirs(directory) + with open(jsonPath, 'w+') as read_file: + data = {} + data = {} +with open(jsonPath, 'w') as outfile: + if key == 'condaExecPath': + data[key] = sys.argv[3] + else: + data[key] = sys.executable + json.dump(data, outfile, sort_keys=True, indent=4) diff --git a/extensions/positron-python/build/ci/conda_base.yml b/extensions/positron-python/build/ci/conda_base.yml new file mode 100644 index 00000000000..a1b589e38a3 --- /dev/null +++ b/extensions/positron-python/build/ci/conda_base.yml @@ -0,0 +1 @@ +pip diff --git a/extensions/positron-python/build/ci/conda_env_1.yml b/extensions/positron-python/build/ci/conda_env_1.yml new file mode 100644 index 00000000000..e9d08d0820a --- /dev/null +++ b/extensions/positron-python/build/ci/conda_env_1.yml @@ -0,0 +1,4 @@ +name: conda_env_1 +dependencies: + - python=3.8 + - pip diff --git a/extensions/positron-python/build/ci/conda_env_2.yml b/extensions/positron-python/build/ci/conda_env_2.yml new file mode 100644 index 00000000000..80b946c3cc1 --- /dev/null +++ b/extensions/positron-python/build/ci/conda_env_2.yml @@ -0,0 +1,4 @@ +name: conda_env_2 +dependencies: + - python=3.8 + - pip diff --git a/extensions/positron-python/build/ci/pyproject.toml b/extensions/positron-python/build/ci/pyproject.toml new file mode 100644 index 00000000000..6335f021a63 --- /dev/null +++ b/extensions/positron-python/build/ci/pyproject.toml @@ -0,0 +1,8 @@ +[tool.poetry] +name = "poetry-tutorial-project" +version = "0.1.0" +description = "" +authors = [""] + +[tool.poetry.dependencies] +python = "*" diff --git a/extensions/positron-python/build/ci/scripts/spec_with_pid.js 
b/extensions/positron-python/build/ci/scripts/spec_with_pid.js new file mode 100644 index 00000000000..9815feaac76 --- /dev/null +++ b/extensions/positron-python/build/ci/scripts/spec_with_pid.js @@ -0,0 +1,102 @@ +'use strict'; + +/** + * @module Spec + */ +/** + * Module dependencies. + */ + +const Base = require('mocha/lib/reporters/base'); +const { constants } = require('mocha/lib/runner'); + +const { EVENT_RUN_BEGIN } = constants; +const { EVENT_RUN_END } = constants; +const { EVENT_SUITE_BEGIN } = constants; +const { EVENT_SUITE_END } = constants; +const { EVENT_TEST_FAIL } = constants; +const { EVENT_TEST_PASS } = constants; +const { EVENT_TEST_PENDING } = constants; +const { inherits } = require('mocha/lib/utils'); + +const { color } = Base; + +const prefix = process.env.VSC_PYTHON_CI_TEST_PARALLEL ? `${process.pid} ` : ''; + +/** + * Constructs a new `Spec` reporter instance. + * + * @public + * @class + * @memberof Mocha.reporters + * @extends Mocha.reporters.Base + * @param {Runner} runner - Instance triggers reporter actions. 
+ * @param {Object} [options] - runner options + */ +function Spec(runner, options) { + Base.call(this, runner, options); + + let indents = 0; + let n = 0; + + function indent() { + return Array(indents).join(' '); + } + + runner.on(EVENT_RUN_BEGIN, () => { + Base.consoleLog(); + }); + + runner.on(EVENT_SUITE_BEGIN, (suite) => { + indents += 1; + Base.consoleLog(color('suite', `${prefix}%s%s`), indent(), suite.title); + }); + + runner.on(EVENT_SUITE_END, () => { + indents -= 1; + if (indents === 1) { + Base.consoleLog(); + } + }); + + runner.on(EVENT_TEST_PENDING, (test) => { + const fmt = indent() + color('pending', `${prefix} %s`); + Base.consoleLog(fmt, test.title); + }); + + runner.on(EVENT_TEST_PASS, (test) => { + let fmt; + if (test.speed === 'fast') { + fmt = indent() + color('checkmark', prefix + Base.symbols.ok) + color('pass', ' %s'); + Base.consoleLog(fmt, test.title); + } else { + fmt = + indent() + + color('checkmark', prefix + Base.symbols.ok) + + color('pass', ' %s') + + color(test.speed, ' (%dms)'); + Base.consoleLog(fmt, test.title, test.duration); + } + }); + + runner.on(EVENT_TEST_FAIL, (test) => { + n += 1; + Base.consoleLog(indent() + color('fail', `${prefix}%d) %s`), n, test.title); + }); + + runner.once(EVENT_RUN_END, this.epilogue.bind(this)); +} + +/** + * Inherit from `Base.prototype`. + */ +inherits(Spec, Base); + +Spec.description = 'hierarchical & verbose [default]'; + +/** + * Expose `Spec`. 
+ */ + +exports = Spec; +module.exports = exports; diff --git a/extensions/positron-python/build/ci/static_analysis/policheck/exceptions.mdb b/extensions/positron-python/build/ci/static_analysis/policheck/exceptions.mdb new file mode 100644 index 00000000000..d4a413f897e Binary files /dev/null and b/extensions/positron-python/build/ci/static_analysis/policheck/exceptions.mdb differ diff --git a/extensions/positron-python/build/constants.js b/extensions/positron-python/build/constants.js new file mode 100644 index 00000000000..da6139ff1c9 --- /dev/null +++ b/extensions/positron-python/build/constants.js @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +'use strict'; + +const util = require('./util'); + +exports.ExtensionRootDir = util.ExtensionRootDir; +// This is a list of files that existed before MS got the extension. +exports.existingFiles = util.getListOfFiles('existingFiles.json'); +exports.contributedFiles = util.getListOfFiles('contributedFiles.json'); +exports.isWindows = /^win/.test(process.platform); +// --- Start Positron --- +exports.isCI = + process.env.GITHUB_ACTIONS === 'true' || process.env.TRAVIS === 'true' || process.env.TF_BUILD !== undefined; +// --- End Positron --- diff --git a/extensions/positron-python/build/contributedFiles.json b/extensions/positron-python/build/contributedFiles.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/extensions/positron-python/build/contributedFiles.json @@ -0,0 +1 @@ +[] diff --git a/extensions/positron-python/build/existingFiles.json b/extensions/positron-python/build/existingFiles.json new file mode 100644 index 00000000000..1f5acc727d8 --- /dev/null +++ b/extensions/positron-python/build/existingFiles.json @@ -0,0 +1,556 @@ +[ + "src/client/activation/activationService.ts", + "src/client/activation/downloadChannelRules.ts", + "src/client/activation/downloader.ts", + "src/client/activation/hashVerifier.ts", + 
"src/client/activation/interpreterDataService.ts", + "src/client/activation/jedi.ts", + "src/client/activation/languageServer/languageServer.ts", + "src/client/activation/languageServer/languageServerFolderService.ts", + "src/client/activation/languageServer/languageServerHashes.ts", + "src/client/activation/languageServer/languageServerPackageRepository.ts", + "src/client/activation/languageServer/languageServerPackageService.ts", + "src/client/activation/platformData.ts", + "src/client/activation/progress.ts", + "src/client/activation/serviceRegistry.ts", + "src/client/activation/types.ts", + "src/client/api.ts", + "src/client/application/diagnostics/applicationDiagnostics.ts", + "src/client/application/diagnostics/base.ts", + "src/client/application/diagnostics/checks/envPathVariable.ts", + "src/client/application/diagnostics/checks/invalidDebuggerType.ts", + "src/client/application/diagnostics/checks/invalidPythonPathInDebugger.ts", + "src/client/application/diagnostics/checks/powerShellActivation.ts", + "src/client/application/diagnostics/checks/pythonInterpreter.ts", + "src/client/application/diagnostics/commands/base.ts", + "src/client/application/diagnostics/commands/execVSCCommand.ts", + "src/client/application/diagnostics/commands/factory.ts", + "src/client/application/diagnostics/commands/ignore.ts", + "src/client/application/diagnostics/commands/launchBrowser.ts", + "src/client/application/diagnostics/commands/types.ts", + "src/client/application/diagnostics/constants.ts", + "src/client/application/diagnostics/filter.ts", + "src/client/application/diagnostics/promptHandler.ts", + "src/client/application/diagnostics/serviceRegistry.ts", + "src/client/application/diagnostics/types.ts", + "src/client/application/serviceRegistry.ts", + "src/client/application/types.ts", + "src/client/common/application/applicationEnvironment.ts", + "src/client/common/application/applicationShell.ts", + "src/client/common/application/commandManager.ts", + 
"src/client/common/application/debugService.ts", + "src/client/common/application/documentManager.ts", + "src/client/common/application/extensions.ts", + "src/client/common/application/terminalManager.ts", + "src/client/common/application/types.ts", + "src/client/common/application/workspace.ts", + "src/client/common/configSettingMonitor.ts", + "src/client/common/configSettings.ts", + "src/client/common/configuration/service.ts", + "src/client/common/constants.ts", + "src/client/common/contextKey.ts", + "src/client/common/editor.ts", + "src/client/common/envFileParser.ts", + "src/client/common/errors/errorUtils.ts", + "src/client/common/errors/moduleNotInstalledError.ts", + "src/client/common/extensions.ts", + "src/client/common/featureDeprecationManager.ts", + "src/client/common/helpers.ts", + "src/client/common/installer/channelManager.ts", + "src/client/common/installer/condaInstaller.ts", + "src/client/common/installer/moduleInstaller.ts", + "src/client/common/installer/pipEnvInstaller.ts", + "src/client/common/installer/pipInstaller.ts", + "src/client/common/installer/productInstaller.ts", + "src/client/common/installer/productNames.ts", + "src/client/common/installer/productPath.ts", + "src/client/common/installer/productService.ts", + "src/client/common/installer/serviceRegistry.ts", + "src/client/common/installer/types.ts", + "src/client/common/logger.ts", + "src/client/common/markdown/restTextConverter.ts", + "src/client/common/net/browser.ts", + "src/client/common/net/httpClient.ts", + "src/client/common/net/socket/socketCallbackHandler.ts", + "src/client/common/net/socket/socketServer.ts", + "src/client/common/net/socket/SocketStream.ts", + "src/client/common/nuget/azureBlobStoreNugetRepository.ts", + "src/client/common/nuget/nugetRepository.ts", + "src/client/common/nuget/nugetService.ts", + "src/client/common/nuget/types.ts", + "src/client/common/open.ts", + "src/client/common/persistentState.ts", + "src/client/common/platform/constants.ts", + 
"src/client/common/platform/fileSystem.ts", + "src/client/common/platform/osinfo.ts", + "src/client/common/platform/pathUtils.ts", + "src/client/common/platform/platformService.ts", + "src/client/common/platform/registry.ts", + "src/client/common/platform/serviceRegistry.ts", + "src/client/common/platform/types.ts", + "src/client/common/process/constants.ts", + "src/client/common/process/currentProcess.ts", + "src/client/common/process/decoder.ts", + "src/client/common/process/proc.ts", + "src/client/common/process/processFactory.ts", + "src/client/common/process/pythonExecutionFactory.ts", + "src/client/common/process/pythonProcess.ts", + "src/client/common/process/pythonToolService.ts", + "src/client/common/process/serviceRegistry.ts", + "src/client/common/process/types.ts", + "src/client/common/serviceRegistry.ts", + "src/client/common/terminal/activator/base.ts", + "src/client/common/terminal/activator/index.ts", + "src/client/common/terminal/activator/powershellFailedHandler.ts", + "src/client/common/terminal/commandPrompt.ts", + "src/client/common/terminal/environmentActivationProviders/baseActivationProvider.ts", + "src/client/common/terminal/environmentActivationProviders/bash.ts", + "src/client/common/terminal/environmentActivationProviders/commandPrompt.ts", + "src/client/common/terminal/environmentActivationProviders/condaActivationProvider.ts", + "src/client/common/terminal/environmentActivationProviders/pyenvActivationProvider.ts", + "src/client/common/terminal/factory.ts", + "src/client/common/terminal/helper.ts", + "src/client/common/terminal/service.ts", + "src/client/common/terminal/types.ts", + "src/client/common/types.ts", + "src/client/common/util.ts", + "src/client/common/utils/async.ts", + "src/client/common/utils/decorators.ts", + "src/client/common/utils/enum.ts", + "src/client/common/utils/fs.ts", + "src/client/common/utils/localize.ts", + "src/client/common/utils/logging.ts", + "src/client/common/utils/misc.ts", + 
"src/client/common/utils/platform.ts", + "src/client/common/utils/random.ts", + "src/client/common/utils/stopWatch.ts", + "src/client/common/utils/string.ts", + "src/client/common/utils/sysTypes.ts", + "src/client/common/utils/text.ts", + "src/client/common/utils/version.ts", + "src/client/common/variables/environment.ts", + "src/client/common/variables/environmentVariablesProvider.ts", + "src/client/common/variables/serviceRegistry.ts", + "src/client/common/variables/systemVariables.ts", + "src/client/common/variables/sysTypes.ts", + "src/client/common/variables/types.ts", + "src/client/debugger/constants.ts", + "src/client/debugger/extension/banner.ts", + "src/client/debugger/extension/configuration/baseProvider.ts", + "src/client/debugger/extension/configuration/configurationProviderUtils.ts", + "src/client/debugger/extension/configuration/pythonV2Provider.ts", + "src/client/debugger/extension/configuration/types.ts", + "src/client/debugger/extension/hooks/childProcessAttachHandler.ts", + "src/client/debugger/extension/hooks/childProcessAttachService.ts", + "src/client/debugger/extension/hooks/constants.ts", + "src/client/debugger/extension/hooks/eventHandlerDispatcher.ts", + "src/client/debugger/extension/hooks/processTerminationHandler.ts", + "src/client/debugger/extension/hooks/processTerminationService.ts", + "src/client/debugger/extension/hooks/types.ts", + "src/client/debugger/extension/serviceRegistry.ts", + "src/client/debugger/extension/types.ts", + "src/client/debugger/types.ts", + "src/client/extension.ts", + "src/client/formatters/autoPep8Formatter.ts", + "src/client/formatters/baseFormatter.ts", + "src/client/formatters/blackFormatter.ts", + "src/client/formatters/dummyFormatter.ts", + "src/client/formatters/helper.ts", + "src/client/formatters/lineFormatter.ts", + "src/client/formatters/serviceRegistry.ts", + "src/client/formatters/types.ts", + "src/client/formatters/yapfFormatter.ts", + 
"src/client/interpreter/configuration/interpreterComparer.ts", + "src/client/interpreter/configuration/interpreterSelector.ts", + "src/client/interpreter/configuration/pythonPathUpdaterService.ts", + "src/client/interpreter/configuration/pythonPathUpdaterServiceFactory.ts", + "src/client/interpreter/configuration/services/globalUpdaterService.ts", + "src/client/interpreter/configuration/services/workspaceFolderUpdaterService.ts", + "src/client/interpreter/configuration/services/workspaceUpdaterService.ts", + "src/client/interpreter/configuration/types.ts", + "src/client/interpreter/contracts.ts", + "src/client/interpreter/display/index.ts", + "src/client/interpreter/helpers.ts", + "src/client/interpreter/interpreterService.ts", + "src/client/interpreter/interpreterVersion.ts", + "src/client/interpreter/locators/helpers.ts", + "src/client/interpreter/locators/index.ts", + "src/client/interpreter/locators/services/baseVirtualEnvService.ts", + "src/client/interpreter/locators/services/cacheableLocatorService.ts", + "src/client/interpreter/locators/services/conda.ts", + "src/client/interpreter/locators/services/condaEnvFileService.ts", + "src/client/interpreter/locators/services/condaEnvService.ts", + "src/client/interpreter/locators/services/condaHelper.ts", + "src/client/interpreter/locators/services/condaService.ts", + "src/client/interpreter/locators/services/currentPathService.ts", + "src/client/interpreter/locators/services/globalVirtualEnvService.ts", + "src/client/interpreter/locators/services/KnownPathsService.ts", + "src/client/interpreter/locators/services/pipEnvService.ts", + "src/client/interpreter/locators/services/windowsRegistryService.ts", + "src/client/interpreter/locators/services/workspaceVirtualEnvService.ts", + "src/client/interpreter/serviceRegistry.ts", + "src/client/interpreter/virtualEnvs/index.ts", + "src/client/interpreter/virtualEnvs/types.ts", + "src/client/ioc/container.ts", + "src/client/ioc/index.ts", + 
"src/client/ioc/serviceManager.ts", + "src/client/ioc/types.ts", + "src/client/language/braceCounter.ts", + "src/client/language/characters.ts", + "src/client/language/characterStream.ts", + "src/client/language/iterableTextRange.ts", + "src/client/language/textBuilder.ts", + "src/client/language/textIterator.ts", + "src/client/language/textRangeCollection.ts", + "src/client/language/tokenizer.ts", + "src/client/language/types.ts", + "src/client/language/unicode.ts", + "src/client/languageServices/jediProxyFactory.ts", + "src/client/languageServices/proposeLanguageServerBanner.ts", + "src/client/linters/bandit.ts", + "src/client/linters/baseLinter.ts", + "src/client/linters/errorHandlers/baseErrorHandler.ts", + "src/client/linters/errorHandlers/errorHandler.ts", + "src/client/linters/errorHandlers/notInstalled.ts", + "src/client/linters/errorHandlers/standard.ts", + "src/client/linters/flake8.ts", + "src/client/linters/linterCommands.ts", + "src/client/linters/linterInfo.ts", + "src/client/linters/linterManager.ts", + "src/client/linters/lintingEngine.ts", + "src/client/linters/mypy.ts", + "src/client/linters/pycodestyle.ts", + "src/client/linters/prospector.ts", + "src/client/linters/pydocstyle.ts", + "src/client/linters/pylama.ts", + "src/client/linters/pylint.ts", + "src/client/linters/serviceRegistry.ts", + "src/client/linters/types.ts", + "src/client/providers/codeActionsProvider.ts", + "src/client/providers/completionProvider.ts", + "src/client/providers/completionSource.ts", + "src/client/providers/definitionProvider.ts", + "src/client/providers/docStringFoldingProvider.ts", + "src/client/providers/formatProvider.ts", + "src/client/providers/hoverProvider.ts", + "src/client/providers/importSortProvider.ts", + "src/client/providers/itemInfoSource.ts", + "src/client/providers/jediProxy.ts", + "src/client/providers/linterProvider.ts", + "src/client/providers/objectDefinitionProvider.ts", + "src/client/providers/providerUtilities.ts", + 
"src/client/providers/referenceProvider.ts", + "src/client/providers/renameProvider.ts", + "src/client/providers/replProvider.ts", + "src/client/providers/serviceRegistry.ts", + "src/client/providers/signatureProvider.ts", + "src/client/providers/simpleRefactorProvider.ts", + "src/client/providers/symbolProvider.ts", + "src/client/providers/terminalProvider.ts", + "src/client/providers/types.ts", + "src/client/refactor/contracts.ts", + "src/client/refactor/proxy.ts", + "src/client/telemetry/constants.ts", + "src/client/telemetry/index.ts", + "src/client/telemetry/types.ts", + "src/client/telemetry/vscode-extension-telemetry.d.ts", + "src/client/terminals/activation.ts", + "src/client/terminals/codeExecution/codeExecutionManager.ts", + "src/client/terminals/codeExecution/djangoContext.ts", + "src/client/terminals/codeExecution/djangoShellCodeExecution.ts", + "src/client/terminals/codeExecution/helper.ts", + "src/client/terminals/codeExecution/repl.ts", + "src/client/terminals/codeExecution/terminalCodeExecution.ts", + "src/client/terminals/serviceRegistry.ts", + "src/client/terminals/types.ts", + "src/client/typeFormatters/blockFormatProvider.ts", + "src/client/typeFormatters/codeBlockFormatProvider.ts", + "src/client/typeFormatters/contracts.ts", + "src/client/typeFormatters/dispatcher.ts", + "src/client/typeFormatters/onEnterFormatter.ts", + "src/client/testing/codeLenses/main.ts", + "src/client/testing/codeLenses/testFiles.ts", + "src/client/testing/common/argumentsHelper.ts", + "src/client/testing/common/constants.ts", + "src/client/testing/common/debugLauncher.ts", + "src/client/testing/common/managers/baseTestManager.ts", + "src/client/testing/common/managers/testConfigurationManager.ts", + "src/client/testing/common/runner.ts", + "src/client/testing/common/services/configSettingService.ts", + "src/client/testing/common/services/storageService.ts", + "src/client/testing/common/services/testManagerService.ts", + 
"src/client/testing/common/services/testResultsService.ts", + "src/client/testing/common/services/workspaceTestManagerService.ts", + "src/client/testing/common/testUtils.ts", + "src/client/testing/common/testVisitors/flatteningVisitor.ts", + "src/client/testing/common/testVisitors/folderGenerationVisitor.ts", + "src/client/testing/common/testVisitors/resultResetVisitor.ts", + "src/client/testing/common/types.ts", + "src/client/testing/common/xUnitParser.ts", + "src/client/testing/configuration.ts", + "src/client/testing/configurationFactory.ts", + "src/client/testing/display/main.ts", + "src/client/testing/display/picker.ts", + "src/client/testing/main.ts", + "src/client/testing/nosetest/main.ts", + "src/client/testing/nosetest/runner.ts", + "src/client/testing/nosetest/services/argsService.ts", + "src/client/testing/nosetest/services/discoveryService.ts", + "src/client/testing/nosetest/services/parserService.ts", + "src/client/testing/nosetest/testConfigurationManager.ts", + "src/client/testing/pytest/main.ts", + "src/client/testing/pytest/runner.ts", + "src/client/testing/pytest/services/argsService.ts", + "src/client/testing/pytest/services/discoveryService.ts", + "src/client/testing/pytest/services/parserService.ts", + "src/client/testing/pytest/testConfigurationManager.ts", + "src/client/testing/serviceRegistry.ts", + "src/client/testing/types.ts", + "src/client/testing/unittest/helper.ts", + "src/client/testing/unittest/main.ts", + "src/client/testing/unittest/runner.ts", + "src/client/testing/unittest/services/argsService.ts", + "src/client/testing/unittest/services/discoveryService.ts", + "src/client/testing/unittest/services/parserService.ts", + "src/client/testing/unittest/socketServer.ts", + "src/client/testing/unittest/testConfigurationManager.ts", + "src/client/workspaceSymbols/contracts.ts", + "src/client/workspaceSymbols/generator.ts", + "src/client/workspaceSymbols/main.ts", + "src/client/workspaceSymbols/parser.ts", + 
"src/client/workspaceSymbols/provider.ts", + "src/server/dummy.ts", + "src/test/aaFirstTest/aaFirstTest.test.ts", + "src/test/activation/activationService.unit.test.ts", + "src/test/activation/downloadChannelRules.unit.test.ts", + "src/test/activation/downloader.unit.test.ts", + "src/test/activation/excludeFiles.ls.test.ts", + "src/test/activation/languageServer/languageServer.unit.test.ts", + "src/test/activation/languageServer/languageServerFolderService.unit.test.ts", + "src/test/activation/languageServer/languageServerPackageRepository.unit.test.ts", + "src/test/activation/languageServer/languageServerPackageService.test.ts", + "src/test/activation/languageServer/languageServerPackageService.unit.test.ts", + "src/test/activation/platformData.unit.test.ts", + "src/test/application/diagnostics/applicationDiagnostics.unit.test.ts", + "src/test/application/diagnostics/checks/envPathVariable.unit.test.ts", + "src/test/application/diagnostics/checks/invalidPythonPathInDebugger.unit.test.ts", + "src/test/application/diagnostics/checks/powerShellActivation.unit.test.ts", + "src/test/application/diagnostics/checks/pythonInterpreter.unit.test.ts", + "src/test/application/diagnostics/commands/factory.unit.test.ts", + "src/test/application/diagnostics/commands/ignore.unit.test.ts", + "src/test/application/diagnostics/commands/launchBrowser.unit.test.ts", + "src/test/application/diagnostics/filter.unit.test.ts", + "src/test/application/diagnostics/promptHandler.unit.test.ts", + "src/test/autocomplete/base.test.ts", + "src/test/autocomplete/pep484.test.ts", + "src/test/autocomplete/pep526.test.ts", + "src/test/ciConstants.ts", + "src/test/common.ts", + "src/test/common/configSettings.multiroot.test.ts", + "src/test/common/configSettings.test.ts", + "src/test/common/configSettings.unit.test.ts", + "src/test/common/configuration/service.test.ts", + "src/test/common/extensions.unit.test.ts", + "src/test/common/featureDeprecationManager.unit.test.ts", + 
"src/test/common/helpers.test.ts", + "src/test/common/installer.test.ts", + "src/test/common/installer/installer.invalidPath.unit.test.ts", + "src/test/common/installer/installer.unit.test.ts", + "src/test/common/installer/moduleInstaller.unit.test.ts", + "src/test/common/installer/productPath.unit.test.ts", + "src/test/common/localize.unit.test.ts", + "src/test/common/misc.test.ts", + "src/test/common/moduleInstaller.test.ts", + "src/test/common/net/httpClient.unit.test.ts", + "src/test/common/nuget/azureBobStoreRepository.test.ts", + "src/test/common/nuget/nugetRepository.unit.test.ts", + "src/test/common/nuget/nugetService.unit.test.ts", + "src/test/common/platform/filesystem.unit.test.ts", + "src/test/common/platform/osinfo.unit.test.ts", + "src/test/common/platform/platformService.unit.test.ts", + "src/test/common/process/currentProcess.test.ts", + "src/test/common/process/decoder.test.ts", + "src/test/common/process/execFactory.test.ts", + "src/test/common/process/proc.exec.test.ts", + "src/test/common/process/proc.observable.test.ts", + "src/test/common/process/proc.unit.test.ts", + "src/test/common/process/pythonProc.simple.multiroot.test.ts", + "src/test/common/socketCallbackHandler.test.ts", + "src/test/common/socketStream.test.ts", + "src/test/common/terminals/activation.bash.unit.test.ts", + "src/test/common/terminals/activation.commandPrompt.unit.test.ts", + "src/test/common/terminals/activation.nushell.unit.test.ts", + "src/test/common/terminals/activation.conda.unit.test.ts", + "src/test/common/terminals/activation.unit.test.ts", + "src/test/common/terminals/activator/base.unit.test.ts", + "src/test/common/terminals/activator/index.unit.test.ts", + "src/test/common/terminals/activator/powerShellFailedHandler.unit.test.ts", + "src/test/common/terminals/commandPrompt.unit.test.ts", + "src/test/common/terminals/factory.unit.test.ts", + "src/test/common/terminals/helper.activation.unit.test.ts", + "src/test/common/terminals/helper.unit.test.ts", + 
"src/test/common/terminals/pyenvActivationProvider.unit.test.ts", + "src/test/common/terminals/service.unit.test.ts", + "src/test/common/utils/async.unit.test.ts", + "src/test/common/utils/platform.unit.test.ts", + "src/test/common/utils/string.unit.test.ts", + "src/test/common/utils/text.unit.test.ts", + "src/test/common/utils/version.unit.test.ts", + "src/test/common/variables/envVarsProvider.multiroot.test.ts", + "src/test/common/variables/envVarsService.test.ts", + "src/test/configuration/interpreterSelector.unit.test.ts", + "src/test/constants.ts", + "src/test/core.ts", + "src/test/debugger/capabilities.test.ts", + "src/test/debugger/common/constants.ts", + "src/test/debugger/common/debugStreamProvider.test.ts", + "src/test/debugger/common/protocoloLogger.test.ts", + "src/test/debugger/common/protocolparser.test.ts", + "src/test/debugger/common/protocolWriter.test.ts", + "src/test/debugger/debugClient.ts", + "src/test/debugger/envVars.test.ts", + "src/test/debugger/extension/banner.unit.test.ts", + "src/test/debugger/extension/configProvider/provider.attach.unit.test.ts", + "src/test/debugger/extension/configProvider/provider.unit.test.ts", + "src/test/debugger/extension/hooks/childProcessAttachHandler.unit.test.ts", + "src/test/debugger/extension/hooks/childProcessAttachService.unit.test.ts", + "src/test/debugger/extension/hooks/processTerminationHandler.unit.test.ts", + "src/test/debugger/extension/hooks/processTerminationService.test.ts", + "src/test/debugger/launcherScriptProvider.unit.test.ts", + "src/test/debugger/misc.test.ts", + "src/test/debugger/portAndHost.test.ts", + "src/test/debugger/run.test.ts", + "src/test/debugger/utils.ts", + "src/test/debuggerTest.ts", + "src/test/definitions/hover.jedi.test.ts", + "src/test/definitions/hover.ls.test.ts", + "src/test/definitions/navigation.test.ts", + "src/test/definitions/parallel.jedi.test.ts", + "src/test/definitions/parallel.ls.test.ts", + "src/test/format/extension.dispatch.test.ts", + 
"src/test/format/extension.format.test.ts", + "src/test/format/extension.formatOnSave.test.ts", + "src/test/format/extension.lineFormatter.test.ts", + "src/test/format/extension.onEnterFormat.test.ts", + "src/test/format/extension.onTypeFormat.test.ts", + "src/test/format/extension.sort.test.ts", + "src/test/format/format.helper.test.ts", + "src/test/index.ts", + "src/test/initialize.ts", + "src/test/install/channelManager.channels.test.ts", + "src/test/install/channelManager.messages.test.ts", + "src/test/interpreters/condaEnvFileService.unit.test.ts", + "src/test/interpreters/condaEnvService.unit.test.ts", + "src/test/interpreters/condaHelper.unit.test.ts", + "src/test/interpreters/condaService.unit.test.ts", + "src/test/interpreters/currentPathService.unit.test.ts", + "src/test/interpreters/display.unit.test.ts", + "src/test/interpreters/helper.unit.test.ts", + "src/test/interpreters/interpreterService.unit.test.ts", + "src/test/interpreters/interpreterVersion.unit.test.ts", + "src/test/interpreters/knownPathService.unit.test.ts", + "src/test/interpreters/locators/helpers.unit.test.ts", + "src/test/interpreters/locators/index.unit.test.ts", + "src/test/interpreters/mocks.ts", + "src/test/interpreters/pipEnvService.unit.test.ts", + "src/test/interpreters/pythonPathUpdater.test.ts", + "src/test/interpreters/venv.unit.test.ts", + "src/test/interpreters/virtualEnvManager.unit.test.ts", + "src/test/interpreters/virtualEnvs/index.unit.test.ts", + "src/test/interpreters/windowsRegistryService.unit.test.ts", + "src/test/language/characterStream.test.ts", + "src/test/language/textIterator.test.ts", + "src/test/language/textRange.test.ts", + "src/test/language/textRangeCollection.test.ts", + "src/test/language/tokenizer.test.ts", + "src/test/linters/lint.args.test.ts", + "src/test/linters/lint.commands.test.ts", + "src/test/linters/lint.manager.test.ts", + "src/test/linters/lint.multiroot.test.ts", + "src/test/linters/lint.provider.test.ts", + 
"src/test/linters/lint.test.ts", + "src/test/linters/lintengine.test.ts", + "src/test/linters/mypy.unit.test.ts", + "src/test/linters/pylint.test.ts", + "src/test/markdown/restTextConverter.test.ts", + "src/test/mockClasses.ts", + "src/test/mocks/mementos.ts", + "src/test/mocks/moduleInstaller.ts", + "src/test/mocks/proc.ts", + "src/test/mocks/process.ts", + "src/test/mocks/vsc/arrays.ts", + "src/test/mocks/vsc/extHostedTypes.ts", + "src/test/mocks/vsc/htmlContent.ts", + "src/test/mocks/vsc/index.ts", + "src/test/mocks/vsc/position.ts", + "src/test/mocks/vsc/range.ts", + "src/test/mocks/vsc/selection.ts", + "src/test/mocks/vsc/strings.ts", + "src/test/mocks/vsc/telemetryReporter.ts", + "src/test/mocks/vsc/uri.ts", + "src/test/multiRootTest.ts", + "src/test/performance/load.perf.test.ts", + "src/test/performanceTest.ts", + "src/test/providers/codeActionsProvider.test.ts", + "src/test/providers/completionSource.unit.test.ts", + "src/test/providers/foldingProvider.test.ts", + "src/test/providers/importSortProvider.unit.test.ts", + "src/test/providers/pythonSignatureProvider.unit.test.ts", + "src/test/providers/repl.unit.test.ts", + "src/test/providers/shebangCodeLenseProvider.test.ts", + "src/test/providers/symbolProvider.unit.test.ts", + "src/test/providers/terminal.unit.test.ts", + "src/test/pythonFiles/formatting/dummy.ts", + "src/test/refactor/extension.refactor.extract.method.test.ts", + "src/test/refactor/extension.refactor.extract.var.test.ts", + "src/test/refactor/rename.test.ts", + "src/test/serviceRegistry.ts", + "src/test/signature/signature.jedi.test.ts", + "src/test/signature/signature.ls.test.ts", + "src/test/standardTest.ts", + "src/test/stub.ts", + "src/test/terminals/codeExecution/codeExecutionManager.unit.test.ts", + "src/test/terminals/codeExecution/djangoShellCodeExect.unit.test.ts", + "src/test/terminals/codeExecution/helper.test.ts", + "src/test/terminals/codeExecution/terminalCodeExec.unit.test.ts", + "src/test/testRunner.ts", + 
"src/test/textUtils.ts", + "src/test/unittests.ts", + "src/test/testing/argsService.test.ts", + "src/test/testing/banners/languageServerSurvey.unit.test.ts", + "src/test/testing/banners/proposeNewLanguageServerBanner.unit.test.ts", + "src/test/testing/common/argsHelper.unit.test.ts", + "src/test/testing/common/debugLauncher.test.ts", + "src/test/testing/common/managers/testConfigurationManager.unit.test.ts", + "src/test/testing/common/services/configSettingService.unit.test.ts", + "src/test/testing/configuration.unit.test.ts", + "src/test/testing/configurationFactory.unit.test.ts", + "src/test/testing/debugger.test.ts", + "src/test/testing/display/main.test.ts", + "src/test/testing/helper.ts", + "src/test/testing/mocks.ts", + "src/test/testing/nosetest/nosetest.argsService.unit.test.ts", + "src/test/testing/nosetest/nosetest.discovery.unit.test.ts", + "src/test/testing/nosetest/nosetest.disovery.test.ts", + "src/test/testing/nosetest/nosetest.run.test.ts", + "src/test/testing/nosetest/nosetest.test.ts", + "src/test/testing/pytest/pytest_unittest_parser_data.ts", + "src/test/testing/pytest/pytest.argsService.unit.test.ts", + "src/test/testing/pytest/pytest.discovery.test.ts", + "src/test/testing/pytest/pytest.discovery.unit.test.ts", + "src/test/testing/pytest/pytest.run.test.ts", + "src/test/testing/pytest/pytest.test.ts", + "src/test/testing/pytest/pytest.testparser.unit.test.ts", + "src/test/testing/rediscover.test.ts", + "src/test/testing/serviceRegistry.ts", + "src/test/testing/stoppingDiscoverAndTest.test.ts", + "src/test/testing/unittest/unittest.argsService.unit.test.ts", + "src/test/testing/unittest/unittest.discovery.test.ts", + "src/test/testing/unittest/unittest.discovery.unit.test.ts", + "src/test/testing/unittest/unittest.run.test.ts", + "src/test/testing/unittest/unittest.test.ts", + "src/test/vscode-mock.ts", + "src/test/workspaceSymbols/common.ts", + "src/test/workspaceSymbols/multiroot.test.ts", + "src/test/workspaceSymbols/standard.test.ts" +] 
diff --git a/extensions/positron-python/build/fail.js b/extensions/positron-python/build/fail.js new file mode 100644 index 00000000000..2adc808d8da --- /dev/null +++ b/extensions/positron-python/build/fail.js @@ -0,0 +1,6 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for license information. + *--------------------------------------------------------------------------------------------*/ + +process.exitCode = 1; diff --git a/extensions/positron-python/build/functional-test-requirements.txt b/extensions/positron-python/build/functional-test-requirements.txt new file mode 100644 index 00000000000..d45208f671f --- /dev/null +++ b/extensions/positron-python/build/functional-test-requirements.txt @@ -0,0 +1,3 @@ +# List of requirements for functional tests +versioneer +numpy diff --git a/extensions/positron-python/build/license-header.txt b/extensions/positron-python/build/license-header.txt new file mode 100644 index 00000000000..2970b03d7a1 --- /dev/null +++ b/extensions/positron-python/build/license-header.txt @@ -0,0 +1,9 @@ +PLEASE NOTE: This is the license for the Python extension for Visual Studio Code. The Python extension automatically installs other extensions as optional dependencies, which can be uninstalled at any time. 
These extensions have separate licenses: + + - The Python Debugger extension is released under an MIT License: + https://marketplace.visualstudio.com/items/ms-python.debugpy/license + + - The Pylance extension is only available in binary form and is released under a Microsoft proprietary license, the terms of which are available here: + https://marketplace.visualstudio.com/items/ms-python.vscode-pylance/license + +------------------------------------------------------------------------------ diff --git a/extensions/positron-python/build/test-requirements.txt b/extensions/positron-python/build/test-requirements.txt new file mode 100644 index 00000000000..0650e86fb3d --- /dev/null +++ b/extensions/positron-python/build/test-requirements.txt @@ -0,0 +1,21 @@ +# pin setoptconf to prevent issue with 'use_2to3' +setoptconf==0.3.0 + +flake8 +bandit +pylint +pycodestyle +pydocstyle +prospector +pytest +flask +fastapi +uvicorn +django + +# Integrated TensorBoard tests +tensorboard +torch-tb-profiler + +# extension build tests +freezegun diff --git a/extensions/positron-python/build/test_update_ext_version.py b/extensions/positron-python/build/test_update_ext_version.py new file mode 100644 index 00000000000..b94484775f5 --- /dev/null +++ b/extensions/positron-python/build/test_update_ext_version.py @@ -0,0 +1,126 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import datetime +import json + +import freezegun +import pytest +import update_ext_version + + +CURRENT_YEAR = datetime.datetime.now().year +TEST_DATETIME = f"{CURRENT_YEAR}-03-14 01:23:45" + +# The build ID is calculated via: +# "1" + datetime.datetime.strptime(TEST_DATETIME,"%Y-%m-%d %H:%M:%S").strftime('%j%H%M') +EXPECTED_BUILD_ID = "10730123" + + +def create_package_json(directory, version): + """Create `package.json` in `directory` with a specified version of `version`.""" + package_json = directory / "package.json" + package_json.write_text(json.dumps({"version": version}), encoding="utf-8") + return package_json + + +def run_test(tmp_path, version, args, expected): + package_json = create_package_json(tmp_path, version) + update_ext_version.main(package_json, args) + package = json.loads(package_json.read_text(encoding="utf-8")) + assert expected == update_ext_version.parse_version(package["version"]) + + +@pytest.mark.parametrize( + "version, args", + [ + ("2000.1.0", []), # Wrong year for CalVer + (f"{CURRENT_YEAR}.0.0-rc", []), + (f"{CURRENT_YEAR}.1.0-rc", ["--release"]), + (f"{CURRENT_YEAR}.0.0-rc", ["--release", "--build-id", "-1"]), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--for-publishing", "--build-id", "-1"], + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--for-publishing", "--build-id", "999999999999"], + ), + (f"{CURRENT_YEAR}.1.0-rc", ["--build-id", "-1"]), + (f"{CURRENT_YEAR}.1.0-rc", ["--for-publishing", "--build-id", "-1"]), + (f"{CURRENT_YEAR}.1.0-rc", ["--for-publishing", "--build-id", "999999999999"]), + ], +) +def test_invalid_args(tmp_path, version, args): + with pytest.raises(ValueError): + run_test(tmp_path, version, args, None) + + +@pytest.mark.parametrize( + "version, args, expected", + [ + ( + f"{CURRENT_YEAR}.1.0-rc", + ["--build-id", "12345"], + (f"{CURRENT_YEAR}", "1", "12345", "rc"), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--build-id", "12345"], + (f"{CURRENT_YEAR}", "0", "12345", ""), + ), + ( + 
f"{CURRENT_YEAR}.1.0-rc", + ["--for-publishing", "--build-id", "12345"], + (f"{CURRENT_YEAR}", "1", "12345", ""), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--for-publishing", "--build-id", "12345"], + (f"{CURRENT_YEAR}", "0", "12345", ""), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--build-id", "999999999999"], + (f"{CURRENT_YEAR}", "0", "999999999999", ""), + ), + ( + f"{CURRENT_YEAR}.1.0-rc", + ["--build-id", "999999999999"], + (f"{CURRENT_YEAR}", "1", "999999999999", "rc"), + ), + ( + f"{CURRENT_YEAR}.1.0-rc", + [], + (f"{CURRENT_YEAR}", "1", EXPECTED_BUILD_ID, "rc"), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release"], + (f"{CURRENT_YEAR}", "0", "0", ""), + ), + ( + f"{CURRENT_YEAR}.1.0-rc", + ["--for-publishing"], + (f"{CURRENT_YEAR}", "1", EXPECTED_BUILD_ID, ""), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release", "--for-publishing"], + (f"{CURRENT_YEAR}", "0", "0", ""), + ), + ( + f"{CURRENT_YEAR}.0.0-rc", + ["--release"], + (f"{CURRENT_YEAR}", "0", "0", ""), + ), + ( + f"{CURRENT_YEAR}.1.0-rc", + [], + (f"{CURRENT_YEAR}", "1", EXPECTED_BUILD_ID, "rc"), + ), + ], +) +@freezegun.freeze_time(f"{CURRENT_YEAR}-03-14 01:23:45") +def test_update_ext_version(tmp_path, version, args, expected): + run_test(tmp_path, version, args, expected) diff --git a/extensions/positron-python/build/unlocalizedFiles.json b/extensions/positron-python/build/unlocalizedFiles.json new file mode 100644 index 00000000000..4da3d450af2 --- /dev/null +++ b/extensions/positron-python/build/unlocalizedFiles.json @@ -0,0 +1,26 @@ +[ + "src/client/activation/activationService.ts", + "src/client/common/installer/channelManager.ts", + "src/client/common/installer/moduleInstaller.ts", + "src/client/common/installer/productInstaller.ts", + "src/client/debugger/extension/hooks/childProcessAttachService.ts", + "src/client/formatters/baseFormatter.ts", + "src/client/formatters/blackFormatter.ts", + "src/client/interpreter/configuration/pythonPathUpdaterService.ts", + 
"src/client/linters/errorHandlers/notInstalled.ts", + "src/client/linters/errorHandlers/standard.ts", + "src/client/linters/linterCommands.ts", + "src/client/linters/prospector.ts", + "src/client/providers/importSortProvider.ts", + "src/client/providers/objectDefinitionProvider.ts", + "src/client/providers/simpleRefactorProvider.ts", + "src/client/pythonEnvironments/discovery/locators/services/pipEnvService.ts", + "src/client/terminals/codeExecution/helper.ts", + "src/client/testing/common/debugLauncher.ts", + "src/client/testing/common/managers/baseTestManager.ts", + "src/client/testing/common/services/discovery.ts", + "src/client/testing/configuration.ts", + "src/client/testing/display/main.ts", + "src/client/testing/main.ts", + "src/client/workspaceSymbols/generator.ts" +] diff --git a/extensions/positron-python/build/update_ext_version.py b/extensions/positron-python/build/update_ext_version.py new file mode 100644 index 00000000000..6ac2b15bbf0 --- /dev/null +++ b/extensions/positron-python/build/update_ext_version.py @@ -0,0 +1,128 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import argparse +import datetime +import json +import pathlib +import sys +from typing import Sequence, Tuple, Union + +EXT_ROOT = pathlib.Path(__file__).parent.parent +PACKAGE_JSON_PATH = EXT_ROOT / "package.json" + + +def build_arg_parse() -> argparse.ArgumentParser: + """Builds the arguments parser.""" + parser = argparse.ArgumentParser( + description="This script updates the python extension micro version based on the release or pre-release channel." 
+ ) + parser.add_argument( + "--release", + action="store_true", + help="Treats the current build as a release build.", + ) + parser.add_argument( + "--build-id", + action="store", + type=int, + default=None, + help="If present, will be used as a micro version.", + required=False, + ) + parser.add_argument( + "--for-publishing", + action="store_true", + help="Removes `-dev` or `-rc` suffix.", + ) + return parser + + +def is_even(v: Union[int, str]) -> bool: + """Returns True if `v` is even.""" + return not int(v) % 2 + + +def micro_build_number() -> str: + """Generates the micro build number. + The format is `1`. + """ + return f"1{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%j%H%M')}" + + +def parse_version(version: str) -> Tuple[str, str, str, str]: + """Parse a version string into a tuple of version parts.""" + major, minor, parts = version.split(".", maxsplit=2) + try: + micro, suffix = parts.split("-", maxsplit=1) + except ValueError: + micro = parts + suffix = "" + return major, minor, micro, suffix + + +def main(package_json: pathlib.Path, argv: Sequence[str]) -> None: + parser = build_arg_parse() + args = parser.parse_args(argv) + + package = json.loads(package_json.read_text(encoding="utf-8")) + + major, minor, micro, suffix = parse_version(package["version"]) + + current_year = datetime.datetime.now().year + current_month = datetime.datetime.now().month + int_major = int(major) + valid_major = ( + int_major + == current_year # Between JAN-DEC major version should be current year + or ( + int_major == current_year - 1 and current_month == 1 + ) # After new years the check is relaxed for JAN to allow releases of previous year DEC + or ( + int_major == current_year + 1 and current_month == 12 + ) # Before new years the check is relaxed for DEC to allow pre-releases of next year JAN + ) + if not valid_major: + raise ValueError( + f"Major version [{major}] must be the current year [{current_year}].", + f"If changing major version after new 
year's, change to {current_year}.1.0", + f"Minor version must be updated based on release or pre-release channel.", + ) + + if args.release and not is_even(minor): + raise ValueError( + f"Release version should have EVEN numbered minor version: {package['version']}" + ) + elif not args.release and is_even(minor): + raise ValueError( + f"Pre-Release version should have ODD numbered minor version: {package['version']}" + ) + + print(f"Updating build FROM: {package['version']}") + if args.build_id: + # If build id is provided it should fall within the 0-INT32 max range + # that the max allowed value for publishing to the Marketplace. + if args.build_id < 0 or ( + args.for_publishing and args.build_id > ((2**32) - 1) + ): + raise ValueError(f"Build ID must be within [0, {(2**32) - 1}]") + + package["version"] = ".".join((major, minor, str(args.build_id))) + elif args.release: + package["version"] = ".".join((major, minor, micro)) + else: + # micro version only updated for pre-release. + package["version"] = ".".join((major, minor, micro_build_number())) + + if not args.for_publishing and not args.release and len(suffix): + package["version"] += "-" + suffix + print(f"Updating build TO: {package['version']}") + + # Overwrite package.json with new data add a new-line at the end of the file. + package_json.write_text( + json.dumps(package, indent=4, ensure_ascii=False) + "\n", encoding="utf-8" + ) + + +if __name__ == "__main__": + main(PACKAGE_JSON_PATH, sys.argv[1:]) diff --git a/extensions/positron-python/build/update_package_file.py b/extensions/positron-python/build/update_package_file.py new file mode 100644 index 00000000000..f82587ced84 --- /dev/null +++ b/extensions/positron-python/build/update_package_file.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import json +import pathlib + +EXT_ROOT = pathlib.Path(__file__).parent.parent +PACKAGE_JSON_PATH = EXT_ROOT / "package.json" + + +def main(package_json: pathlib.Path) -> None: + package = json.loads(package_json.read_text(encoding="utf-8")) + package["enableTelemetry"] = True + + # Overwrite package.json with new data add a new-line at the end of the file. + package_json.write_text( + json.dumps(package, indent=4, ensure_ascii=False) + "\n", encoding="utf-8" + ) + + +if __name__ == "__main__": + main(PACKAGE_JSON_PATH) diff --git a/extensions/positron-python/build/util.js b/extensions/positron-python/build/util.js new file mode 100644 index 00000000000..c54e204ae7d --- /dev/null +++ b/extensions/positron-python/build/util.js @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +exports.ExtensionRootDir = path.dirname(__dirname); +function getListOfFiles(filename) { + filename = path.normalize(filename); + if (!path.isAbsolute(filename)) { + filename = path.join(__dirname, filename); + } + const data = fs.readFileSync(filename).toString(); + const files = JSON.parse(data); + return files.map((file) => path.join(exports.ExtensionRootDir, file.replace(/\//g, path.sep))); +} +exports.getListOfFiles = getListOfFiles; diff --git a/extensions/positron-python/build/webpack/common.js b/extensions/positron-python/build/webpack/common.js new file mode 100644 index 00000000000..fca1b1a900f --- /dev/null +++ b/extensions/positron-python/build/webpack/common.js @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +'use strict'; + +const glob = require('glob'); +const path = require('path'); +// eslint-disable-next-line camelcase +const webpack_bundle_analyzer = require('webpack-bundle-analyzer'); +const constants = require('../constants'); + +exports.nodeModulesToExternalize = [ + 'unicode/category/Lu', + 'unicode/category/Ll', + 'unicode/category/Lt', + 'unicode/category/Lo', + 'unicode/category/Lm', + 'unicode/category/Nl', + 'unicode/category/Mn', + 'unicode/category/Mc', + 'unicode/category/Nd', + 'unicode/category/Pc', + 'source-map-support', + 'sudo-prompt', + 'node-stream-zip', + 'xml2js', +]; +exports.nodeModulesToReplacePaths = [...exports.nodeModulesToExternalize]; + +// --- Start Positron --- +// Don't externalize paths in built-in extension. +exports.nodeModulesToReplacePaths = []; +// --- End Positron --- +// +function getDefaultPlugins(name) { + const plugins = []; + // Only run the analyzer on a local machine or if required + if (!constants.isCI || process.env.VSC_PYTHON_FORCE_ANALYZER) { + plugins.push( + new webpack_bundle_analyzer.BundleAnalyzerPlugin({ + analyzerMode: 'static', + reportFilename: `${name}.analyzer.html`, + generateStatsFile: true, + statsFilename: `${name}.stats.json`, + openAnalyzer: false, // Open file manually if you want to see it :) + }), + ); + } + return plugins; +} +exports.getDefaultPlugins = getDefaultPlugins; +function getListOfExistingModulesInOutDir() { + const outDir = path.join(constants.ExtensionRootDir, 'out', 'client'); + const files = glob.sync('**/*.js', { sync: true, cwd: outDir }); + return files.map((filePath) => `./${filePath.slice(0, -3)}`); +} +exports.getListOfExistingModulesInOutDir = getListOfExistingModulesInOutDir; diff --git a/extensions/positron-python/build/webpack/loaders/externalizeDependencies.js b/extensions/positron-python/build/webpack/loaders/externalizeDependencies.js new file mode 100644 index 00000000000..0ada9b0424d --- /dev/null +++ 
b/extensions/positron-python/build/webpack/loaders/externalizeDependencies.js @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +const common = require('../common'); + +function replaceModule(prefixRegex, prefix, contents, moduleName, quotes) { + const stringToSearch = `${prefixRegex}${quotes}${moduleName}${quotes}`; + const stringToReplaceWith = `${prefix}${quotes}./node_modules/${moduleName}${quotes}`; + return contents.replace(new RegExp(stringToSearch, 'gm'), stringToReplaceWith); +} + +// eslint-disable-next-line camelcase +function default_1(source) { + common.nodeModulesToReplacePaths.forEach((moduleName) => { + if (source.indexOf(moduleName) > 0) { + source = replaceModule('import\\(', 'import(', source, moduleName, '"'); + source = replaceModule('import\\(', 'import(', source, moduleName, "'"); + source = replaceModule('require\\(', 'require(', source, moduleName, '"'); + source = replaceModule('require\\(', 'require(', source, moduleName, "'"); + source = replaceModule('from ', 'from ', source, moduleName, '"'); + source = replaceModule('from ', 'from ', source, moduleName, "'"); + } + }); + return source; +} +// eslint-disable-next-line camelcase +exports.default = default_1; diff --git a/extensions/positron-python/build/webpack/loaders/jsonloader.js b/extensions/positron-python/build/webpack/loaders/jsonloader.js new file mode 100644 index 00000000000..5ec3c703868 --- /dev/null +++ b/extensions/positron-python/build/webpack/loaders/jsonloader.js @@ -0,0 +1,7 @@ +// For some reason this has to be in commonjs format + +module.exports = function (source) { + // Just inline the source and fix up defaults so that they don't + // mess up the logic in the setOptions.js file + return `module.exports = ${source}\nmodule.exports.default = false`; +}; diff --git a/extensions/positron-python/build/webpack/loaders/remarkLoader.js b/extensions/positron-python/build/webpack/loaders/remarkLoader.js new 
file mode 100644 index 00000000000..5ec3c703868 --- /dev/null +++ b/extensions/positron-python/build/webpack/loaders/remarkLoader.js @@ -0,0 +1,7 @@ +// For some reason this has to be in commonjs format + +module.exports = function (source) { + // Just inline the source and fix up defaults so that they don't + // mess up the logic in the setOptions.js file + return `module.exports = ${source}\nmodule.exports.default = false`; +}; diff --git a/extensions/positron-python/build/webpack/nativeOrInteractivePicker.html b/extensions/positron-python/build/webpack/nativeOrInteractivePicker.html new file mode 100644 index 00000000000..46d6f0e7eb5 --- /dev/null +++ b/extensions/positron-python/build/webpack/nativeOrInteractivePicker.html @@ -0,0 +1,8 @@ + + + + + Click here to Open Native Editor
+ Click here to Open Interactive Window + + diff --git a/extensions/positron-python/build/webpack/webpack.extension.browser.config.js b/extensions/positron-python/build/webpack/webpack.extension.browser.config.js new file mode 100644 index 00000000000..b4e0f17f185 --- /dev/null +++ b/extensions/positron-python/build/webpack/webpack.extension.browser.config.js @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// @ts-check + +'use strict'; + +const path = require('path'); +const webpack = require('webpack'); +const NodePolyfillPlugin = require('node-polyfill-webpack-plugin'); + +const packageRoot = path.resolve(__dirname, '..', '..'); +const outDir = path.resolve(packageRoot, 'dist'); + +/** @type {(env: any, argv: { mode: 'production' | 'development' | 'none' }) => import('webpack').Configuration} */ +// eslint-disable-next-line @typescript-eslint/no-unused-vars +const nodeConfig = (_, { mode }) => ({ + context: packageRoot, + entry: { + extension: './src/client/browser/extension.ts', + }, + target: 'webworker', + output: { + filename: '[name].browser.js', + path: outDir, + libraryTarget: 'commonjs2', + devtoolModuleFilenameTemplate: '../../[resource-path]', + }, + devtool: 'source-map', + // stats: { + // all: false, + // errors: true, + // warnings: true, + // }, + resolve: { + extensions: ['.ts', '.js'], + fallback: { path: require.resolve('path-browserify') }, + }, + plugins: [ + new NodePolyfillPlugin(), + new webpack.optimize.LimitChunkCountPlugin({ + maxChunks: 1, + }), + ], + externals: { + vscode: 'commonjs vscode', + // --- Start Positron --- + positron: 'commonjs positron', // ignored because we inject positron via module loader + // --- End Positron --- + // These dependencies are ignored because we don't use them, and App Insights has try-catch protecting their loading if they don't exist + // See: https://github.com/microsoft/vscode-extension-telemetry/issues/41#issuecomment-598852991 
+ 'applicationinsights-native-metrics': 'commonjs applicationinsights-native-metrics', + '@opentelemetry/tracing': 'commonjs @opentelemetry/tracing', + // --- Start Positron --- + '@opentelemetry/instrumentation': 'commonjs @opentelemetry/instrumentation', + '@azure/opentelemetry-instrumentation-azure-sdk': 'commonjs @azure/opentelemetry-instrumentation-azure-sdk', + '@azure/functions-core': 'commonjs @azure/functions-core', + // --- End Positron --- + }, + module: { + rules: [ + { + test: /\.ts$/, + loader: 'ts-loader', + options: { + configFile: 'tsconfig.browser.json', + }, + }, + { + test: /\.node$/, + loader: 'node-loader', + }, + ], + }, + // optimization: { + // usedExports: true, + // splitChunks: { + // cacheGroups: { + // defaultVendors: { + // name: 'vendor', + // test: /[\\/]node_modules[\\/]/, + // chunks: 'all', + // priority: -10, + // }, + // }, + // }, + // }, +}); + +module.exports = nodeConfig; diff --git a/extensions/positron-python/build/webpack/webpack.extension.config.js b/extensions/positron-python/build/webpack/webpack.extension.config.js new file mode 100644 index 00000000000..eae0b7b2c43 --- /dev/null +++ b/extensions/positron-python/build/webpack/webpack.extension.config.js @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +'use strict'; + +const path = require('path'); +// eslint-disable-next-line camelcase +const tsconfig_paths_webpack_plugin = require('tsconfig-paths-webpack-plugin'); +const constants = require('../constants'); +const common = require('./common'); + +const configFileName = path.join(constants.ExtensionRootDir, 'tsconfig.extension.json'); +// Some modules will be pre-genearted and stored in out/.. dir and they'll be referenced via +// NormalModuleReplacementPlugin. We need to ensure they do not get bundled into the output +// (as they are large). 
+const existingModulesInOutDir = common.getListOfExistingModulesInOutDir(); +const config = { + mode: 'production', + target: 'node', + entry: { + extension: './src/client/extension.ts', + 'shellExec.worker': './src/client/common/process/worker/shellExec.worker.ts', + 'plainExec.worker': './src/client/common/process/worker/plainExec.worker.ts', + 'registryKeys.worker': 'src/client/pythonEnvironments/common/registryKeys.worker.ts', + 'registryValues.worker': 'src/client/pythonEnvironments/common/registryValues.worker.ts', + }, + devtool: 'source-map', + node: { + __dirname: false, + }, + module: { + rules: [ + { + test: /\.ts$/, + use: [ + { + loader: path.join(__dirname, 'loaders', 'externalizeDependencies.js'), + }, + ], + }, + { + test: /\.ts$/, + exclude: /node_modules/, + use: [ + { + loader: 'ts-loader', + }, + ], + }, + { + test: /\.node$/, + use: [ + { + loader: 'node-loader', + }, + ], + }, + { + test: /\.worker\.js$/, + use: { loader: 'worker-loader' }, + }, + ], + }, + externals: [ + 'vscode', + // --- Start Positron --- + 'positron', + // --- End Positron --- + 'commonjs', + ...existingModulesInOutDir, + // These dependencies are ignored because we don't use them, and App Insights has try-catch protecting their loading if they don't exist + // See: https://github.com/microsoft/vscode-extension-telemetry/issues/41#issuecomment-598852991 + 'applicationinsights-native-metrics', + '@opentelemetry/tracing', + '@azure/opentelemetry-instrumentation-azure-sdk', + '@opentelemetry/instrumentation', + '@azure/functions-core', + ], + plugins: [...common.getDefaultPlugins('extension')], + resolve: { + extensions: ['.ts', '.js'], + plugins: [new tsconfig_paths_webpack_plugin.TsconfigPathsPlugin({ configFile: configFileName })], + conditionNames: ['import', 'require', 'node'], + }, + output: { + filename: '[name].js', + path: path.resolve(constants.ExtensionRootDir, 'out', 'client'), + libraryTarget: 'commonjs2', + devtoolModuleFilenameTemplate: 
'../../[resource-path]', + }, +}; + +exports.default = config; diff --git a/extensions/positron-python/build/webpack/webpack.extension.dependencies.config.js b/extensions/positron-python/build/webpack/webpack.extension.dependencies.config.js new file mode 100644 index 00000000000..a90e9135a60 --- /dev/null +++ b/extensions/positron-python/build/webpack/webpack.extension.dependencies.config.js @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +'use strict'; + +const copyWebpackPlugin = require('copy-webpack-plugin'); +const path = require('path'); +const constants = require('../constants'); +const common = require('./common'); + +const entryItems = {}; +common.nodeModulesToExternalize.forEach((moduleName) => { + entryItems[`node_modules/${moduleName}`] = `./node_modules/${moduleName}`; +}); +const config = { + mode: 'production', + target: 'node', + context: constants.ExtensionRootDir, + entry: entryItems, + devtool: 'source-map', + node: { + __dirname: false, + }, + module: {}, + externals: ['vscode', 'commonjs'], + plugins: [ + ...common.getDefaultPlugins('dependencies'), + // vsls requires our package.json to be next to node_modules. It's how they + // 'find' the calling extension. + // eslint-disable-next-line new-cap + new copyWebpackPlugin({ patterns: [{ from: './package.json', to: '.' 
}] }), + ], + resolve: { + extensions: ['.js'], + }, + output: { + filename: '[name].js', + path: path.resolve(constants.ExtensionRootDir, 'out', 'client'), + libraryTarget: 'commonjs2', + devtoolModuleFilenameTemplate: '../../[resource-path]', + }, +}; + +exports.default = config; diff --git a/extensions/positron-python/cgmanifest.json b/extensions/positron-python/cgmanifest.json new file mode 100644 index 00000000000..57123f56679 --- /dev/null +++ b/extensions/positron-python/cgmanifest.json @@ -0,0 +1,15 @@ +{ + "Registrations": [ + { + "Component": { + "Other": { + "Name": "get-pip", + "Version": "21.3.1", + "DownloadUrl": "https://github.com/pypa/get-pip" + }, + "Type": "other" + }, + "DevelopmentDependency": false + } + ] +} diff --git a/extensions/positron-python/data/.vscode/settings.json b/extensions/positron-python/data/.vscode/settings.json new file mode 100644 index 00000000000..6f329d0777a --- /dev/null +++ b/extensions/positron-python/data/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "python.defaultInterpreterPath": "/usr/bin/python3" +} diff --git a/extensions/positron-python/data/test.py b/extensions/positron-python/data/test.py new file mode 100644 index 00000000000..3b316dc1e8d --- /dev/null +++ b/extensions/positron-python/data/test.py @@ -0,0 +1,2 @@ +#%% +print('hello') diff --git a/extensions/positron-python/extension-browser.webpack.config.js b/extensions/positron-python/extension-browser.webpack.config.js new file mode 100644 index 00000000000..25d85450fac --- /dev/null +++ b/extensions/positron-python/extension-browser.webpack.config.js @@ -0,0 +1,13 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Posit Software, PBC. 
+ *--------------------------------------------------------------------------------------------*/ + +'use strict'; + +const config = require('./build/webpack/webpack.extension.browser.config'); + +module.exports = { + context: __dirname, + ...config +}; + diff --git a/extensions/positron-python/extension.webpack.config.js b/extensions/positron-python/extension.webpack.config.js new file mode 100644 index 00000000000..6f427fb8695 --- /dev/null +++ b/extensions/positron-python/extension.webpack.config.js @@ -0,0 +1,35 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Posit Software, PBC. + *--------------------------------------------------------------------------------------------*/ + +'use strict'; + +const path = require('path'); + +// Load the webpack config for the Python extension +const config = require('./build/webpack/webpack.extension.config'); + +// Merge them with settings for this environment +module.exports = { + ...config.default, + entry: { + extension: './src/client/extension.ts', + }, + externals: [ + 'vscode', + 'positron', + 'commonjs', + 'applicationinsights-native-metrics', + '@opentelemetry/tracing', + '@opentelemetry/instrumentation', + '@azure/opentelemetry-instrumentation-azure-sdk', + '@azure/functions-core' + ], + output: { + filename: '[name].js', + path: path.join(__dirname, 'dist', 'client'), + libraryTarget: 'commonjs', + }, + context: __dirname +}; + diff --git a/extensions/positron-python/gulpfile.js b/extensions/positron-python/gulpfile.js new file mode 100644 index 00000000000..4950eeb0af9 --- /dev/null +++ b/extensions/positron-python/gulpfile.js @@ -0,0 +1,414 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for license information. 
+ *--------------------------------------------------------------------------------------------*/ + +/* jshint node: true */ +/* jshint esversion: 6 */ + +'use strict'; + +const gulp = require('gulp'); +const ts = require('gulp-typescript'); +const spawn = require('cross-spawn'); +const path = require('path'); +const del = require('del'); +const fsExtra = require('fs-extra'); +const glob = require('glob'); +const _ = require('lodash'); +const nativeDependencyChecker = require('node-has-native-dependencies'); +const flat = require('flat'); +const { argv } = require('yargs'); +const os = require('os'); +// --- Start Positron --- +const rmrf = require('rimraf'); +const fancyLog = require('fancy-log'); +const ansiColors = require('ansi-colors'); +// --- End Positron --- +const typescript = require('typescript'); + +const tsProject = ts.createProject('./tsconfig.json', { typescript }); + +const isCI = process.env.TRAVIS === 'true' || process.env.TF_BUILD !== undefined; + +// --- Start Positron --- +const pythonCommand = locatePython(); +// --- End Positron --- + +gulp.task('compileCore', (done) => { + let failed = false; + tsProject + .src() + .pipe(tsProject()) + .on('error', () => { + failed = true; + }) + .js.pipe(gulp.dest('out')) + .on('finish', () => (failed ? 
done(new Error('TypeScript compilation errors')) : done())); +}); + +gulp.task('compileApi', (done) => { + spawnAsync('npm', ['run', 'compileApi'], undefined, true) + .then((stdout) => { + if (stdout.includes('error')) { + done(new Error(stdout)); + } else { + done(); + } + }) + .catch((ex) => { + console.log(ex); + done(new Error('TypeScript compilation errors', ex)); + }); +}); + +gulp.task('compile', gulp.series('compileCore', 'compileApi')); + +gulp.task('precommit', (done) => run({ exitOnError: true, mode: 'staged' }, done)); + +gulp.task('output:clean', () => del(['coverage'])); + +gulp.task('clean:cleanExceptTests', () => del(['clean:vsix', 'out/client'])); +gulp.task('clean:vsix', () => del(['*.vsix'])); +gulp.task('clean:out', () => del(['out'])); + +gulp.task('clean', gulp.parallel('output:clean', 'clean:vsix', 'clean:out')); + +gulp.task('checkNativeDependencies', (done) => { + if (hasNativeDependencies()) { + done(new Error('Native dependencies detected')); + } + done(); +}); + +const webpackEnv = { NODE_OPTIONS: '--max_old_space_size=9096' }; + +async function buildWebPackForDevOrProduction(configFile, configNameForProductionBuilds) { + if (configNameForProductionBuilds) { + await buildWebPack(configNameForProductionBuilds, ['--config', configFile], webpackEnv); + } else { + await spawnAsync('npm', ['run', 'webpack', '--', '--config', configFile, '--mode', 'production'], webpackEnv); + } +} +gulp.task('webpack', async () => { + // Build node_modules. 
+ await buildWebPackForDevOrProduction('./build/webpack/webpack.extension.dependencies.config.js', 'production'); + await buildWebPackForDevOrProduction('./build/webpack/webpack.extension.config.js', 'extension'); + await buildWebPackForDevOrProduction('./build/webpack/webpack.extension.browser.config.js', 'browser'); +}); + +gulp.task('addExtensionPackDependencies', async () => { + await buildLicense(); + await addExtensionPackDependencies(); +}); + +async function addExtensionPackDependencies() { + // Update the package.json to add extension pack dependencies at build time so that + // extension dependencies need not be installed during development + const packageJsonContents = await fsExtra.readFile('package.json', 'utf-8'); + const packageJson = JSON.parse(packageJsonContents); + // --- Start Positron --- + packageJson.extensionPack = ['ms-python.debugpy'].concat( + // --- End Positron --- + packageJson.extensionPack ? packageJson.extensionPack : [], + ); + // Remove potential duplicates. 
+ packageJson.extensionPack = packageJson.extensionPack.filter( + (item, index) => packageJson.extensionPack.indexOf(item) === index, + ); + await fsExtra.writeFile('package.json', JSON.stringify(packageJson, null, 4), 'utf-8'); +} + +async function buildLicense() { + const headerPath = path.join(__dirname, 'build', 'license-header.txt'); + const licenseHeader = await fsExtra.readFile(headerPath, 'utf-8'); + const license = await fsExtra.readFile('LICENSE', 'utf-8'); + + await fsExtra.writeFile('LICENSE', `${licenseHeader}\n${license}`, 'utf-8'); +} + +gulp.task('updateBuildNumber', async () => { + await updateBuildNumber(argv); +}); + +async function updateBuildNumber(args) { + if (args && args.buildNumber) { + // Edit the version number from the package.json + const packageJsonContents = await fsExtra.readFile('package.json', 'utf-8'); + const packageJson = JSON.parse(packageJsonContents); + + // Change version number + const versionParts = packageJson.version.split('.'); + const buildNumberPortion = + versionParts.length > 2 ? versionParts[2].replace(/(\d+)/, args.buildNumber) : args.buildNumber; + const newVersion = + versionParts.length > 1 + ? 
`${versionParts[0]}.${versionParts[1]}.${buildNumberPortion}` + : packageJson.version; + packageJson.version = newVersion; + + // Write back to the package json + await fsExtra.writeFile('package.json', JSON.stringify(packageJson, null, 4), 'utf-8'); + + // Update the changelog.md if we are told to (this should happen on the release branch) + if (args.updateChangelog) { + const changeLogContents = await fsExtra.readFile('CHANGELOG.md', 'utf-8'); + const fixedContents = changeLogContents.replace( + /##\s*(\d+)\.(\d+)\.(\d+)\s*\(/, + `## $1.$2.${buildNumberPortion} (`, + ); + + // Write back to changelog.md + await fsExtra.writeFile('CHANGELOG.md', fixedContents, 'utf-8'); + } + } else { + throw Error('buildNumber argument required for updateBuildNumber task'); + } +} + +async function buildWebPack(webpackConfigName, args, env) { + // Remember to perform a case insensitive search. + const allowedWarnings = getAllowedWarningsForWebPack(webpackConfigName).map((item) => item.toLowerCase()); + const stdOut = await spawnAsync( + 'npm', + ['run', 'webpack', '--', ...args, ...['--mode', 'production', '--devtool', 'source-map']], + env, + ); + const stdOutLines = stdOut + .split(os.EOL) + .map((item) => item.trim()) + .filter((item) => item.length > 0); + // Remember to perform a case insensitive search. 
+ const warnings = stdOutLines + .filter((item) => item.startsWith('WARNING in ')) + .filter( + (item) => + allowedWarnings.findIndex((allowedWarning) => + item.toLowerCase().startsWith(allowedWarning.toLowerCase()), + ) === -1, + ); + const errors = stdOutLines.some((item) => item.startsWith('ERROR in')); + if (errors) { + throw new Error(`Errors in ${webpackConfigName}, \n${warnings.join(', ')}\n\n${stdOut}`); + } + if (warnings.length > 0) { + throw new Error( + `Warnings in ${webpackConfigName}, Check gulpfile.js to see if the warning should be allowed., \n\n${stdOut}`, + ); + } +} +function getAllowedWarningsForWebPack(buildConfig) { + switch (buildConfig) { + case 'production': + return [ + 'WARNING in asset size limit: The following asset(s) exceed the recommended size limit (244 KiB).', + 'WARNING in entrypoint size limit: The following entrypoint(s) combined asset size exceeds the recommended limit (244 KiB). This can impact web performance.', + 'WARNING in webpack performance recommendations:', + 'WARNING in ./node_modules/encoding/lib/iconv-loader.js', + 'WARNING in ./node_modules/any-promise/register.js', + 'WARNING in ./node_modules/diagnostic-channel-publishers/dist/src/azure-coretracing.pub.js', + 'WARNING in ./node_modules/applicationinsights/out/AutoCollection/NativePerformance.js', + ]; + case 'extension': + return [ + 'WARNING in ./node_modules/encoding/lib/iconv-loader.js', + 'WARNING in ./node_modules/any-promise/register.js', + 'remove-files-plugin@1.4.0:', + 'WARNING in ./node_modules/diagnostic-channel-publishers/dist/src/azure-coretracing.pub.js', + 'WARNING in ./node_modules/applicationinsights/out/AutoCollection/NativePerformance.js', + ]; + case 'debugAdapter': + return [ + 'WARNING in ./node_modules/vscode-uri/lib/index.js', + 'WARNING in ./node_modules/diagnostic-channel-publishers/dist/src/azure-coretracing.pub.js', + 'WARNING in ./node_modules/applicationinsights/out/AutoCollection/NativePerformance.js', + ]; + case 'browser': + 
return [ + 'WARNING in asset size limit: The following asset(s) exceed the recommended size limit (244 KiB).', + 'WARNING in entrypoint size limit: The following entrypoint(s) combined asset size exceeds the recommended limit (244 KiB). This can impact web performance.', + 'WARNING in webpack performance recommendations:', + ]; + default: + throw new Error('Unknown WebPack Configuration'); + } +} +gulp.task('renameSourceMaps', async () => { + // By default source maps will be disabled in the extension. + // Users will need to use the command `python.enableSourceMapSupport` to enable source maps. + const extensionSourceMap = path.join(__dirname, 'out', 'client', 'extension.js.map'); + await fsExtra.rename(extensionSourceMap, `${extensionSourceMap}.disabled`); +}); + +gulp.task('verifyBundle', async () => { + const matches = await glob.sync(path.join(__dirname, '*.vsix')); + if (!matches || matches.length === 0) { + throw new Error('Bundle does not exist'); + } else { + console.log(`Bundle ${matches[0]} exists.`); + } +}); + +gulp.task('prePublishBundle', gulp.series('webpack', 'renameSourceMaps')); +gulp.task('checkDependencies', gulp.series('checkNativeDependencies')); +gulp.task('prePublishNonBundle', gulp.series('compile')); + +// --- Start Positron --- +gulp.task('installPythonRequirements', async (done) => { + const args = [ + '-m', + 'pip', + '--disable-pip-version-check', + 'install', + '--no-user', + '-t', + './pythonFiles/lib/python', + '--no-cache-dir', + '--implementation', + 'py', + '--no-deps', + '--upgrade', + '-r', + './requirements.txt', + ]; + await spawnAsync(pythonCommand, args) + .then(() => true) + .catch((ex) => { + const msg = "Failed to install requirements using 'python'"; + fancyLog.error(ansiColors.red(`error`), msg, ex); + done(new Error(msg)); + }); + + // Vendor Python requirements for the Positron Python kernel. 
+ await spawnAsync(pythonCommand, ['scripts/vendor.py']).catch((ex) => { + const msg = 'Failed to vendor Python requirements'; + fancyLog.error(ansiColors.red(`error`), msg, ex); + done(new Error(msg)); + }); +}); + +// See https://github.com/microsoft/vscode-python/issues/7136 +gulp.task('installDebugpy', async (done) => { + // Install dependencies needed for 'install_debugpy.py' + const depsArgs = [ + '-m', + 'pip', + '--disable-pip-version-check', + 'install', + '--no-user', + '--upgrade', + '-t', + './pythonFiles/lib/temp', + '-r', + './build/build-install-requirements.txt', + ]; + await spawnAsync(pythonCommand, depsArgs) + .then(() => true) + .catch((ex) => { + const msg = "Failed to install dependencies need by 'install_debugpy.py' using 'python'"; + fancyLog.error(ansiColors.red(`error`), msg, ex); + done(new Error(msg)); + }); + + // Install new DEBUGPY with wheels for python + const wheelsArgs = ['./pythonFiles/install_debugpy.py']; + const wheelsEnv = { PYTHONPATH: './pythonFiles/lib/temp' }; + await spawnAsync(pythonCommand, wheelsArgs, wheelsEnv) + .then(() => true) + .catch((ex) => { + const msg = "Failed to install DEBUGPY wheels using 'python'"; + fancyLog.error(ansiColors.red(`error`), msg, ex); + done(new Error(msg)); + }); + + // Download get-pip.py + const getPipArgs = ['./pythonFiles/download_get_pip.py']; + const getPipEnv = { PYTHONPATH: './pythonFiles/lib/temp' }; + await spawnAsync(pythonCommand, getPipArgs, getPipEnv) + .then(() => true) + .catch((ex) => { + const msg = "Failed to download get-pip.py using 'python'"; + fancyLog.error(ansiColors.red(`error`), msg, ex); + done(new Error(msg)); + }); + + rmrf.sync('./pythonFiles/lib/temp'); +}); + +gulp.task('installPythonLibs', gulp.series('installPythonRequirements', 'installDebugpy')); + +function locatePython() { + let pythonPath = process.env.CI_PYTHON_PATH || 'python3'; + const whichCommand = os.platform() === 'win32' ? 
'where' : 'which'; + try { + const result = spawn.sync(whichCommand, [pythonPath], { encoding: 'utf8' }).stdout.toString(); + if (result.trim().length === 0) { + throw new Error('Could not find python!'); + } + } catch (ex) { + // Otherwise, default to python + const msg = `Error: could not find python at '${pythonPath}'. Using 'python' instead.`; + fancyLog.warn(ansiColors.yellow(`warning`), msg); + pythonPath = 'python'; + } + return pythonPath; +} + +function spawnAsync(command, args, env, rejectOnStdErr = false) { + env = env || {}; + env = { ...process.env, ...env }; + return new Promise((resolve, reject) => { + let stdOut = ''; + let stdErr = ''; + console.info(`> ${command} ${args.join(' ')}`); + const proc = spawn(command, args, { cwd: __dirname, env }); + proc.stdout.on('data', (data) => { + // Log output on CI (else travis times out when there's not output). + stdOut += data.toString(); + if (isCI) { + console.log(data.toString()); + } + }); + proc.stderr.on('data', (data) => { + // Capture all of the stdErr to print out if the process fails. 
+ stdErr += data.toString(); + if (isCI) { + console.error(stdErr); + } + }); + + proc.on('close', () => { + if ((stdErr && rejectOnStdErr) || proc.exitCode !== 0) { + reject(stdErr); + } + resolve(stdOut); + }); + proc.on('error', (error) => reject(error)); + }); +} +// --- End Positron --- + +function hasNativeDependencies() { + let nativeDependencies = nativeDependencyChecker.check(path.join(__dirname, 'node_modules')); + if (!Array.isArray(nativeDependencies) || nativeDependencies.length === 0) { + return false; + } + const dependencies = JSON.parse(spawn.sync('npm', ['ls', '--json', '--prod']).stdout.toString()); + const jsonProperties = Object.keys(flat.flatten(dependencies)); + nativeDependencies = _.flatMap(nativeDependencies, (item) => + path.dirname(item.substring(item.indexOf('node_modules') + 'node_modules'.length)).split(path.sep), + ) + .filter((item) => item.length > 0) + .filter((item) => item !== 'fsevents') + .filter( + (item) => + jsonProperties.findIndex((flattenedDependency) => + flattenedDependency.endsWith(`dependencies.${item}.version`), + ) >= 0, + ); + if (nativeDependencies.length > 0) { + console.error('Native dependencies detected', nativeDependencies); + return true; + } + return false; +} diff --git a/extensions/positron-python/icon.png b/extensions/positron-python/icon.png new file mode 100644 index 00000000000..5ae724858d3 Binary files /dev/null and b/extensions/positron-python/icon.png differ diff --git a/extensions/positron-python/images/ConfigureDebugger.gif b/extensions/positron-python/images/ConfigureDebugger.gif new file mode 100644 index 00000000000..41113d65896 Binary files /dev/null and b/extensions/positron-python/images/ConfigureDebugger.gif differ diff --git a/extensions/positron-python/images/ConfigureTests.gif b/extensions/positron-python/images/ConfigureTests.gif new file mode 100644 index 00000000000..38ae2db551e Binary files /dev/null and b/extensions/positron-python/images/ConfigureTests.gif differ diff --git 
a/extensions/positron-python/images/InterpreterSelectionZoom.gif b/extensions/positron-python/images/InterpreterSelectionZoom.gif new file mode 100644 index 00000000000..dc5db03aad3 Binary files /dev/null and b/extensions/positron-python/images/InterpreterSelectionZoom.gif differ diff --git a/extensions/positron-python/images/JavascriptProfiler.png b/extensions/positron-python/images/JavascriptProfiler.png new file mode 100644 index 00000000000..f26e1480c02 Binary files /dev/null and b/extensions/positron-python/images/JavascriptProfiler.png differ diff --git a/extensions/positron-python/images/OpenOrCreateNotebook.gif b/extensions/positron-python/images/OpenOrCreateNotebook.gif new file mode 100644 index 00000000000..a0957d415d7 Binary files /dev/null and b/extensions/positron-python/images/OpenOrCreateNotebook.gif differ diff --git a/extensions/positron-python/images/addIcon.PNG b/extensions/positron-python/images/addIcon.PNG new file mode 100644 index 00000000000..8027e617e9e Binary files /dev/null and b/extensions/positron-python/images/addIcon.PNG differ diff --git a/extensions/positron-python/images/codeIcon.PNG b/extensions/positron-python/images/codeIcon.PNG new file mode 100644 index 00000000000..7ad46cee077 Binary files /dev/null and b/extensions/positron-python/images/codeIcon.PNG differ diff --git a/extensions/positron-python/images/dataViewerIcon.PNG b/extensions/positron-python/images/dataViewerIcon.PNG new file mode 100644 index 00000000000..6848c600794 Binary files /dev/null and b/extensions/positron-python/images/dataViewerIcon.PNG differ diff --git a/extensions/positron-python/images/dataviewer.gif b/extensions/positron-python/images/dataviewer.gif new file mode 100644 index 00000000000..ce0c81676c0 Binary files /dev/null and b/extensions/positron-python/images/dataviewer.gif differ diff --git a/extensions/positron-python/images/debugDemo.gif b/extensions/positron-python/images/debugDemo.gif new file mode 100644 index 00000000000..48d7fed5569 
Binary files /dev/null and b/extensions/positron-python/images/debugDemo.gif differ diff --git a/extensions/positron-python/images/exportIcon.PNG b/extensions/positron-python/images/exportIcon.PNG new file mode 100644 index 00000000000..e5e588040ee Binary files /dev/null and b/extensions/positron-python/images/exportIcon.PNG differ diff --git a/extensions/positron-python/images/general.gif b/extensions/positron-python/images/general.gif new file mode 100644 index 00000000000..50c95613d68 Binary files /dev/null and b/extensions/positron-python/images/general.gif differ diff --git a/extensions/positron-python/images/interactive.gif b/extensions/positron-python/images/interactive.gif new file mode 100644 index 00000000000..f8080fa9457 Binary files /dev/null and b/extensions/positron-python/images/interactive.gif differ diff --git a/extensions/positron-python/images/kernelchange.gif b/extensions/positron-python/images/kernelchange.gif new file mode 100644 index 00000000000..d2b753b84c0 Binary files /dev/null and b/extensions/positron-python/images/kernelchange.gif differ diff --git a/extensions/positron-python/images/markdownIcon.PNG b/extensions/positron-python/images/markdownIcon.PNG new file mode 100644 index 00000000000..04e5d67749d Binary files /dev/null and b/extensions/positron-python/images/markdownIcon.PNG differ diff --git a/extensions/positron-python/images/playIcon.PNG b/extensions/positron-python/images/playIcon.PNG new file mode 100644 index 00000000000..60ae4a2051d Binary files /dev/null and b/extensions/positron-python/images/playIcon.PNG differ diff --git a/extensions/positron-python/images/plotViewerIcon.PNG b/extensions/positron-python/images/plotViewerIcon.PNG new file mode 100644 index 00000000000..e8ecf0d97b5 Binary files /dev/null and b/extensions/positron-python/images/plotViewerIcon.PNG differ diff --git a/extensions/positron-python/images/plotviewer.gif b/extensions/positron-python/images/plotviewer.gif new file mode 100644 index 
00000000000..a3c438b761e Binary files /dev/null and b/extensions/positron-python/images/plotviewer.gif differ diff --git a/extensions/positron-python/images/remoteserver.gif b/extensions/positron-python/images/remoteserver.gif new file mode 100644 index 00000000000..f979d557aa6 Binary files /dev/null and b/extensions/positron-python/images/remoteserver.gif differ diff --git a/extensions/positron-python/images/runbyline.gif b/extensions/positron-python/images/runbyline.gif new file mode 100644 index 00000000000..1c0679f9a45 Binary files /dev/null and b/extensions/positron-python/images/runbyline.gif differ diff --git a/extensions/positron-python/images/savetopythonfile.png b/extensions/positron-python/images/savetopythonfile.png new file mode 100644 index 00000000000..e4a7f08d3db Binary files /dev/null and b/extensions/positron-python/images/savetopythonfile.png differ diff --git a/extensions/positron-python/images/unittest.gif b/extensions/positron-python/images/unittest.gif new file mode 100644 index 00000000000..1511bccb739 Binary files /dev/null and b/extensions/positron-python/images/unittest.gif differ diff --git a/extensions/positron-python/images/variableExplorerIcon.PNG b/extensions/positron-python/images/variableExplorerIcon.PNG new file mode 100644 index 00000000000..f8363dda9de Binary files /dev/null and b/extensions/positron-python/images/variableExplorerIcon.PNG differ diff --git a/extensions/positron-python/images/variableexplorer.png b/extensions/positron-python/images/variableexplorer.png new file mode 100644 index 00000000000..31197571b79 Binary files /dev/null and b/extensions/positron-python/images/variableexplorer.png differ diff --git a/extensions/positron-python/languages/pip-requirements.json b/extensions/positron-python/languages/pip-requirements.json new file mode 100644 index 00000000000..746aa3ac3e2 --- /dev/null +++ b/extensions/positron-python/languages/pip-requirements.json @@ -0,0 +1,5 @@ +{ + "comments": { + "lineComment": "#" + } +} 
diff --git a/extensions/positron-python/noxfile.py b/extensions/positron-python/noxfile.py new file mode 100644 index 00000000000..b9ebba64544 --- /dev/null +++ b/extensions/positron-python/noxfile.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pathlib +import nox +import shutil + + +@nox.session() +def install_python_libs(session: nox.Session): + requirements = [ + ("./pythonFiles/lib/python", "./requirements.txt"), + ( + "./pythonFiles/lib/jedilsp", + "./pythonFiles/jedilsp_requirements/requirements.txt", + ), + ] + for target, file in requirements: + session.install( + "-t", + target, + "--no-cache-dir", + "--implementation", + "py", + "--no-deps", + "--require-hashes", + "--only-binary", + ":all:", + "-r", + file, + ) + + session.install("packaging") + + # Install debugger + session.run( + "python", + "./pythonFiles/install_debugpy.py", + env={"PYTHONPATH": "./pythonFiles/lib/temp"}, + ) + + # Download get-pip script + session.run( + "python", + "./pythonFiles/download_get_pip.py", + env={"PYTHONPATH": "./pythonFiles/lib/temp"}, + ) + + if pathlib.Path("./pythonFiles/lib/temp").exists(): + shutil.rmtree("./pythonFiles/lib/temp") diff --git a/extensions/positron-python/package.json b/extensions/positron-python/package.json new file mode 100644 index 00000000000..83845e7920e --- /dev/null +++ b/extensions/positron-python/package.json @@ -0,0 +1,1740 @@ +{ + "name": "python", + "displayName": "Positron Python", + "description": "Positron Python", + "version": "2024.99.0-dev", + "featureFlags": { + "usingNewInterpreterStorage": true + }, + "capabilities": { + "untrustedWorkspaces": { + "supported": "limited", + "description": "Only Partial IntelliSense with Pylance is supported. Cannot execute Python with untrusted files." + }, + "virtualWorkspaces": { + "supported": "limited", + "description": "Only Partial IntelliSense supported." 
+ } + }, + "publisher": "ms-python", + "enabledApiProposals": [ + "contribEditorContentMenu", + "quickPickSortByLabel", + "testObserver", + "quickPickItemTooltip", + "terminalDataWriteEvent", + "terminalExecuteCommandEvent" + ], + "author": { + "name": "Posit Software, PBC", + "url": "https://posit.co/" + }, + "contributors": [ + { + "name": "Microsoft Corporation", + "url": "https://github.com/Microsoft/vscode-python" + } + ], + "license": "MIT", + "homepage": "https://github.com/posit-dev/positron-python", + "repository": { + "type": "git", + "url": "https://github.com/posit-dev/positron-python" + }, + "bugs": { + "url": "https://github.com/posit-dev/positron/issues" + }, + "icon": "icon.png", + "galleryBanner": { + "color": "#447099", + "theme": "dark" + }, + "engines": { + "vscode": "^1.82.0" + }, + "enableTelemetry": false, + "keywords": [ + "python", + "datascience", + "unittest", + "multi-root ready" + ], + "categories": [ + "Programming Languages", + "Debuggers", + "Linters", + "Formatters", + "Other", + "Data Science", + "Machine Learning" + ], + "activationEvents": [ + "onStartupFinished" + ], + "main": "./out/client/extension", + "browser": "./dist/extension.browser.js", + "l10n": "./l10n", + "contributes": { + "walkthroughs": [ + { + "id": "pythonWelcome", + "title": "%walkthrough.pythonWelcome.title%", + "description": "%walkthrough.pythonWelcome.description%", + "when": "workspacePlatform != webworker", + "steps": [ + { + "id": "python.createPythonFile", + "title": "%walkthrough.step.python.createPythonFile.title%", + "description": "%walkthrough.step.python.createPythonFile.description%", + "media": { + "svg": "resources/walkthrough/open-folder.svg", + "altText": "%walkthrough.step.python.createPythonFile.altText%" + }, + "when": "" + }, + { + "id": "python.installPythonWin8", + "title": "%walkthrough.step.python.installPythonWin8.title%", + "description": "%walkthrough.step.python.installPythonWin8.description%", + "media": { + "markdown": 
"resources/walkthrough/install-python-windows-8.md" + }, + "when": "workspacePlatform == windows && showInstallPythonTile" + }, + { + "id": "python.installPythonMac", + "title": "%walkthrough.step.python.installPythonMac.title%", + "description": "%walkthrough.step.python.installPythonMac.description%", + "media": { + "markdown": "resources/walkthrough/install-python-macos.md" + }, + "when": "workspacePlatform == mac && showInstallPythonTile", + "command": "workbench.action.terminal.new" + }, + { + "id": "python.installPythonLinux", + "title": "%walkthrough.step.python.installPythonLinux.title%", + "description": "%walkthrough.step.python.installPythonLinux.description%", + "media": { + "markdown": "resources/walkthrough/install-python-linux.md" + }, + "when": "workspacePlatform == linux && showInstallPythonTile", + "command": "workbench.action.terminal.new" + }, + { + "id": "python.selectInterpreter", + "title": "%walkthrough.step.python.selectInterpreter.title%", + "description": "%walkthrough.step.python.selectInterpreter.description%", + "media": { + "svg": "resources/walkthrough/python-interpreter.svg", + "altText": "%walkthrough.step.python.selectInterpreter.altText%" + }, + "when": "workspaceFolderCount == 0" + }, + { + "id": "python.createEnvironment", + "title": "%walkthrough.step.python.createEnvironment.title%", + "description": "%walkthrough.step.python.createEnvironment.description%", + "media": { + "svg": "resources/walkthrough/create-environment.svg", + "altText": "%walkthrough.step.python.createEnvironment.altText%" + }, + "when": "workspaceFolderCount > 0" + }, + { + "id": "python.runAndDebug", + "title": "%walkthrough.step.python.runAndDebug.title%", + "description": "%walkthrough.step.python.runAndDebug.description%", + "media": { + "svg": "resources/walkthrough/rundebug2.svg", + "altText": "%walkthrough.step.python.runAndDebug.altText%" + }, + "when": "" + }, + { + "id": "python.learnMoreWithDS", + "title": 
"%walkthrough.step.python.learnMoreWithDS.title%", + "description": "%walkthrough.step.python.learnMoreWithDS.description%", + "media": { + "altText": "%walkthrough.step.python.learnMoreWithDS.altText%", + "svg": "resources/walkthrough/learnmore.svg" + }, + "when": "" + } + ] + }, + { + "id": "pythonWelcome2", + "title": "%walkthrough.pythonWelcome.title%", + "description": "%walkthrough.pythonWelcome.description%", + "when": "false", + "steps": [ + { + "id": "python.createPythonFolder", + "title": "%walkthrough.step.python.createPythonFolder.title%", + "description": "%walkthrough.step.python.createPythonFolder.description%", + "media": { + "svg": "resources/walkthrough/open-folder.svg", + "altText": "%walkthrough.step.python.createPythonFile.altText%" + }, + "when": "workspaceFolderCount = 0" + }, + { + "id": "python.createPythonFile", + "title": "%walkthrough.step.python.createPythonFile.title%", + "description": "%walkthrough.step.python.createPythonFile.description%", + "media": { + "svg": "resources/walkthrough/open-folder.svg", + "altText": "%walkthrough.step.python.createPythonFile.altText%" + }, + "when": "" + }, + { + "id": "python.installPythonWin8", + "title": "%walkthrough.step.python.installPythonWin8.title%", + "description": "%walkthrough.step.python.installPythonWin8.description%", + "media": { + "markdown": "resources/walkthrough/install-python-windows-8.md" + }, + "when": "workspacePlatform == windows && showInstallPythonTile" + }, + { + "id": "python.installPythonMac", + "title": "%walkthrough.step.python.installPythonMac.title%", + "description": "%walkthrough.step.python.installPythonMac.description%", + "media": { + "markdown": "resources/walkthrough/install-python-macos.md" + }, + "when": "workspacePlatform == mac && showInstallPythonTile", + "command": "workbench.action.terminal.new" + }, + { + "id": "python.installPythonLinux", + "title": "%walkthrough.step.python.installPythonLinux.title%", + "description": 
"%walkthrough.step.python.installPythonLinux.description%", + "media": { + "markdown": "resources/walkthrough/install-python-linux.md" + }, + "when": "workspacePlatform == linux && showInstallPythonTile", + "command": "workbench.action.terminal.new" + }, + { + "id": "python.createEnvironment2", + "title": "%walkthrough.step.python.createEnvironment.title2%", + "description": "%walkthrough.step.python.createEnvironment.description2%", + "media": { + "markdown": "resources/walkthrough/environments-info.md" + }, + "when": "" + }, + { + "id": "python.runAndDebug", + "title": "%walkthrough.step.python.runAndDebug.title%", + "description": "%walkthrough.step.python.runAndDebug.description%", + "media": { + "svg": "resources/walkthrough/rundebug2.svg", + "altText": "%walkthrough.step.python.runAndDebug.altText%" + }, + "when": "" + }, + { + "id": "python.learnMoreWithDS2", + "title": "%walkthrough.step.python.learnMoreWithDS.title%", + "description": "%walkthrough.step.python.learnMoreWithDS.description2%", + "media": { + "altText": "%walkthrough.step.python.learnMoreWithDS.altText%", + "svg": "resources/walkthrough/learnmore.svg" + }, + "when": "" + } + ] + }, + { + "id": "pythonDataScienceWelcome", + "title": "%walkthrough.pythonDataScienceWelcome.title%", + "description": "%walkthrough.pythonDataScienceWelcome.description%", + "when": "false", + "steps": [ + { + "id": "python.installJupyterExt", + "title": "%walkthrough.step.python.installJupyterExt.title%", + "description": "%walkthrough.step.python.installJupyterExt.description%", + "media": { + "svg": "resources/walkthrough/data-science.svg", + "altText": "%walkthrough.step.python.installJupyterExt.altText%" + } + }, + { + "id": "python.createNewNotebook", + "title": "%walkthrough.step.python.createNewNotebook.title%", + "description": "%walkthrough.step.python.createNewNotebook.description%", + "media": { + "svg": "resources/walkthrough/create-notebook.svg", + "altText": 
"%walkthrough.step.python.createNewNotebook.altText%" + }, + "completionEvents": [ + "onCommand:jupyter.createnewnotebook", + "onCommand:workbench.action.files.openFolder", + "onCommand:workbench.action.files.openFileFolder" + ] + }, + { + "id": "python.openInteractiveWindow", + "title": "%walkthrough.step.python.openInteractiveWindow.title%", + "description": "%walkthrough.step.python.openInteractiveWindow.description%", + "media": { + "svg": "resources/walkthrough/interactive-window.svg", + "altText": "%walkthrough.step.python.openInteractiveWindow.altText%" + }, + "completionEvents": [ + "onCommand:jupyter.createnewinteractive" + ] + }, + { + "id": "python.dataScienceLearnMore", + "title": "%walkthrough.step.python.dataScienceLearnMore.title%", + "description": "%walkthrough.step.python.dataScienceLearnMore.description%", + "media": { + "svg": "resources/walkthrough/learnmore.svg", + "altText": "%walkthrough.step.python.dataScienceLearnMore.altText%" + } + } + ] + } + ], + "breakpoints": [ + { + "language": "html" + }, + { + "language": "jinja" + }, + { + "language": "python" + }, + { + "language": "django-html" + }, + { + "language": "django-txt" + } + ], + "commands": [ + { + "title": "%python.command.python.createNewFile.title%", + "shortTitle": "%python.menu.createNewFile.title%", + "category": "Python", + "command": "python.createNewFile" + }, + { + "category": "Python", + "command": "python.analysis.restartLanguageServer", + "title": "%python.command.python.analysis.restartLanguageServer.title%" + }, + { + "category": "Python", + "command": "python.clearCacheAndReload", + "title": "%python.command.python.clearCacheAndReload.title%" + }, + { + "category": "Python", + "command": "python.clearWorkspaceInterpreter", + "title": "%python.command.python.clearWorkspaceInterpreter.title%" + }, + { + "category": "Python", + "command": "python.configureTests", + "title": "%python.command.python.configureTests.title%" + }, + { + "category": "Python", + "command": 
"python.createTerminal", + "title": "%python.command.python.createTerminal.title%" + }, + { + "category": "Python", + "command": "python.createEnvironment", + "title": "%python.command.python.createEnvironment.title%" + }, + { + "category": "Python", + "command": "python.createEnvironment-button", + "title": "%python.command.python.createEnvironment.title%" + }, + { + "category": "Python", + "command": "python.enableSourceMapSupport", + "title": "%python.command.python.enableSourceMapSupport.title%" + }, + { + "category": "Python", + "command": "python.execInTerminal", + "title": "%python.command.python.execInTerminal.title%" + }, + { + "category": "Python", + "command": "python.execInTerminal-icon", + "icon": "$(play)", + "title": "%python.command.python.execInTerminalIcon.title%" + }, + { + "category": "Python", + "command": "python.execInConsole", + "icon": "$(play)", + "title": "%python.command.python.execInConsole.title%" + }, + { + "category": "Python", + "command": "python.debugInTerminal", + "icon": "$(debug-alt)", + "title": "%python.command.python.debugInTerminal.title%" + }, + { + "category": "Python", + "command": "python.execSelectionInDjangoShell", + "title": "%python.command.python.execSelectionInDjangoShell.title%" + }, + { + "category": "Python", + "command": "python.execSelectionInTerminal", + "title": "%python.command.python.execSelectionInTerminal.title%" + }, + { + "category": "Python", + "command": "python.execSelectionInConsole", + "icon": "$(play)", + "title": "%python.command.python.execSelectionInConsole.title%" + }, + { + "category": "Python", + "command": "python.launchTensorBoard", + "title": "%python.command.python.launchTensorBoard.title%" + }, + { + "category": "Python", + "command": "python.refreshTensorBoard", + "enablement": "python.hasActiveTensorBoardSession", + "icon": "$(refresh)", + "title": "%python.command.python.refreshTensorBoard.title%" + }, + { + "category": "Python", + "command": "python.reportIssue", + "title": 
"%python.command.python.reportIssue.title%" + }, + { + "category": "Test", + "command": "testing.reRunFailTests", + "icon": "$(run-errors)", + "title": "%python.command.testing.rerunFailedTests.title%" + }, + { + "category": "Python", + "command": "python.setInterpreter", + "title": "%python.command.python.setInterpreter.title%" + }, + { + "category": "Python", + "command": "python.startREPL", + "title": "%python.command.python.startREPL.title%" + }, + { + "category": "Python", + "command": "python.viewLanguageServerOutput", + "enablement": "python.hasLanguageServerOutputChannel", + "title": "%python.command.python.viewLanguageServerOutput.title%" + }, + { + "category": "Python", + "command": "python.viewOutput", + "icon": { + "dark": "resources/dark/repl.svg", + "light": "resources/light/repl.svg" + }, + "title": "%python.command.python.viewOutput.title%" + }, + { + "category": "Python", + "command": "python.installJupyter", + "title": "%python.command.python.installJupyter.title%" + } + ], + "configuration": { + "properties": { + "python.activeStateToolPath": { + "default": "state", + "description": "%python.activeStateToolPath.description%", + "scope": "machine-overridable", + "type": "string" + }, + "python.autoComplete.extraPaths": { + "default": [], + "description": "%python.autoComplete.extraPaths.description%", + "scope": "resource", + "type": "array", + "uniqueItems": true + }, + "python.createEnvironment.contentButton": { + "default": "show", + "markdownDescription": "%python.createEnvironment.contentButton.description%", + "scope": "machine-overridable", + "type": "string", + "enum": [ + "show", + "hide" + ] + }, + "python.createEnvironment.trigger": { + "default": "off", + "markdownDescription": "%python.createEnvironment.trigger.description%", + "scope": "machine-overridable", + "type": "string", + "enum": [ + "off", + "prompt" + ] + }, + "python.condaPath": { + "default": "", + "description": "%python.condaPath.description%", + "scope": "machine", + 
"type": "string" + }, + "python.defaultInterpreterPath": { + "default": "python", + "markdownDescription": "%python.defaultInterpreterPath.description%", + "scope": "machine-overridable", + "type": "string" + }, + "python.diagnostics.sourceMapsEnabled": { + "default": false, + "description": "%python.diagnostics.sourceMapsEnabled.description%", + "scope": "application", + "type": "boolean" + }, + "python.envFile": { + "default": "${workspaceFolder}/.env", + "description": "%python.envFile.description%", + "scope": "resource", + "type": "string" + }, + "python.experiments.enabled": { + "default": true, + "description": "%python.experiments.enabled.description%", + "scope": "machine", + "type": "boolean" + }, + "python.experiments.optInto": { + "default": [], + "markdownDescription": "%python.experiments.optInto.description%", + "items": { + "enum": [ + "All", + "pythonSurveyNotification", + "pythonPromptNewToolsExt", + "pythonTerminalEnvVarActivation", + "pythonDiscoveryUsingWorkers", + "pythonTestAdapter", + "pythonREPLSmartSend", + "pythonRecommendTensorboardExt" + ], + "enumDescriptions": [ + "%python.experiments.All.description%", + "%python.experiments.pythonSurveyNotification.description%", + "%python.experiments.pythonPromptNewToolsExt.description%", + "%python.experiments.pythonTerminalEnvVarActivation.description%", + "%python.experiments.pythonDiscoveryUsingWorkers.description%", + "%python.experiments.pythonTestAdapter.description%", + "%python.experiments.pythonREPLSmartSend.description%", + "%python.experiments.pythonRecommendTensorboardExt.description%" + ] + }, + "scope": "machine", + "type": "array", + "uniqueItems": true + }, + "python.experiments.optOutFrom": { + "default": [], + "markdownDescription": "%python.experiments.optOutFrom.description%", + "items": { + "enum": [ + "All", + "pythonSurveyNotification", + "pythonPromptNewToolsExt", + "pythonTerminalEnvVarActivation", + "pythonDiscoveryUsingWorkers", + "pythonTestAdapter", + 
"pythonREPLSmartSend" + ], + "enumDescriptions": [ + "%python.experiments.All.description%", + "%python.experiments.pythonSurveyNotification.description%", + "%python.experiments.pythonPromptNewToolsExt.description%", + "%python.experiments.pythonTerminalEnvVarActivation.description%", + "%python.experiments.pythonDiscoveryUsingWorkers.description%", + "%python.experiments.pythonTestAdapter.description%", + "%python.experiments.pythonREPLSmartSend.description%" + ] + }, + "scope": "machine", + "type": "array", + "uniqueItems": true + }, + "python.globalModuleInstallation": { + "default": false, + "description": "%python.globalModuleInstallation.description%", + "scope": "resource", + "type": "boolean" + }, + "python.languageServerDebug": { + "default": false, + "description": "%python.languageServerDebug.description%", + "scope": "application", + "type": "boolean" + }, + "python.languageServerLogLevel": { + "default": "error", + "markdownDescription": "%python.languageServerLogLevel.description%", + "enum": [ + "critical", + "error", + "warn", + "info", + "debug" + ], + "scope": "application", + "type": "string" + }, + "python.interpreter.infoVisibility": { + "default": "never", + "description": "%python.interpreter.infoVisibility.description%", + "enum": [ + "never", + "onPythonRelated", + "always" + ], + "enumDescriptions": [ + "%python.interpreter.infoVisibility.never.description%", + "%python.interpreter.infoVisibility.onPythonRelated.description%", + "%python.interpreter.infoVisibility.always.description%" + ], + "scope": "machine", + "type": "string" + }, + "python.logging.level": { + "default": "error", + "deprecationMessage": "%python.logging.level.deprecation%", + "description": "%python.logging.level.description%", + "enum": [ + "debug", + "error", + "info", + "off", + "warn" + ], + "scope": "machine", + "type": "string" + }, + "python.missingPackage.severity": { + "default": "Hint", + "description": "%python.missingPackage.severity.description%", + 
"enum": [ + "Error", + "Hint", + "Information", + "Warning" + ], + "scope": "resource", + "type": "string", + "tags": [ + "experimental" + ] + }, + "python.pipenvPath": { + "default": "pipenv", + "description": "%python.pipenvPath.description%", + "scope": "machine-overridable", + "type": "string" + }, + "python.poetryPath": { + "default": "poetry", + "description": "%python.poetryPath.description%", + "scope": "machine-overridable", + "type": "string" + }, + "python.quietMode": { + "scope": "window", + "type": "boolean", + "default": false, + "description": "%python.quietMode.description%" + }, + "python.tensorBoard.logDirectory": { + "default": "", + "description": "%python.tensorBoard.logDirectory.description%", + "scope": "resource", + "type": "string", + "markdownDeprecationMessage": "%python.tensorBoard.logDirectory.markdownDeprecationMessage%", + "deprecationMessage": "%python.tensorBoard.logDirectory.deprecationMessage%" + }, + "python.terminal.activateEnvInCurrentTerminal": { + "default": false, + "description": "%python.terminal.activateEnvInCurrentTerminal.description%", + "scope": "resource", + "type": "boolean" + }, + "python.terminal.activateEnvironment": { + "default": true, + "description": "%python.terminal.activateEnvironment.description%", + "scope": "resource", + "type": "boolean" + }, + "python.terminal.executeInFileDir": { + "default": false, + "description": "%python.terminal.executeInFileDir.description%", + "scope": "resource", + "type": "boolean" + }, + "python.terminal.focusAfterLaunch": { + "default": false, + "description": "%python.terminal.focusAfterLaunch.description%", + "scope": "resource", + "type": "boolean" + }, + "python.terminal.launchArgs": { + "default": [], + "description": "%python.terminal.launchArgs.description%", + "scope": "resource", + "type": "array" + }, + "python.REPL.enableREPLSmartSend": { + "default": true, + "description": "%python.EnableREPLSmartSend.description%", + "scope": "resource", + "type": "boolean" + 
}, + "python.testing.autoTestDiscoverOnSaveEnabled": { + "default": true, + "description": "%python.testing.autoTestDiscoverOnSaveEnabled.description%", + "scope": "resource", + "type": "boolean" + }, + "python.testing.cwd": { + "default": null, + "description": "%python.testing.cwd.description%", + "scope": "resource", + "type": "string" + }, + "python.testing.debugPort": { + "default": 3000, + "description": "%python.testing.debugPort.description%", + "scope": "resource", + "type": "number" + }, + "python.testing.promptToConfigure": { + "default": true, + "description": "%python.testing.promptToConfigure.description%", + "scope": "resource", + "type": "boolean" + }, + "python.testing.pytestArgs": { + "default": [], + "description": "%python.testing.pytestArgs.description%", + "items": { + "type": "string" + }, + "scope": "resource", + "type": "array" + }, + "python.testing.pytestEnabled": { + "default": false, + "description": "%python.testing.pytestEnabled.description%", + "scope": "resource", + "type": "boolean" + }, + "python.testing.pytestPath": { + "default": "pytest", + "description": "%python.testing.pytestPath.description%", + "scope": "machine-overridable", + "type": "string" + }, + "python.testing.unittestArgs": { + "default": [ + "-v", + "-s", + ".", + "-p", + "*test*.py" + ], + "description": "%python.testing.unittestArgs.description%", + "items": { + "type": "string" + }, + "scope": "resource", + "type": "array" + }, + "python.testing.unittestEnabled": { + "default": false, + "description": "%python.testing.unittestEnabled.description%", + "scope": "resource", + "type": "boolean" + }, + "python.venvFolders": { + "default": [], + "description": "%python.venvFolders.description%", + "items": { + "type": "string" + }, + "scope": "machine", + "type": "array", + "uniqueItems": true + }, + "python.venvPath": { + "default": "", + "description": "%python.venvPath.description%", + "scope": "machine", + "type": "string" + } + }, + "title": "Python", + "type": 
"object" + }, + "configurationDefaults": { + "pyright.disableLanguageServices": true, + "pyright.disableOrganizeImports": true, + "python.analysis.autoImportCompletions": false, + "python.analysis.typeCheckingMode": "off" + }, + "debuggers": [ + { + "configurationAttributes": { + "attach": { + "properties": { + "connect": { + "label": "Attach by connecting to debugpy over a socket.", + "properties": { + "host": { + "default": "127.0.0.1", + "description": "Hostname or IP address to connect to.", + "type": "string" + }, + "port": { + "description": "Port to connect to.", + "type": "number" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "debugAdapterPath": { + "description": "Path (fully qualified) to the python debug adapter executable.", + "type": "string" + }, + "django": { + "default": false, + "description": "Django debugging.", + "type": "boolean" + }, + "host": { + "default": "127.0.0.1", + "description": "Hostname or IP address to connect to.", + "type": "string" + }, + "jinja": { + "default": null, + "description": "Jinja template debugging (e.g. Flask).", + "enum": [ + false, + null, + true + ] + }, + "justMyCode": { + "default": true, + "description": "If true, show and debug only user-written code. 
If false, show and debug all code, including library calls.", + "type": "boolean" + }, + "listen": { + "label": "Attach by listening for incoming socket connection from debugpy", + "properties": { + "host": { + "default": "127.0.0.1", + "description": "Hostname or IP address of the interface to listen on.", + "type": "string" + }, + "port": { + "description": "Port to listen on.", + "type": "number" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "logToFile": { + "default": false, + "description": "Enable logging of debugger events to a log file.", + "type": "boolean" + }, + "pathMappings": { + "default": [], + "items": { + "label": "Path mapping", + "properties": { + "localRoot": { + "default": "${workspaceFolder}", + "label": "Local source root.", + "type": "string" + }, + "remoteRoot": { + "default": "", + "label": "Remote source root.", + "type": "string" + } + }, + "required": [ + "localRoot", + "remoteRoot" + ], + "type": "object" + }, + "label": "Path mappings.", + "type": "array" + }, + "port": { + "description": "Port to connect to.", + "type": "number" + }, + "processId": { + "anyOf": [ + { + "default": "${command:pickProcess}", + "description": "Use process picker to select a process to attach, or Process ID as integer.", + "enum": [ + "${command:pickProcess}" + ] + }, + { + "description": "ID of the local process to attach to.", + "type": "integer" + } + ] + }, + "redirectOutput": { + "default": true, + "description": "Redirect output.", + "type": "boolean" + }, + "showReturnValue": { + "default": true, + "description": "Show return value of functions when stepping.", + "type": "boolean" + }, + "subProcess": { + "default": false, + "description": "Whether to enable Sub Process debugging", + "type": "boolean" + } + } + }, + "launch": { + "properties": { + "args": { + "default": [], + "description": "Command line arguments passed to the program.", + "items": { + "type": "string" + }, + "type": [ + "array", + "string" + ] + }, + 
"autoReload": { + "default": {}, + "description": "Configures automatic reload of code on edit.", + "properties": { + "enable": { + "default": false, + "description": "Automatically reload code on edit.", + "type": "boolean" + }, + "exclude": { + "default": [ + "**/.git/**", + "**/.metadata/**", + "**/__pycache__/**", + "**/node_modules/**", + "**/site-packages/**" + ], + "description": "Glob patterns of paths to exclude from auto reload.", + "items": { + "type": "string" + }, + "type": "array" + }, + "include": { + "default": [ + "**/*.py", + "**/*.pyw" + ], + "description": "Glob patterns of paths to include in auto reload.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "console": { + "default": "integratedTerminal", + "description": "Where to launch the debug target: internal console, integrated terminal, or external terminal.", + "enum": [ + "externalTerminal", + "integratedTerminal", + "internalConsole" + ] + }, + "consoleTitle": { + "default": "Python Debug Console", + "description": "Display name of the debug console or terminal" + }, + "cwd": { + "default": "${workspaceFolder}", + "description": "Absolute path to the working directory of the program being debugged. Default is the root directory of the file (leave empty).", + "type": "string" + }, + "debugAdapterPath": { + "description": "Path (fully qualified) to the python debug adapter executable.", + "type": "string" + }, + "django": { + "default": false, + "description": "Django debugging.", + "type": "boolean" + }, + "env": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "description": "Environment variables defined as a key value pair. 
"Property ends up being the Environment Variable and the value of the property ends up being the value of the Env Variable.", + "type": "object" + }, + "envFile": { + "default": "${workspaceFolder}/.env", + "description": "Absolute path to a file containing environment variable definitions.", + "type": "string" + }, + "gevent": { + "default": false, + "description": "Enable debugging of gevent monkey-patched code.", + "type": "boolean" + }, + "host": { + "default": "localhost", + "description": "IP address of the local debug server (default is localhost).", + "type": "string" + }, + "jinja": { + "default": null, + "description": "Jinja template debugging (e.g. Flask).", + "enum": [ + false, + null, + true + ] + }, + "justMyCode": { + "default": true, + "description": "Debug only user-written code.", + "type": "boolean" + }, + "logToFile": { + "default": false, + "description": "Enable logging of debugger events to a log file.", + "type": "boolean" + }, + "module": { + "default": "", + "description": "Name of the module to be debugged.", + "type": "string" + }, + "pathMappings": { + "default": [], + "items": { + "label": "Path mapping", + "properties": { + "localRoot": { + "default": "${workspaceFolder}", + "label": "Local source root.", + "type": "string" + }, + "remoteRoot": { + "default": "", + "label": "Remote source root.", + "type": "string" + } + }, + "required": [ + "localRoot", + "remoteRoot" + ], + "type": "object" + }, + "label": "Path mappings.", + "type": "array" + }, + "port": { + "default": 0, + "description": "Debug port (default is 0, resulting in the use of a dynamic port).", + "type": "number" + }, + "program": { + "default": "${file}", + "description": "Absolute path to the program.", + "type": "string" + }, + "purpose": { + "default": [], + "description": "Tells extension to use this configuration for test debugging, or when using debug-in-terminal command.", + "items": { + "enum": [ + "debug-test", + "debug-in-terminal" + ], + 
"enumDescriptions": [ + "Use this configuration while debugging tests using test view or test debug commands.", + "Use this configuration while debugging a file using debug in terminal button in the editor." + ] + }, + "type": "array" + }, + "pyramid": { + "default": false, + "description": "Whether debugging Pyramid applications", + "type": "boolean" + }, + "python": { + "default": "${command:python.interpreterPath}", + "description": "Absolute path to the Python interpreter executable; overrides workspace configuration if set.", + "type": "string" + }, + "pythonArgs": { + "default": [], + "description": "Command-line arguments passed to the Python interpreter. To pass arguments to the debug target, use \"args\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "redirectOutput": { + "default": true, + "description": "Redirect output.", + "type": "boolean" + }, + "showReturnValue": { + "default": true, + "description": "Show return value of functions when stepping.", + "type": "boolean" + }, + "stopOnEntry": { + "default": false, + "description": "Automatically stop after launch.", + "type": "boolean" + }, + "subProcess": { + "default": false, + "description": "Whether to enable Sub Process debugging", + "type": "boolean" + }, + "sudo": { + "default": false, + "description": "Running debug program under elevated permissions (on Unix).", + "type": "boolean" + } + } + } + }, + "deprecated": "%python.debugger.deprecatedMessage%", + "configurationSnippets": [], + "label": "Python", + "languages": [ + "python" + ], + "type": "python", + "variables": { + "pickProcess": "python.pickLocalProcess" + }, + "when": "!virtualWorkspace && shellExecutionSupported", + "hiddenWhen": "true" + } + ], + "grammars": [ + { + "language": "pip-requirements", + "path": "./syntaxes/pip-requirements.tmLanguage.json", + "scopeName": "source.pip-requirements" + } + ], + "jsonValidation": [ + { + "fileMatch": ".condarc", + "url": "./schemas/condarc.json" + }, + { + "fileMatch": 
"environment.yml", + "url": "./schemas/conda-environment.json" + }, + { + "fileMatch": "meta.yaml", + "url": "./schemas/conda-meta.json" + } + ], + "keybindings": [ + { + "command": "python.refreshTensorBoard", + "key": "ctrl+r", + "mac": "cmd+r", + "when": "python.hasActiveTensorBoardSession" + }, + { + "command": "python.execInConsole", + "key": "ctrl+shift+enter", + "mac": "cmd+shift+enter", + "when": "editorTextFocus && editorLangId == python && !findInputFocussed && !replaceInputFocussed && !jupyter.ownsSelection && !notebookEditorFocused" + }, + { + "command": "python.execSelectionInConsole", + "key": "ctrl+enter", + "mac": "cmd+enter", + "when": "editorTextFocus && editorLangId == python && !findInputFocussed && !replaceInputFocussed && !jupyter.ownsSelection && !notebookEditorFocused" + } + ], + "languages": [ + { + "aliases": [ + "Jinja" + ], + "extensions": [ + ".j2", + ".jinja2" + ], + "id": "jinja" + }, + { + "aliases": [ + "pip requirements", + "requirements.txt" + ], + "configuration": "./languages/pip-requirements.json", + "filenamePatterns": [ + "**/*requirements*.{txt, in}", + "**/*constraints*.txt", + "**/requirements/*.{txt,in}", + "**/constraints/*.txt" + ], + "filenames": [ + "constraints.txt", + "requirements.in", + "requirements.txt" + ], + "id": "pip-requirements" + }, + { + "filenames": [ + ".condarc" + ], + "id": "yaml" + }, + { + "filenames": [ + ".flake8", + ".pep8", + ".pylintrc", + ".pypirc" + ], + "id": "ini" + }, + { + "filenames": [ + "Pipfile", + "poetry.lock" + ], + "id": "toml" + }, + { + "filenames": [ + "Pipfile.lock" + ], + "id": "json" + } + ], + "menus": { + "commandPalette": [ + { + "category": "Python", + "command": "python.analysis.restartLanguageServer", + "title": "%python.command.python.analysis.restartLanguageServer.title%", + "when": "!virtualWorkspace && shellExecutionSupported && editorLangId == python" + }, + { + "category": "Python", + "command": "python.clearCacheAndReload", + "title": 
"%python.command.python.clearCacheAndReload.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.clearWorkspaceInterpreter", + "title": "%python.command.python.clearWorkspaceInterpreter.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.configureTests", + "title": "%python.command.python.configureTests.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.createEnvironment", + "title": "%python.command.python.createEnvironment.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.createEnvironment-button", + "title": "%python.command.python.createEnvironment.title%", + "when": "false" + }, + { + "category": "Python", + "command": "python.createTerminal", + "title": "%python.command.python.createTerminal.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.enableSourceMapSupport", + "title": "%python.command.python.enableSourceMapSupport.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.execInTerminal", + "title": "%python.command.python.execInTerminal.title%", + "when": "!virtualWorkspace && shellExecutionSupported && editorLangId == python" + }, + { + "category": "Python", + "command": "python.execInTerminal-icon", + "icon": "$(play)", + "title": "%python.command.python.execInTerminalIcon.title%", + "when": "false" + }, + { + "category": "Python", + "command": "python.execInConsole", + "title": "%python.command.python.execInConsole.title%", + "when": "!virtualWorkspace && shellExecutionSupported && editorLangId == python" + }, + { + "category": "Python", + "command": "python.debugInTerminal", + "icon": "$(debug-alt)", + "title": 
"%python.command.python.debugInTerminal.title%", + "when": "!virtualWorkspace && shellExecutionSupported && editorLangId == python" + }, + { + "category": "Python", + "command": "python.execSelectionInConsole", + "title": "%python.command.python.execSelectionInConsole.title%", + "when": "!virtualWorkspace && shellExecutionSupported && editorLangId == python" + }, + { + "category": "Python", + "command": "python.execSelectionInDjangoShell", + "title": "%python.command.python.execSelectionInDjangoShell.title%", + "when": "false" + }, + { + "category": "Python", + "command": "python.execSelectionInTerminal", + "title": "%python.command.python.execSelectionInTerminal.title%", + "when": "false" + }, + { + "category": "Python", + "command": "python.launchTensorBoard", + "title": "%python.command.python.launchTensorBoard.title%", + "when": "!virtualWorkspace && shellExecutionSupported && !python.tensorboardExtInstalled" + }, + { + "category": "Python", + "command": "python.refreshTensorBoard", + "enablement": "python.hasActiveTensorBoardSession", + "icon": "$(refresh)", + "title": "%python.command.python.refreshTensorBoard.title%", + "when": "!virtualWorkspace && shellExecutionSupported && !python.tensorboardExtInstalled" + }, + { + "category": "Python", + "command": "python.reportIssue", + "title": "%python.command.python.reportIssue.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Test", + "command": "testing.reRunFailTests", + "icon": "$(run-errors)", + "title": "%python.command.testing.rerunFailedTests.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.setInterpreter", + "title": "%python.command.python.setInterpreter.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.startREPL", + "title": "%python.command.python.startREPL.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + 
}, + { + "category": "Python", + "command": "python.viewLanguageServerOutput", + "enablement": "python.hasLanguageServerOutputChannel", + "title": "%python.command.python.viewLanguageServerOutput.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + }, + { + "category": "Python", + "command": "python.viewOutput", + "title": "%python.command.python.viewOutput.title%", + "when": "!virtualWorkspace && shellExecutionSupported" + } + ], + "editor/content": [ + { + "group": "Python", + "command": "python.createEnvironment-button", + "when": "showCreateEnvButton && resourceLangId == pip-requirements && !virtualWorkspace && shellExecutionSupported && !inDiffEditor && !isMergeResultEditor && pythonDepsNotInstalled" + }, + { + "group": "Python", + "command": "python.createEnvironment-button", + "when": "showCreateEnvButton && resourceFilename == pyproject.toml && pipInstallableToml && !virtualWorkspace && shellExecutionSupported && !inDiffEditor && !isMergeResultEditor && pythonDepsNotInstalled" + } + ], + "editor/context": [ + { + "submenu": "python.run", + "group": "Python", + "when": "editorLangId == python && !virtualWorkspace && shellExecutionSupported && isWorkspaceTrusted" + }, + { + "submenu": "python.runFileInteractive", + "group": "Jupyter2", + "when": "editorLangId == python && !virtualWorkspace && shellExecutionSupported && !isJupyterInstalled && isWorkspaceTrusted" + } + ], + "python.runFileInteractive": [ + { + "command": "python.installJupyter", + "group": "Jupyter2", + "when": "resourceLangId == python && !virtualWorkspace && shellExecutionSupported" + } + ], + "python.run": [ + { + "command": "python.execSelectionInConsole", + "group": "Python", + "when": "editorFocus && editorLangId == python && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execInConsole", + "group": "Python", + "when": "resourceLangId == python && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execInTerminal", + 
"group": "Python", + "when": "resourceLangId == python && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execSelectionInDjangoShell", + "group": "Python", + "when": "false" + }, + { + "command": "python.execSelectionInTerminal", + "group": "Python", + "when": "false" + } + ], + "editor/title": [ + { + "command": "python.refreshTensorBoard", + "group": "navigation@0", + "when": "python.hasActiveTensorBoardSession && !virtualWorkspace && shellExecutionSupported" + } + ], + "editor/title/run": [ + { + "command": "python.execSelectionInConsole", + "group": "navigation@0", + "title": "%python.command.python.execSelectionInConsole.title%", + "when": "resourceLangId == python && !isInDiffEditor && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execInConsole", + "group": "navigation@1", + "title": "%python.command.python.execInConsole.title%", + "when": "resourceLangId == python && !isInDiffEditor && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execInTerminal-icon", + "group": "navigation@2", + "title": "%python.command.python.execInTerminalIcon.title%", + "when": "resourceLangId == python && !isInDiffEditor && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.debugInTerminal", + "group": "navigation@3", + "title": "%python.command.python.debugInTerminal.title%", + "when": "resourceLangId == python && !isInDiffEditor && !virtualWorkspace && shellExecutionSupported" + } + ], + "explorer/context": [ + { + "command": "python.execInConsole", + "group": "Python", + "when": "resourceLangId == python && !virtualWorkspace && shellExecutionSupported" + }, + { + "command": "python.execInTerminal", + "group": "Python", + "when": "resourceLangId == python && !virtualWorkspace && shellExecutionSupported" + } + ], + "file/newFile": [ + { + "command": "python.createNewFile", + "group": "file", + "when": "!virtualWorkspace" + } + ], + "view/title": [ + { + "command": 
"testing.reRunFailTests", + "when": "view == workbench.view.testing && hasFailedTests && !virtualWorkspace && shellExecutionSupported", + "group": "navigation@1" + } + ] + }, + "snippets": [ + { + "language": "python", + "path": "./snippets/python.code-snippets" + } + ], + "submenus": [ + { + "id": "python.run", + "label": "%python.editor.context.submenu.runPython%", + "icon": "$(play)" + }, + { + "id": "python.runFileInteractive", + "label": "%python.editor.context.submenu.runPythonInteractive%" + } + ], + "viewsWelcome": [ + { + "view": "testing", + "contents": "Configure a test framework to see your tests here.\n[Configure Python Tests](command:python.configureTests)", + "when": "!virtualWorkspace && shellExecutionSupported" + } + ], + "yamlValidation": [ + { + "fileMatch": ".condarc", + "url": "./schemas/condarc.json" + }, + { + "fileMatch": "environment.yml", + "url": "./schemas/conda-environment.json" + }, + { + "fileMatch": "meta.yaml", + "url": "./schemas/conda-meta.json" + } + ], + "languageRuntimes": [ + { + "languageId": "python" + } + ] + }, + "scripts": { + "package": "gulp clean && gulp prePublishBundle && vsce package -o ms-python-insiders.vsix", + "prePublish": "gulp clean && gulp prePublishNonBundle", + "compile": "tsc -watch -p ./", + "compileApi": "node ./node_modules/typescript/lib/tsc.js -b ./pythonExtensionApi/tsconfig.json", + "compiled": "deemon npm run compile", + "kill-compiled": "deemon --kill npm run compile", + "checkDependencies": "gulp checkDependencies", + "postinstall": "gulp installPythonLibs", + "test": "node ./out/test/standardTest.js && node ./out/test/multiRootTest.js", + "test:unittests": "mocha --config ./build/.mocha.unittests.json", + "test:unittests:cover": "nyc --no-clean --nycrc-path ./build/.nycrc mocha --config ./build/.mocha.unittests.json", + "test:functional": "mocha --require source-map-support/register --config ./build/.mocha.functional.json", + "test:functional:perf": "node --inspect-brk 
./node_modules/mocha/bin/_mocha --require source-map-support/register --config ./build/.mocha.functional.perf.json", + "test:functional:memleak": "node --inspect-brk ./node_modules/mocha/bin/_mocha --require source-map-support/register --config ./build/.mocha.functional.json", + "test:functional:cover": "nyc --no-clean --nycrc-path ./build/.nycrc mocha --require source-map-support/register --config ./build/.mocha.functional.json", + "test:cover:report": "nyc --nycrc-path ./build/.nycrc report --reporter=text --reporter=html --reporter=text-summary --reporter=cobertura", + "testDebugger": "node ./out/test/testBootstrap.js ./out/test/debuggerTest.js", + "testDebugger:cover": "nyc --no-clean --use-spawn-wrap --nycrc-path ./build/.nycrc --require source-map-support/register node ./out/test/debuggerTest.js", + "testSingleWorkspace": "node ./out/test/testBootstrap.js ./out/test/standardTest.js", + "testSingleWorkspace:cover": "nyc --no-clean --use-spawn-wrap --nycrc-path ./build/.nycrc --require source-map-support/register node ./out/test/standardTest.js", + "preTestJediLSP": "node ./out/test/languageServers/jedi/lspSetup.js", + "testJediLSP": "node ./out/test/languageServers/jedi/lspSetup.js && cross-env CODE_TESTS_WORKSPACE=src/test VSC_PYTHON_CI_TEST_GREP='Language Server:' node ./out/test/testBootstrap.js ./out/test/standardTest.js && node ./out/test/languageServers/jedi/lspTeardown.js", + "testMultiWorkspace": "node ./out/test/testBootstrap.js ./out/test/multiRootTest.js", + "testPerformance": "node ./out/test/testBootstrap.js ./out/test/performanceTest.js", + "testSmoke": "cross-env INSTALL_JUPYTER_EXTENSION=true \"node ./out/test/smokeTest.js\"", + "testInsiders": "cross-env VSC_PYTHON_CI_TEST_VSC_CHANNEL=insiders INSTALL_PYLANCE_EXTENSION=true TEST_FILES_SUFFIX=insiders.test CODE_TESTS_WORKSPACE=src/testMultiRootWkspc/smokeTests \"node ./out/test/standardTest.js\"", + "lint-staged": "node gulpfile.js", + "lint": "eslint --ext .ts,.js src build 
pythonExtensionApi", + "lint-fix": "eslint --fix --ext .ts,.js src build pythonExtensionApi gulpfile.js", + "format-check": "prettier --check 'src/**/*.ts' 'build/**/*.js' '.github/**/*.yml' gulpfile.js", + "format-fix": "prettier --write 'src/**/*.ts' 'build/**/*.js' '.github/**/*.yml' gulpfile.js", + "clean": "gulp clean", + "addExtensionPackDependencies": "gulp addExtensionPackDependencies", + "updateBuildNumber": "gulp updateBuildNumber", + "verifyBundle": "gulp verifyBundle", + "webpack": "webpack" + }, + "dependencies": { + "@iarna/toml": "^2.2.5", + "@jupyter-widgets/html-manager": "^1.0.9", + "@vscode/extension-telemetry": "^0.8.4", + "@vscode/jupyter-lsp-middleware": "^0.2.50", + "arch": "^2.1.0", + "fs-extra": "^10.0.1", + "glob": "^7.2.0", + "hash.js": "^1.1.7", + "iconv-lite": "^0.6.3", + "inversify": "^6.0.1", + "jsonc-parser": "^3.0.0", + "jupyter-matplotlib": "^0.11.3", + "lodash": "^4.17.21", + "minimatch": "^5.0.1", + "named-js-regexp": "^1.3.3", + "node-stream-zip": "^1.6.0", + "p-queue": "^6.6.2", + "portfinder": "^1.0.28", + "reflect-metadata": "^0.1.12", + "requirejs": "^2.3.6", + "rxjs": "^6.5.4", + "rxjs-compat": "^6.5.4", + "semver": "^7.5.2", + "stack-trace": "0.0.10", + "sudo-prompt": "^9.2.1", + "tmp": "^0.0.33", + "uint64be": "^3.0.0", + "unicode": "^14.0.0", + "untildify": "^4.0.0", + "vscode-debugprotocol": "^1.28.0", + "vscode-jsonrpc": "^8.2.0", + "vscode-languageclient": "^9.0.1", + "vscode-languageserver-protocol": "^3.17.5", + "vscode-tas-client": "^0.1.84", + "which": "^2.0.2", + "winreg": "^1.2.4", + "xml2js": "^0.5.0" + }, + "devDependencies": { + "@istanbuljs/nyc-config-typescript": "^1.0.2", + "@types/bent": "^7.3.0", + "@types/chai": "^4.1.2", + "@types/chai-arrays": "^2.0.0", + "@types/chai-as-promised": "^7.1.0", + "@types/download": "^8.0.1", + "@types/fs-extra": "^9.0.13", + "@types/glob": "^7.2.0", + "@types/lodash": "^4.14.104", + "@types/mocha": "^9.1.0", + "@types/node": "^18.17.1", + "@types/semver": "^5.5.0", + 
"@types/shortid": "^0.0.29", + "@types/sinon": "^10.0.11", + "@types/stack-trace": "0.0.29", + "@types/tmp": "^0.0.33", + "@types/vscode": "^1.81.0", + "@types/which": "^2.0.1", + "@types/winreg": "^1.2.30", + "@types/xml2js": "0.4.9", + "@typescript-eslint/eslint-plugin": "^3.7.0", + "@typescript-eslint/parser": "^3.7.0", + "@vscode/test-electron": "^2.3.8", + "@vscode/vsce": "^2.18.0", + "bent": "^7.3.12", + "chai": "^4.1.2", + "chai-arrays": "^2.0.0", + "chai-as-promised": "^7.1.1", + "copy-webpack-plugin": "^9.1.0", + "cross-spawn": "^6.0.5", + "del": "^6.0.0", + "download": "^8.0.0", + "eslint": "^7.2.0", + "eslint-config-airbnb": "^18.2.0", + "eslint-config-prettier": "^8.3.0", + "eslint-plugin-import": "^2.25.4", + "eslint-plugin-jsx-a11y": "^6.3.1", + "eslint-plugin-react": "^7.20.3", + "eslint-plugin-react-hooks": "^4.0.0", + "expose-loader": "^3.1.0", + "flat": "^5.0.2", + "get-port": "^5.1.1", + "gulp": "^4.0.0", + "gulp-typescript": "^5.0.0", + "mocha": "^9.2.2", + "mocha-junit-reporter": "^2.0.2", + "mocha-multi-reporters": "^1.1.7", + "node-has-native-dependencies": "^1.0.2", + "node-loader": "^1.0.2", + "node-polyfill-webpack-plugin": "^1.1.4", + "nyc": "^15.0.0", + "prettier": "2.0.2", + "rewiremock": "^3.13.0", + "rimraf": "^3.0.2", + "shortid": "^2.2.8", + "sinon": "^13.0.1", + "source-map-support": "^0.5.12", + "ts-loader": "^9.2.8", + "ts-mockito": "^2.5.0", + "ts-node": "^10.7.0", + "tsconfig-paths-webpack-plugin": "^3.2.0", + "typemoq": "^2.1.0", + "typescript": "4.5.5", + "uuid": "^8.3.2", + "webpack": "^5.76.0", + "webpack-bundle-analyzer": "^4.5.0", + "webpack-cli": "^4.9.2", + "webpack-fix-default-import-plugin": "^1.0.3", + "webpack-merge": "^5.8.0", + "webpack-node-externals": "^3.0.0", + "webpack-require-from": "^1.8.6", + "worker-loader": "^3.0.8", + "yargs": "^15.3.1" + }, + "extensionDependencies": [ + "vscode.jupyter-adapter" + ] +} \ No newline at end of file diff --git a/extensions/positron-python/package.nls.json 
b/extensions/positron-python/package.nls.json new file mode 100644 index 00000000000..127c2b08ac4 --- /dev/null +++ b/extensions/positron-python/package.nls.json @@ -0,0 +1,184 @@ +{ + "displayName": "Python", + "description": "Python kernel, Debugging (multi-threaded, remote), code formatting, refactoring, unit tests, and more.", + "python.command.python.startREPL.title": "Start REPL", + "python.command.python.createEnvironment.title": "Create Environment...", + "python.command.python.createNewFile.title": "New Python File", + "python.command.python.createTerminal.title": "Create Terminal", + "python.command.python.execInTerminal.title": "Run Python File in Terminal", + "python.command.python.execInConsole.title": "Run Python File in Console", + "python.command.python.debugInTerminal.title": "Debug Python File in Terminal", + "python.command.python.execInTerminalIcon.title": "Run Python File in Terminal", + "python.command.python.execInDedicatedTerminal.title": "Run Python File in Dedicated Terminal", + "python.command.python.setInterpreter.title": "Select Interpreter", + "python.command.python.clearWorkspaceInterpreter.title": "Clear Workspace Interpreter Setting", + "python.command.python.viewOutput.title": "Show Output", + "python.command.python.installJupyter.title": "Install the Jupyter extension", + "python.command.python.viewLanguageServerOutput.title": "Show Language Server Output", + "python.command.python.configureTests.title": "Configure Tests", + "python.command.testing.rerunFailedTests.title": "Rerun Failed Tests", + "python.command.python.execSelectionInTerminal.title": "Run Selection/Line in Python Terminal", + "python.command.python.execSelectionInDjangoShell.title": "Run Selection/Line in Django Shell", + "python.command.python.execSelectionInConsole.title": "Run Selection in Console", + "python.command.python.reportIssue.title": "Report Issue...", + "python.command.python.enableSourceMapSupport.title": "Enable Source Map Support For Extension 
Debugging", + "python.command.python.clearCacheAndReload.title": "Clear Cache and Reload Window", + "python.command.python.analysis.restartLanguageServer.title": "Restart Language Server", + "python.command.python.launchTensorBoard.title": "Launch TensorBoard", + "python.command.python.refreshTensorBoard.title": "Refresh TensorBoard", + "python.createEnvironment.contentButton.description": "Show or hide Create Environment button in the editor for `requirements.txt` or other dependency files.", + "python.createEnvironment.trigger.description": "Detect if environment creation is required for the current project", + "python.menu.createNewFile.title": "Python File", + "python.editor.context.submenu.runPython": "Run Python", + "python.editor.context.submenu.runPythonInteractive": "Run in Interactive window", + "python.activeStateToolPath.description": "Path to the State Tool executable for ActiveState runtimes (version 0.36+).", + "python.autoComplete.extraPaths.description": "List of paths to libraries and the like that need to be imported by auto complete engine. E.g. when using Google App SDK, the paths are not in system path, hence need to be added into this list.", + "python.condaPath.description": "Path to the conda executable to use for activation (version 4.4+).", + "python.debugger.deprecatedMessage": "This configuration will be deprecated soon. Please replace `python` with `debugpy` to use the new Python Debugger extension.", + "python.defaultInterpreterPath.description": "Path to default Python to use when extension loads up for the first time, no longer used once an interpreter is selected for the workspace. 
See [here](https://aka.ms/AAfekmf) to understand when this is used", + "python.diagnostics.sourceMapsEnabled.description": "Enable source map support for meaningful stack traces in error logs.", + "python.envFile.description": "Absolute path to a file containing environment variable definitions.", + "python.experiments.enabled.description": "Enables A/B tests experiments in the Python extension. If enabled, you may get included in proposed enhancements and/or features.", + "python.experiments.optInto.description": "List of experiment to opt into. If empty, user is assigned the default experiment groups. See [here](https://github.com/microsoft/vscode-python/wiki/AB-Experiments) for more details.", + "python.experiments.optOutFrom.description": "List of experiment to opt out of. If empty, user is assigned the default experiment groups. See [here](https://github.com/microsoft/vscode-python/wiki/AB-Experiments) for more details.", + "python.experiments.All.description": "Combined list of all experiments.", + "python.experiments.pythonSurveyNotification.description": "Denotes the Python Survey Notification experiment.", + "python.experiments.pythonPromptNewToolsExt.description": "Denotes the Python Prompt New Tools Extension experiment.", + "python.experiments.pythonTerminalEnvVarActivation.description": "Enables use of environment variables to activate terminals instead of sending activation commands.", + "python.experiments.pythonDiscoveryUsingWorkers.description": "Enables use of worker threads to do heavy computation when discovering interpreters.", + "python.experiments.pythonTestAdapter.description": "Denotes the Python Test Adapter experiment.", + "python.experiments.pythonREPLSmartSend.description": "Denotes the Python REPL Smart Send experiment.", + "python.experiments.pythonRecommendTensorboardExt.description": "Denotes the Tensorboard Extension recommendation experiment.", + "python.globalModuleInstallation.description": "Whether to install Python modules 
globally when not using an environment.", + "python.languageServerDebug.description": "Whether debug should be enabled for Positron's Python language server. Requires a restart to take effect.", + "python.languageServerLogLevel.description": "Controls the [logging level](https://docs.python.org/3/library/logging.html#levels) of Positron's Python language server. Requires a restart to take effect.", + "python.languageServer.description": "Defines type of the language server.", + "python.languageServer.defaultDescription": "Automatically select a language server: Pylance if installed and available, otherwise fallback to Jedi.", + "python.languageServer.jediDescription": "Use Jedi behind the Language Server Protocol (LSP) as a language server.", + "python.languageServer.pylanceDescription": "Use Pylance as a language server.", + "python.languageServer.noneDescription": "Disable language server capabilities.", + "python.interpreter.infoVisibility.description": "Controls when to display information of selected interpreter in the status bar.", + "python.interpreter.infoVisibility.never.description": "Never display information.", + "python.interpreter.infoVisibility.onPythonRelated.description": "Only display information if Python-related files are opened.", + "python.interpreter.infoVisibility.always.description": "Always display information.", + "python.logging.level.description": "The logging level the extension logs at, defaults to 'error'", + "python.logging.level.deprecation": "This setting is deprecated. 
Please use command `Developer: Set Log Level...` to set logging level.", + "python.missingPackage.severity.description": "Set severity of missing packages in requirements.txt or pyproject.toml", + "python.pipenvPath.description": "Path to the pipenv executable to use for activation.", + "python.poetryPath.description": "Path to the poetry executable.", + "python.quietMode.description": "Start Positron's IPython shell in quiet mode, to suppress initial version and help messages (shut down Python and start a new Python session to apply).", + "python.EnableREPLSmartSend.description": "Toggle Smart Send for the Python REPL. Smart Send enables sending the smallest runnable block of code to the REPL on Shift+Enter and moves the cursor accordingly.", + "python.tensorBoard.logDirectory.description": "Set this setting to your preferred TensorBoard log directory to skip log directory prompt when starting TensorBoard.", + "python.tensorBoard.logDirectory.markdownDeprecationMessage": "Tensorboard support has been moved to the extension [Tensorboard extension](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.tensorboard). Instead use the setting `tensorBoard.logDirectory`.", + "python.tensorBoard.logDirectory.deprecationMessage": "Tensorboard support has been moved to the extension Tensorboard extension. 
Instead use the setting `tensorBoard.logDirectory`.", + "python.terminal.activateEnvInCurrentTerminal.description": "Activate Python Environment in the current Terminal on load of the Extension.", + "python.terminal.activateEnvironment.description": "Activate Python Environment in all Terminals created.", + "python.terminal.executeInFileDir.description": "When executing a file in the terminal, whether to use execute in the file's directory, instead of the current open folder.", + "python.terminal.focusAfterLaunch.description": "When launching a python terminal, whether to focus the cursor on the terminal.", + "python.terminal.launchArgs.description": "Python launch arguments to use when executing a file in the terminal.", + "python.testing.autoTestDiscoverOnSaveEnabled.description": "Enable auto run test discovery when saving a test file.", + "python.testing.cwd.description": "Optional working directory for tests.", + "python.testing.debugPort.description": "Port number used for debugging of tests.", + "python.testing.promptToConfigure.description": "Prompt to configure a test framework if potential tests directories are discovered.", + "python.testing.pytestArgs.description": "Arguments passed in. Each argument is a separate item in the array.", + "python.testing.pytestEnabled.description": "Enable testing using pytest.", + "python.testing.pytestPath.description": "Path to pytest (pytest), you can use a custom version of pytest by modifying this setting to include the full path.", + "python.testing.unittestArgs.description": "Arguments passed in. Each argument is a separate item in the array.", + "python.testing.unittestEnabled.description": "Enable testing using unittest.", + "python.venvFolders.description": "Folders in your home directory to look into for virtual environments (supports pyenv, direnv and virtualenvwrapper by default).", + "python.venvPath.description": "Path to folder with a list of Virtual Environments (e.g. 
~/.pyenv, ~/Envs, ~/.virtualenvs).", + "walkthrough.pythonWelcome.title": "Get Started with Python Development", + "walkthrough.pythonWelcome.description": "Your first steps to set up a Python project with all the powerful tools and features that the Python extension has to offer!", + "walkthrough.step.python.createPythonFile.title": "Create a Python file", + "walkthrough.step.python.createPythonFolder.title": "Open a Python project folder", + "walkthrough.step.python.createPythonFile.description": { + "message": "[Open](command:toSide:workbench.action.files.openFile) or [create](command:toSide:workbench.action.files.newUntitledFile?%7B%22languageId%22%3A%22python%22%7D) a Python file - make sure to save it as \".py\".\n[Create Python File](command:toSide:workbench.action.files.newUntitledFile?%7B%22languageId%22%3A%22python%22%7D)", + "comment": [ + "{Locked='](command:toSide:workbench.action.files.newUntitledFile?%7B%22languageId%22%3A%22python%22%7D'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.createPythonFolder.description": { + "message": "[Open](command:workbench.action.files.openFolder) or create a project folder.\n[Open Project Folder](command:workbench.action.files.openFolder)", + "comment": [ + "{Locked='](command:workbench.action.files.openFolder'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.installPythonWin8.title": "Install Python", + "walkthrough.step.python.installPythonWin8.description": "The Python Extension requires Python to be installed. 
Install Python [from python.org](https://www.python.org/downloads).\n\n[Install Python](https://www.python.org/downloads)\n", + "walkthrough.step.python.installPythonMac.title": "Install Python", + "walkthrough.step.python.installPythonMac.description": { + "message": "The Python Extension requires Python to be installed. Install Python 3 through the terminal.\n[Install Python via Brew](command:python.installPythonOnMac)\n", + "comment": [ + "{Locked='](command:python.installPythonOnMac'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.installPythonLinux.title": "Install Python", + "walkthrough.step.python.installPythonLinux.description": { + "message": "The Python Extension requires Python to be installed. Install Python 3 through the terminal.\n[Install Python via terminal](command:python.installPythonOnLinux)\n", + "comment": [ + "{Locked='](command:python.installPythonOnLinux'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.selectInterpreter.title": "Select a Python Interpreter", + "walkthrough.step.python.selectInterpreter.description": { + "message": "Choose which Python interpreter/environment you want to use for your Python project.\n[Select Python Interpreter](command:python.setInterpreter)\n**Tip**: Run the ``Python: Select Interpreter`` command in the [Command Palette](command:workbench.action.showCommands).", + "comment": [ + "{Locked='](command:python.setInterpreter'}", + "{Locked='](command:workbench.action.showCommands'}", + "Do not translate the 'command:*' part inside of the '(..)'. 
It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.createEnvironment.title": "Create a Python Environment ", + "walkthrough.step.python.createEnvironment.title2": "Create or select a Python Environment ", + "walkthrough.step.python.createEnvironment.description": { + "message": "Create an environment for your Python project.\n[Create Environment](command:python.createEnvironment)\n**Tip**: Run the ``Python: Create Environment`` command in the [Command Palette](command:workbench.action.showCommands).\n 🔍 Check out our [docs](https://aka.ms/pythonenvs) to learn more.", + "comment": [ + "{Locked='](command:python.createEnvironment'}", + "{Locked='](command:workbench.action.showCommands'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.createEnvironment.description2": "Create an environment for your Python project or use [Select Python Interpreter](command:python.setInterpreter) to select an existing one.\n[Create Environment](command:python.createEnvironment)\n**Tip**: Run the ``Python: Create Environment`` command in the [Command Palette](command:workbench.action.showCommands).", + "walkthrough.step.python.runAndDebug.title": "Run and debug your Python file", + "walkthrough.step.python.runAndDebug.description": "Open your Python file and click on the play button on the top right of the editor, or press F5 when on the file and select \"Python File\" to run with the debugger. 
\n \n[Learn more](https://code.visualstudio.com/docs/python/python-tutorial#_run-hello-world)", + "walkthrough.step.python.learnMoreWithDS.title": "Explore more resources", + "walkthrough.step.python.learnMoreWithDS.description": { + "message": "🎨 Explore all the features the Python extension has to offer by looking for \"Python\" in the [Command Palette](command:workbench.action.showCommands). \n 📈 Learn more about getting started with [data science](command:workbench.action.openWalkthrough?%7B%22category%22%3A%22ms-python.python%23pythonDataScienceWelcome%22%2C%22step%22%3A%22ms-python.python%23python.createNewNotebook%22%7D) in Python. \n ✨ Take a look at our [Release Notes](https://aka.ms/AA8dxtb) to learn more about the latest features. \n \n[Learn More](https://aka.ms/AA8dqti)", + "comment": [ + "{Locked='](command:workbench.action.showCommands'}", + "{Locked='](command:workbench.action.openWalkthrough?%7B%22category%22%3A%22ms-python.python%23pythonDataScienceWelcome%22%2C%22step%22%3A%22ms-python.python%23python.createNewNotebook%22%7D'}", + "Do not translate the 'command:*' part inside of the '(..)'. It is an internal command syntax for VS Code", + "Please make sure there is no space between the right bracket and left parenthesis: ]( this is an internal syntax for links" + ] + }, + "walkthrough.step.python.learnMoreWithDS.description2": "🎨 Explore all the features the Python extension has to offer by looking for \"Python\" in the [Command Palette](command:workbench.action.showCommands). \n 📈 Learn more about getting started with [data science](command:workbench.action.openWalkthrough?%7B%22category%22%3A%22ms-python.python%23pythonDataScienceWelcome%22%2C%22step%22%3A%22ms-python.python%23python.createNewNotebook%22%7D) in Python. \n ✨ Take a look at our [Release Notes](https://aka.ms/AA8dxtb) to learn more about the latest features. 
\n \n[Follow along with the Python Tutorial](https://aka.ms/AA8dqti)", + "walkthrough.pythonDataScienceWelcome.title": "Get Started with Python for Data Science", + "walkthrough.pythonDataScienceWelcome.description": "Your first steps to getting started with a Data Science project with Python!", + "walkthrough.step.python.installJupyterExt.title": "Install Jupyter extension", + "walkthrough.step.python.installJupyterExt.description": "If you haven't already, install the [Jupyter extension](command:workbench.extensions.search?\"ms-toolsai.jupyter\") to take full advantage of notebooks experiences in VS Code!\n \n[Search Jupyter extension](command:workbench.extensions.search?\"ms-toolsai.jupyter\")", + "walkthrough.step.python.createNewNotebook.title": "Create or open a Jupyter Notebook", + "walkthrough.step.python.createNewNotebook.description": "Right click in the file explorer and create a new file with an .ipynb extension. Or, open the [Command Palette](command:workbench.action.showCommands) and run the command \n``Jupyter: Create New Blank Notebook``.\n[Create new Jupyter Notebook](command:toSide:jupyter.createnewnotebook)\n If you have an existing project, you can also [open a folder](command:workbench.action.files.openFolder) and/or clone a project from GitHub: [clone a Git repository](command:git.clone).", + "walkthrough.step.python.openInteractiveWindow.title": "Open the Python Interactive Window", + "walkthrough.step.python.openInteractiveWindow.description": "The Python Interactive Window is a Python shell where you can execute and view the results of your Python code. 
You can create cells on a Python file by typing ``#%%``.\n \nTo open the interactive window anytime, open the [Command Palette](command:workbench.action.showCommands) and run the command \n``Jupyter: Create Interactive Window``.\n[Open Interactive Window](command:jupyter.createnewinteractive)", + "walkthrough.step.python.dataScienceLearnMore.title": "Find out more!", + "walkthrough.step.python.dataScienceLearnMore.description": "📒 Take a look into the [Jupyter extension](command:workbench.extensions.search?\"ms-toolsai.jupyter\") features, by looking for \"Jupyter\" in the [Command Palette](command:workbench.action.showCommands). \n 🏃🏻 Find out more features in our [Tutorials](https://aka.ms/AAdjzpd). \n[Learn more](https://aka.ms/AAdar6q)", + "walkthrough.step.python.createPythonFile.altText": "Open a Python file or a folder with a Python project.", + "walkthrough.step.python.selectInterpreter.altText": "Selecting a Python interpreter from the status bar", + "walkthrough.step.python.createEnvironment.altText": "Creating a Python environment from the Command Palette", + "walkthrough.step.python.runAndDebug.altText": "How to run and debug in VS Code with F5 or the play button on the top right.", + "walkthrough.step.python.learnMoreWithDS.altText": "Image representing our documentation page and mailing list resources.", + "walkthrough.step.python.installJupyterExt.altText": "Creating a new Jupyter notebook", + "walkthrough.step.python.createNewNotebook.altText": "Creating a new Jupyter notebook", + "walkthrough.step.python.openInteractiveWindow.altText": "Opening Python interactive window", + "walkthrough.step.python.dataScienceLearnMore.altText": "Image representing our documentation page and mailing list resources." 
+} diff --git a/extensions/positron-python/positron-dts/README.md b/extensions/positron-python/positron-dts/README.md new file mode 100644 index 00000000000..f1bf984712a --- /dev/null +++ b/extensions/positron-python/positron-dts/README.md @@ -0,0 +1,8 @@ + +## positron-dts + +This folder contains the Positron API type definitions. It mirrors its sibling +folder `vscode-dts`; the primary difference is that the Positron types do not +have proposals. + + diff --git a/extensions/positron-python/positron-dts/positron.d.ts b/extensions/positron-python/positron-dts/positron.d.ts new file mode 100644 index 00000000000..77995183987 --- /dev/null +++ b/extensions/positron-python/positron-dts/positron.d.ts @@ -0,0 +1,1215 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +declare module 'positron' { + + import * as vscode from 'vscode'; // eslint-disable-line + + /** + * The current Positron version. + */ + export const version: string; + + /** The set of possible language runtime messages */ + export enum LanguageRuntimeMessageType { + /** A message representing output (text, plots, etc.) 
*/ + Output = 'output', + + /** A message representing output from one of the standard streams (stdout or stderr) */ + Stream = 'stream', + + /** A message representing echoed user input */ + Input = 'input', + + /** A message representing an error that occurred while executing user code */ + Error = 'error', + + /** A message representing a prompt for user input */ + Prompt = 'prompt', + + /** A message representing a change in the runtime's online state */ + State = 'state', + + /** A message representing a runtime event */ + Event = 'event', + + /** A message representing a new comm (client instance) being opened from the runtime side */ + CommOpen = 'comm_open', + + /** A message representing data received via a comm (to a client instance) */ + CommData = 'comm_data', + + /** A message indicating that a comm (client instance) was closed from the server side */ + CommClosed = 'comm_closed', + } + + /** + * The set of possible statuses for a language runtime while online + */ + export enum RuntimeOnlineState { + /** The runtime is ready to execute code. */ + Idle = 'idle', + + /** The runtime is busy executing code. */ + Busy = 'busy', + } + + /** + * The set of possible statuses for a language runtime + */ + export enum RuntimeState { + /** The runtime has not been started or initialized yet. */ + Uninitialized = 'uninitialized', + + /** The runtime is initializing (preparing to start). */ + Initializing = 'initializing', + + /** The runtime is in the process of starting up. It isn't ready for messages. */ + Starting = 'starting', + + /** The runtime has a heartbeat and is ready for messages. */ + Ready = 'ready', + + /** The runtime is ready to execute code. */ + Idle = 'idle', + + /** The runtime is busy executing code. */ + Busy = 'busy', + + /** The runtime's host process has ended. */ + Exited = 'exited', + + /** The runtime is not responding to heartbeats and is presumed offline. 
*/ + Offline = 'offline', + } + + /** + * Results of analyzing code fragment for completeness + */ + export enum RuntimeCodeFragmentStatus { + /** The code fragment is complete: it is a valid, self-contained expression */ + Complete = 'complete', + + /** The code is incomplete: it is an expression that is missing elements or operands, such as "1 +" or "foo(" */ + Incomplete = 'incomplete', + + /** The code is invalid: an expression that cannot be parsed because of a syntax error */ + Invalid = 'invalid', + + /** It was not possible to ascertain the code fragment's status */ + Unknown = 'unknown' + } + + /** + * Possible code execution modes for a language runtime + */ + export enum RuntimeCodeExecutionMode { + /** The code was entered interactively, and should be executed and stored in the runtime's history. */ + Interactive = 'interactive', + + /** The code should be executed but not stored in history. */ + Transient = 'transient', + + /** The code execution should be fully silent, neither displayed to the user nor stored in history. */ + Silent = 'silent' + } + + /** + * Possible error dispositions for a language runtime + */ + export enum RuntimeErrorBehavior { + /** The runtime should stop when an error is encountered. */ + Stop = 'stop', + + /** The runtime should continue execution when an error is encountered */ + Continue = 'continue', + } + + /** + * Possible reasons a language runtime could exit. + */ + export enum RuntimeExitReason { + /** The runtime exited because it could not start correctly. */ + StartupFailed = 'startupFailed', + + /** The runtime is shutting down at the request of the user. */ + Shutdown = 'shutdown', + + /** The runtime exited because it was forced to quit. */ + ForcedQuit = 'forcedQuit', + + /** The runtime is exiting in order to restart. */ + Restart = 'restart', + + /** The runtime is exiting in order to switch to a new runtime. 
*/ + SwitchRuntime = 'switchRuntime', + + /** The runtime exited because of an error, most often a crash. */ + Error = 'error', + + /** + * The runtime exited for an unknown reason. This typically means that + * it exited unexpectedly but with a normal exit code (0). + */ + Unknown = 'unknown', + } + + /** + * LanguageRuntimeExit is an interface that defines an event occurring when a + * language runtime exits. + */ + export interface LanguageRuntimeExit { + /** Runtime name */ + runtime_name: string; + + /** + * The process exit code, if the runtime is backed by a process. If the + * runtime is not backed by a process, this should just be 0 for a + * successful exit and 1 for an error. + */ + exit_code: number; + + /** + * The reason the runtime exited. + */ + reason: RuntimeExitReason; + + /** The exit message, if any. */ + message: string; + } + + /** + * LanguageRuntimeMessage is an interface that defines an event occurring in a + * language runtime, such as outputting text or plots. + */ + export interface LanguageRuntimeMessage { + /** The event ID */ + id: string; + + /** The ID of this event's parent (the event that caused it), if applicable */ + parent_id: string; + + /** The message's date and time, in ISO 8601 format */ + when: string; + + /** The type of event */ + type: LanguageRuntimeMessageType; + } + + /** LanguageRuntimeOutput is a LanguageRuntimeMessage representing output (text, plots, etc.) */ + export interface LanguageRuntimeOutput extends LanguageRuntimeMessage { + /** A record of data MIME types to the associated data, e.g. `text/plain` => `'hello world'` */ + data: Record; + } + + /** + * The set of possible output locations for a LanguageRuntimeOutput. 
+ */ + export enum PositronOutputLocation { + /** The output should be displayed inline in Positron's Console */ + Console = 'console', + + /** The output should be displayed in Positron's Viewer pane */ + Viewer = 'viewer', + + /** The output should be displayed in Positron's Plots pane */ + Plot = 'plot', + } + + /** + * LanguageRuntimeWebOutput amends LanguageRuntimeOutput with additional information needed + * to render web content in Positron. + */ + export interface LanguageRuntimeWebOutput extends LanguageRuntimeOutput { + /** Where the web output should be displayed */ + output_location: PositronOutputLocation; + + /** The set of resource roots needed to display the output */ + resource_roots: vscode.Uri[]; + } + + /** + * The set of standard stream names supported for streaming textual output. + */ + export enum LanguageRuntimeStreamName { + Stdout = 'stdout', + Stderr = 'stderr' + } + + /** + * LanguageRuntimeStream is a LanguageRuntimeMessage representing output from a standard stream + * (stdout or stderr). + */ + export interface LanguageRuntimeStream extends LanguageRuntimeMessage { + /** The stream name */ + name: LanguageRuntimeStreamName; + + /** The stream's text */ + text: string; + } + + /** LanguageRuntimeInput is a LanguageRuntimeMessage representing echoed user input */ + export interface LanguageRuntimeInput extends LanguageRuntimeMessage { + /** The code that was input */ + code: string; + + /** The execution count */ + execution_count: number; + } + + /** LanguageRuntimePrompt is a LanguageRuntimeMessage representing a prompt for input */ + export interface LanguageRuntimePrompt extends LanguageRuntimeMessage { + /** The prompt text */ + prompt: string; + + /** Whether this is a password prompt (and typing should be hidden) */ + password: boolean; + } + + /** LanguageRuntimeInfo contains metadata about the runtime after it has started. 
*/ + export interface LanguageRuntimeInfo { + /** A startup banner */ + banner: string; + + /** The implementation version number */ + implementation_version: string; + + /** The language version number */ + language_version: string; + + /** Initial prompt string in case user customized it */ + input_prompt?: string; + + /** Continuation prompt string in case user customized it */ + continuation_prompt?: string; + } + + /** LanguageRuntimeState is a LanguageRuntimeMessage representing a new runtime state */ + export interface LanguageRuntimeState extends LanguageRuntimeMessage { + /** The new state */ + state: RuntimeOnlineState; + } + + /** LanguageRuntimeError is a LanguageRuntimeMessage that represents a run-time error */ + export interface LanguageRuntimeError extends LanguageRuntimeMessage { + /** The error name */ + name: string; + + /** The error message */ + message: string; + + /** The error stack trace */ + traceback: Array; + } + + /** + * LanguageRuntimeCommOpen is a LanguageRuntimeMessage that indicates a + * comm (client instance) was opened from the server side + */ + export interface LanguageRuntimeCommOpen extends LanguageRuntimeMessage { + /** The unique ID of the comm being opened */ + comm_id: string; + + /** The name (type) of the comm being opened, e.g. 
'jupyter.widget' */ + target_name: string; + + /** The data from the back-end */ + data: object; + } + + /** LanguageRuntimeCommMessage is a LanguageRuntimeMessage that represents data for a comm (client instance) */ + export interface LanguageRuntimeCommMessage extends LanguageRuntimeMessage { + /** The unique ID of the client comm ID for which the message is intended */ + comm_id: string; + + /** The data from the back-end */ + data: object; + } + + /** + * LanguageRuntimeCommClosed is a LanguageRuntimeMessage that indicates a + * comm (client instance) was closed from the server side + */ + export interface LanguageRuntimeCommClosed extends LanguageRuntimeMessage { + /** The unique ID of the client comm ID for which the message is intended */ + comm_id: string; + + /** The data from the back-end */ + data: object; + } + + /** + * LanguageRuntimeMetadata contains information about a language runtime that is known + * before the runtime is started. + */ + export interface LanguageRuntimeMetadata { + /** The path to the runtime. */ + runtimePath: string; + + /** A unique identifier for this runtime; takes the form of a GUID */ + runtimeId: string; + + /** + * The fully qualified name of the runtime displayed to the user; e.g. "R 4.2 (64-bit)". + * Should be unique across languages. + */ + runtimeName: string; + + /** + * A language specific runtime name displayed to the user; e.g. "4.2 (64-bit)". + * Should be unique within a single language. + */ + runtimeShortName: string; + + /** The version of the runtime itself (e.g. kernel or extension version) as a string; e.g. "0.1" */ + runtimeVersion: string; + + /** The runtime's source or origin; e.g. PyEnv, System, Homebrew, Conda, etc. */ + runtimeSource: string; + + /** The free-form, user-friendly name of the language this runtime can execute; e.g. "R" */ + languageName: string; + + /** + * The Visual Studio Code Language ID of the language this runtime can execute; e.g. 
"r" + * + * See here for a list of known language IDs: + * https://code.visualstudio.com/docs/languages/identifiers#_known-language-identifiers + */ + languageId: string; + + /** The version of the language; e.g. "4.2" */ + languageVersion: string; + + /** The Base64-encoded icon SVG for the language. */ + base64EncodedIconSvg: string | undefined; + + /** Whether the runtime should start up automatically or wait until explicitly requested */ + startupBehavior: LanguageRuntimeStartupBehavior; + + /** Where sessions will be located; used as a hint to control session restoration */ + sessionLocation: LanguageRuntimeSessionLocation; + + /** + * Extra data supplied by the runtime provider; not read by Positron but supplied + * when creating a new session from the metadata. + */ + extraRuntimeData: any; + } + + export interface RuntimeSessionMetadata { + /** The ID of this session */ + readonly sessionId: string; + + /** The user-facing name of this session */ + readonly sessionName: string; + + /** The session's mode */ + readonly sessionMode: LanguageRuntimeSessionMode; + + /** The URI of the notebook document associated with the session, if any */ + readonly notebookUri?: vscode.Uri; + } + + /** + * LanguageRuntimeSessionMode is an enum representing the set of possible + * modes for a language runtime session. + */ + export enum LanguageRuntimeSessionMode { + /** + * The runtime session is bound to a Positron console. Typically, + * there's only one console session per language. + */ + Console = 'console', + + /** The runtime session backs a notebook. */ + Notebook = 'notebook', + + /** The runtime session is a background session (not attached to any UI). */ + Background = 'background', + } + + + /** + * LanguageRuntimeDynState contains information about a language runtime that may + * change after a runtime has started. + */ + export interface LanguageRuntimeDynState { + /** The text the language's interpreter uses to prompt the user for input, e.g. 
">" or ">>>" */ + inputPrompt: string; + + /** The text the language's interpreter uses to prompt the user for continued input, e.g. "+" or "..." */ + continuationPrompt: string; + } + + export enum LanguageRuntimeStartupBehavior { + /** + * The runtime should be started immediately after registration; usually used for runtimes + * that are affiliated with the current workspace. + */ + Immediate = 'immediate', + + /** + * The runtime should start automatically; usually used for runtimes that provide LSPs + */ + Implicit = 'implicit', + + /** + * The runtime should start when the user explicitly requests it; + * usually used for runtimes that only provide REPLs + */ + Explicit = 'explicit', + } + + /** + * An enumeration of possible locations for runtime sessions. + */ + export enum LanguageRuntimeSessionLocation { + /** + * The runtime session is located in the current workspace (usually a + * terminal); it should be restored when the workspace is re-opened. + */ + Workspace = 'workspace', + + /** + * The runtime session is browser-only; it should not be restored when the + * workspace is re-opened. + */ + Browser = 'browser', + } + + /** + * The set of client types that can be generated by a language runtime. Note + * that, because client types can share a namespace with other kinds of + * widgets, each client type in Positron's API is prefixed with the string + * "positron". + */ + export enum RuntimeClientType { + Variables = 'positron.variables', + Lsp = 'positron.lsp', + Dap = 'positron.dap', + Plot = 'positron.plot', + DataExplorer = 'positron.dataExplorer', + Ui = 'positron.ui', + Help = 'positron.help', + Connection = 'positron.connection', + IPyWidget = 'jupyter.widget', + + // Future client types may include: + // - Watch window/variable explorer + // - Code inspector + // - etc. + } + + /** + * The possible states for a language runtime client instance. These + * represent the state of the communications channel between the client and + * the runtime. 
+ */ + export enum RuntimeClientState { + /** The client has not yet been initialized */ + Uninitialized = 'uninitialized', + + /** The connection between the server and the client is being opened */ + Opening = 'opening', + + /** The connection between the server and the client has been established */ + Connected = 'connected', + + /** The connection between the server and the client is being closed */ + Closing = 'closing', + + /** The connection between the server and the client is closed */ + Closed = 'closed', + } + + /** + * An instance of a client widget generated by a language runtime. See + * RuntimeClientType for the set of possible client types. + * + * The client is responsible for disposing itself when it is no longer + * needed; this will trigger the closure of the communications channel + * between the client and the runtime. + */ + export interface RuntimeClientInstance extends vscode.Disposable { + onDidChangeClientState: vscode.Event; + onDidSendEvent: vscode.Event; + performRpc(data: object): Thenable; + getClientState(): RuntimeClientState; + getClientId(): string; + getClientType(): RuntimeClientType; + } + + /** + * RuntimeVariablesClient is a client that tracks the variables in the runtime. + */ + export interface RuntimeVariablesClient extends RuntimeClientInstance { + onDidChangeVariables: vscode.Event>; + getCurrentVariables(): Array; + } + + export interface Variable { + name: string; + value: string; + length: number; + size: number; + } + + export interface LanguageRuntimeManager { + /** + * Returns a generator that yields metadata about the language runtimes + * that are available to the user. + * + * This metadata will be passed to `createSession` to create new runtime + * sessions. + */ + discoverRuntimes(): AsyncGenerator; + + /** + * An optional event that fires when a new runtime is discovered. 
+ * + * Not fired during `discoverRuntimes()`; used to notify Positron of a + * new runtime or environment after the initial discovery has completed. + */ + onDidDiscoverRuntime?: vscode.Event; + + /** + * An optional metadata validation function. If provided, Positron will + * validate any stored metadata before attempting to use it to create a + * new session. This happens when a workspace is re-opened, for example. + * + * If the metadata is invalid, the function should return a new version + * of the metadata with the necessary corrections. + * + * If it is not possible to correct the metadata, the function should + * reject with an error. + * + * @param metadata The metadata to validate + * @returns A Thenable that resolves with an updated version of the + * metadata. + */ + validateMetadata?(metadata: LanguageRuntimeMetadata): + Thenable; + + /** + * Creates a new runtime session. + * + * @param runtimeMetadata One of the runtime metadata items returned by + * `discoverRuntimes`. + * @param sessionMetadata The metadata for the new session. + * + * @returns A Thenable that resolves with the new session, or rejects with an error. + */ + createSession(runtimeMetadata: LanguageRuntimeMetadata, + sessionMetadata: RuntimeSessionMetadata): + Thenable; + + /** + * Reconnects to a runtime session using the given metadata. + * + * Implementing this method is optional, since not all sessions can be + * reconnected; for example, sessions that run in the browser cannot be + * reconnected. + * + * @param runtimeMetadata The metadata for the runtime that owns the + * session. + * @param sessionMetadata The metadata for the session to reconnect. + * + * @returns A Thenable that resolves with the reconnected session, or + * rejects with an error. 
+ */ + restoreSession?(runtimeMetadata: LanguageRuntimeMetadata, + sessionMetadata: RuntimeSessionMetadata): + Thenable; + } + + /** + * An enum representing the set of runtime method error codes; these map to + * JSON-RPC error codes. + */ + export enum RuntimeMethodErrorCode { + ParseError = -32700, + InvalidRequest = -32600, + MethodNotFound = -32601, + InvalidParams = -32602, + InternalError = -32603, + ServerErrorStart = -32000, + ServerErrorEnd = -32099 + } + + /** + * An error returned by a runtime method call. + */ + export interface RuntimeMethodError { + /** An error code */ + code: RuntimeMethodErrorCode; + + /** A human-readable error message */ + message: string; + + /** + * A name for the error, for compatibility with the Error object. + * Usually `RPC Error ${code}`. + */ + name: string; + + /** Additional error information (optional) */ + data: any | undefined; + } + + /** + * LanguageRuntimeSession is an interface implemented by extensions that provide a + * set of common tools for interacting with a language runtime, such as code + * execution, LSP implementation, and plotting. + */ + export interface LanguageRuntimeSession extends vscode.Disposable { + + /** An object supplying immutable metadata about this specific session */ + readonly metadata: RuntimeSessionMetadata; + + /** + * An object supplying metadata about the runtime with which this + * session is associated. + */ + readonly runtimeMetadata: LanguageRuntimeMetadata; + + /** The state of the runtime that changes during a user session */ + dynState: LanguageRuntimeDynState; + + /** An object that emits language runtime events */ + onDidReceiveRuntimeMessage: vscode.Event; + + /** An object that emits the current state of the runtime */ + onDidChangeRuntimeState: vscode.Event; + + /** An object that emits an event when the user's session ends and the runtime exits */ + onDidEndSession: vscode.Event; + + /** + * Opens a resource in the runtime. + * @param resource The resource to open. 
+ * @returns true if the resource was opened; otherwise, false. + */ + openResource?(resource: vscode.Uri | string): Thenable; + + /** Execute code in the runtime */ + execute(code: string, + id: string, + mode: RuntimeCodeExecutionMode, + errorBehavior: RuntimeErrorBehavior): void; + + /** + * Calls a method in the runtime and returns the result. + * + * Throws a RuntimeMethodError if the method call fails. + * + * @param method The name of the method to call + * @param args Arguments to pass to the method + */ + callMethod?(method: string, ...args: any[]): Thenable; + + /** Test a code fragment for completeness */ + isCodeFragmentComplete(code: string): Thenable; + + /** + * Create a new instance of a client; return null if the client type + * is not supported by this runtime, or a string containing the ID of + * the client if it is supported. + * + * @param id The unique, client-supplied ID of the client instance. Can be any + * unique string. + * @param type The type of client to create + * @param params A set of parameters to pass to the client; specific to the client type + */ + createClient(id: string, type: RuntimeClientType, params: any): Thenable; + + /** + * List all clients, optionally filtered by type. + * + * @param type If specified, only clients of this type will be returned. + * @returns A Thenable that resolves with a map of client IDs to client types. + */ + listClients(type?: RuntimeClientType): Thenable>; + + /** Remove an instance of a client (created with `createClient`) */ + removeClient(id: string): void; + + /** + * Send a message to the server end of a client instance. Any replies to the message + * will be sent back to the client via the `onDidReceiveRuntimeMessage` event, with + * the `parent_id` field set to the `message_id` given here. 
+ */ + sendClientMessage(client_id: string, message_id: string, message: any): void; + + /** Reply to a prompt issued by the runtime */ + replyToPrompt(id: string, reply: string): void; + + /** + * Start the session; returns a Thenable that resolves with information about the runtime. + * If the runtime fails to start for any reason, the Thenable should reject with an error + * object containing a `message` field with a human-readable error message and an optional + * `details` field with additional information. + */ + start(): Thenable; + + /** + * Interrupt the runtime; returns a Thenable that resolves when the interrupt has been + * successfully sent to the runtime (not necessarily when it has been processed) + */ + interrupt(): Thenable; + + /** + * Restart the runtime; returns a Thenable that resolves when the runtime restart sequence + * has been successfully started (not necessarily when it has completed). A restart will + * cause the runtime to be shut down and then started again; its status will change from + * `Restarting` => `Exited` => `Initializing` => `Starting` => `Ready`. + */ + restart(): Thenable; + + /** + * Shut down the runtime; returns a Thenable that resolves when the + * runtime shutdown sequence has been successfully started (not + * necessarily when it has completed). + */ + shutdown(exitReason: RuntimeExitReason): Thenable; + + /** + * Forcibly quits the runtime; returns a Thenable that resolves when the + * runtime has been terminated. This may be called by Positron if the + * runtime fails to respond to an interrupt and/or shutdown call, and + * should forcibly terminate any underlying processes. + */ + forceQuit(): Thenable; + + /** + * Show runtime log in output panel. + */ + showOutput?(): void; + } + + + /** + * A data structure that describes a handler for a runtime client instance, + * and is called when an instance is created. 
+ * + * @param client The client instance that was created + * @param params A set of parameters passed to the client + * @returns true if the handler took ownership of the client, false otherwise + */ + export type RuntimeClientHandlerCallback = ( + client: RuntimeClientInstance, + params: Object,) => boolean; + + /** + * A data structure that describes a handler for a runtime client instance. + */ + export interface RuntimeClientHandler { + /** + * The type of client that this handler handles. + */ + clientType: string; + + /** + * A callback that is called when a client of the given type is created; + * returns whether the handler took ownership of the client. + */ + callback: RuntimeClientHandlerCallback; + } + + /** + * Content settings for webviews hosted in the Preview panel. + * + * This interface mirrors the `WebviewOptions` & `WebviewPanelOptions` interfaces, with + * the following exceptions: + * + * - `enableFindWidget` is not supported (we never show it in previews) + * - `retainContextWhenHidden` is not supported (we always retain context) + * - `enableCommandUris` is not supported (we never allow commands in previews) + */ + export interface PreviewOptions { + /** + * Controls whether scripts are enabled in the webview content or not. + * + * Defaults to false (scripts-disabled). + */ + readonly enableScripts?: boolean; + + /** + * Controls whether forms are enabled in the webview content or not. + * + * Defaults to true if {@link PreviewOptions.enableScripts scripts are enabled}. Otherwise defaults to false. + * Explicitly setting this property to either true or false overrides the default. + */ + readonly enableForms?: boolean; + + /** + * Root paths from which the webview can load local (filesystem) resources using uris from `asWebviewUri` + * + * Default to the root folders of the current workspace plus the extension's install directory. + * + * Pass in an empty array to disallow access to any local resources. 
+ */ + readonly localResourceRoots?: readonly vscode.Uri[]; + + /** + * Mappings of localhost ports used inside the webview. + * + * Port mapping allow webviews to transparently define how localhost ports are resolved. This can be used + * to allow using a static localhost port inside the webview that is resolved to random port that a service is + * running on. + * + * If a webview accesses localhost content, we recommend that you specify port mappings even if + * the `webviewPort` and `extensionHostPort` ports are the same. + * + * *Note* that port mappings only work for `http` or `https` urls. Websocket urls (e.g. `ws://localhost:3000`) + * cannot be mapped to another port. + */ + readonly portMapping?: readonly vscode.WebviewPortMapping[]; + } + + /** + * A preview panel that contains a webview. This interface mirrors the + * `WebviewPanel` interface, but omits elements that don't apply to + * preview panels, such as `viewColumn`. + */ + interface PreviewPanel { + /** + * Identifies the type of the preview panel, such as `'markdown.preview'`. + */ + readonly viewType: string; + + /** + * Title of the panel shown in UI. + */ + title: string; + + /** + * {@linkcode Webview} belonging to the panel. + */ + readonly webview: vscode.Webview; + + /** + * Whether the panel is active (focused by the user). + */ + readonly active: boolean; + + /** + * Whether the panel is visible. + */ + readonly visible: boolean; + + /** + * Fired when the panel's view state changes. + */ + readonly onDidChangeViewState: vscode.Event; + + /** + * Fired when the panel is disposed. + * + * This may be because the user closed the panel or because `.dispose()` was + * called on it. + * + * Trying to use the panel after it has been disposed throws an exception. + */ + readonly onDidDispose: vscode.Event; + + /** + * Show the preview panel + * + * Only one preview panel can be shown at a time. If a different preview + * is already showing, it will be hidden. 
+ * + * @param preserveFocus When `true`, the webview will not take focus. + */ + reveal(preserveFocus?: boolean): void; + + /** + * Dispose of the preview panel. + * + * This closes the panel if it showing and disposes of the resources + * owned by the underlying webview. Preview panels are also disposed + * when the user closes the preview panel. Both cases fire the + * `onDispose` event. + */ + dispose(): any; + } + + /** + * Event fired when a preview panel's view state changes. + */ + export interface PreviewPanelOnDidChangeViewStateEvent { + /** + * Preview panel whose view state changed. + */ + readonly previewPanel: PreviewPanel; + } + + export interface StatementRangeProvider { + /** + * Given a cursor position, return the range of the statement that the + * cursor is within. If the cursor is not within a statement, return the + * range of the next statement, if one exists. + * + * @param document The document in which the command was invoked. + * @param position The position at which the command was invoked. + * @param token A cancellation token. + * @return The range of the statement at the given position. + */ + provideStatementRange(document: vscode.TextDocument, + position: vscode.Position, + token: vscode.CancellationToken): vscode.ProviderResult; + } + + /** + * The range of a statement, plus optionally the code for the range. + */ + export interface StatementRange { + /** + * The range of the statement at the given position. + */ + readonly range: vscode.Range; + + /** + * The code for this statement range, if different from the document contents at this range. + */ + readonly code?: string; + + } + + export interface HelpTopicProvider { + /** + * Given a cursor position, return the help topic relevant to the cursor + * position, or an empty string if no help topic is recommended or + * relevant. + * + * @param document The document in which the command was invoked. + * @param position The position at which the command was invoked. 
+ * @param token A cancellation token. + * @return A string containing the help topic relevant to the cursor + * position + */ + provideHelpTopic(document: vscode.TextDocument, + position: vscode.Position, + token: vscode.CancellationToken): vscode.ProviderResult; + } + + export interface Console { + /** + * Pastes text into the console. + */ + pasteText(text: string): void; + } + + namespace languages { + /** + * Register a statement range provider. + * + * @param selector A selector that defines the documents this provider is applicable to. + * @param provider A statement range provider. + * @return A {@link Disposable} that unregisters this provider when being disposed. + */ + export function registerStatementRangeProvider( + selector: vscode.DocumentSelector, + provider: StatementRangeProvider): vscode.Disposable; + + /** + * Register a help topic provider. + * + * @param selector A selector that defines the documents this provider is applicable to. + * @param provider A help topic provider. + * @return A {@link Disposable} that unregisters this provider when being disposed. + */ + export function registerHelpTopicProvider( + selector: vscode.DocumentSelector, + provider: HelpTopicProvider): vscode.Disposable; + } + + namespace window { + /** + * Create and show a new preview panel. + * + * @param viewType Identifies the type of the preview panel. + * @param title Title of the panel. + * @param options Settings for the new panel. + * + * @return New preview panel. + */ + export function createPreviewPanel(viewType: string, title: string, preserveFocus?: boolean, options?: PreviewOptions): PreviewPanel; + + /** + * Create a log output channel from raw data. + * + * Variant of `createOutputChannel()` that creates a "raw log" output channel. + * Compared to a normal `LogOutputChannel`, this doesn't add timestamps or info + * level. 
It's meant for extensions that create fully formed log lines but still + * want to benefit from the colourised rendering of log output channels. + * + * @param name Human-readable string which will be used to represent the channel in the UI. + * + * @return New log output channel. + */ + export function createRawLogOutputChannel(name: string): vscode.OutputChannel; + + /** + * Create and show a simple modal dialog prompt. + * + * @param title The title of the dialog + * @param message The message to display in the dialog + * @param okButtonTitle The title of the OK button (optional; defaults to 'OK') + * @param cancelButtonTitle The title of the Cancel button (optional; defaults to 'Cancel') + * + * @returns A Thenable that resolves to true if the user clicked OK, or false + * if the user clicked Cancel. + */ + export function showSimpleModalDialogPrompt(title: string, + message: string, + okButtonTitle?: string, + cancelButtonTitle?: string): Thenable; + + /** + * Get the `Console` for a runtime language `id` + * + * @param id The runtime language `id` to retrieve a `Console` for, i.e. 'r' or 'python'. + * + * @returns A `Console`, or `undefined` if no `Console` for that language exists. + */ + export function getConsoleForLanguage(id: string): Console | undefined; + + /** + * Fires when the width of the console input changes. The new width is passed as + * a number, which represents the number of characters that can fit in the + * console horizontally. + */ + export const onDidChangeConsoleWidth: vscode.Event; + + /** + * Returns the current width of the console input, in characters. + */ + export function getConsoleWidth(): Thenable; + } + + namespace runtime { + + /** + * Executes code in a language runtime's console, as though it were typed + * interactively by the user. 
+ * + * @param languageId The language ID of the code snippet + * @param code The code snippet to execute + * @param focus Whether to focus the runtime's console + * @param allowIncomplete Whether to bypass runtime code completeness checks. If true, the `code` + * will be executed by the runtime even if it is incomplete or invalid. Defaults to false + * @returns A Thenable that resolves with true if the code was sent to a + * runtime successfully, false otherwise. + */ + export function executeCode(languageId: string, + code: string, + focus: boolean, + allowIncomplete?: boolean): Thenable; + + /** + * Register a language runtime manager with Positron. Returns a + * disposable that unregisters the manager when disposed. + */ + export function registerLanguageRuntimeManager(manager: LanguageRuntimeManager): vscode.Disposable; + + /** + * List all registered runtimes. + */ + export function getRegisteredRuntimes(): Thenable; + + /** + * Get the preferred language runtime for a given language. + * + * @param languageId The language ID of the preferred runtime + */ + export function getPreferredRuntime(languageId: string): Thenable; + + /** + * Select and start a runtime previously registered with Positron. Any + * previously active runtimes for the language will be shut down. + * + * @param runtimeId The ID of the runtime to select and start. + */ + export function selectLanguageRuntime(runtimeId: string): Thenable; + + /** + * Start a new session for a runtime previously registered with Positron. + * + * @param runtimeId The ID of the runtime to select and start. + * @param sessionName A human-readable name for the new session. + * @param notebookUri If the session is associated with a notebook, + * the notebook URI. + * + * Returns a Thenable that resolves with the newly created session. + */ + export function startLanguageRuntime(runtimeId: string, + sessionName: string, + notebookUri?: vscode.Uri): Thenable; + + /** + * Restart a running session. 
+ * + * @param sessionId The ID of the session to restart. + */ + export function restartSession(sessionId: string): Thenable; + + /** + * Register a handler for runtime client instances. This handler will be called + * whenever a new client instance is created by a language runtime of the given + * type. + * + * @param handler A handler for runtime client instances + */ + export function registerClientHandler(handler: RuntimeClientHandler): vscode.Disposable; + + /** + * An event that fires when a new runtime is registered. + */ + export const onDidRegisterRuntime: vscode.Event; + + } + + // FIXME: The current (and clearly not final) state of an experiment to bring in interface(s) + // here by referring to an external file. Such an external file will presumably be generated by + // the generate-comms.ts script. Two goals of the experiment: + // * Reduce the manual proliferation of these generated types. + // * Ideally a file is meant to edited by humans or by robots, but not both. + // Related to https://github.com/posit-dev/positron/issues/12 + type EC = import('./ui-comm').EditorContext; + export type EditorContext = EC; + + /** + * This namespace contains all frontend RPC methods available to a runtime. + */ + namespace methods { + /** + * Call a frontend method. + * + * `call()` is designed to be hooked up directly to an RPC mechanism. It takes + * `method` and `params` arguments as defined by the UI frontend OpenRPC contract + * and returns a JSON-RPC response. It never throws, all errors are returned as + * JSON-RPC error responses. + * + * @param method The method name. + * @param params An object of named parameters for `method`. + */ + export function call(method: string, params: Record): Thenable; + + /** + * Retrieve last active editor context. + * + * Returns a `EditorContext` for the last active editor. + */ + export function lastActiveEditorContext(): Thenable; + + /** + * Executes a Positron command. + * + * @param command The Positron command name. 
+         */
+        export function executeCommand(commandId: string): Thenable<void>;
+
+    }
+}
diff --git a/extensions/positron-python/positron-dts/ui-comm.d.ts b/extensions/positron-python/positron-dts/ui-comm.d.ts
new file mode 100644
index 00000000000..da3780a1357
--- /dev/null
+++ b/extensions/positron-python/positron-dts/ui-comm.d.ts
@@ -0,0 +1,121 @@
+/*---------------------------------------------------------------------------------------------
+ * Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved.
+ *--------------------------------------------------------------------------------------------*/
+
+//
+// Copied from src/vs/workbench/services/languageRuntime/common/positronUiComm.ts; do not edit.
+//
+
+/**
+ * Editor metadata
+ */
+export interface EditorContext {
+    /**
+     * Document metadata
+     */
+    document: TextDocument;
+
+    /**
+     * Document contents
+     */
+    contents: Array<string>;
+
+    /**
+     * The primary selection, i.e. selections[0]
+     */
+    selection: Selection;
+
+    /**
+     * The selections in this text editor.
+     */
+    selections: Array<Selection>;
+
+}
+
+/**
+ * Document metadata
+ */
+export interface TextDocument {
+    /**
+     * URI of the resource viewed in the editor
+     */
+    path: string;
+
+    /**
+     * End of line sequence
+     */
+    eol: string;
+
+    /**
+     * Whether the document has been closed
+     */
+    is_closed: boolean;
+
+    /**
+     * Whether the document has been modified
+     */
+    is_dirty: boolean;
+
+    /**
+     * Whether the document is untitled
+     */
+    is_untitled: boolean;
+
+    /**
+     * Language identifier
+     */
+    language_id: string;
+
+    /**
+     * Number of lines in the document
+     */
+    line_count: number;
+
+    /**
+     * Version number of the document
+     */
+    version: number;
+
+}
+
+/**
+ * A line and character position, such as the position of the cursor.
+ */
+export interface Position {
+    /**
+     * The zero-based character value, as a Unicode code point offset.
+     */
+    character: number;
+
+    /**
+     * The zero-based line value.
+ */ + line: number; + +} + +/** + * Selection metadata + */ +export interface Selection { + /** + * Position of the cursor. + */ + active: Position; + + /** + * Start position of the selection + */ + start: Position; + + /** + * End position of the selection + */ + end: Position; + + /** + * Text of the selection + */ + text: string; + +} diff --git a/extensions/positron-python/pythonExtensionApi/.eslintrc b/extensions/positron-python/pythonExtensionApi/.eslintrc new file mode 100644 index 00000000000..8828c49002e --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/.eslintrc @@ -0,0 +1,11 @@ +{ + "overrides": [ + { + "files": ["**/main.d.ts"], + "rules": { + "@typescript-eslint/no-explicit-any": "off", + "padding-line-between-statements": ["error", { "blankLine": "always", "prev": "export", "next": "*" }] + } + } + ] +} diff --git a/extensions/positron-python/pythonExtensionApi/.npmignore b/extensions/positron-python/pythonExtensionApi/.npmignore new file mode 100644 index 00000000000..283d589ea5f --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/.npmignore @@ -0,0 +1,8 @@ +example/** +dist/ +out/**/*.map +out/**/*.tsbuildInfo +src/ +.eslintrc* +.eslintignore +tsconfig*.json diff --git a/extensions/positron-python/pythonExtensionApi/LICENSE.md b/extensions/positron-python/pythonExtensionApi/LICENSE.md new file mode 100644 index 00000000000..767f4076ba0 --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/LICENSE.md @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED _AS IS_, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/extensions/positron-python/pythonExtensionApi/README.md b/extensions/positron-python/pythonExtensionApi/README.md new file mode 100644 index 00000000000..5208d90cdfa --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/README.md @@ -0,0 +1,55 @@ +# Python extension's API + +This npm module implements an API facade for the Python extension in VS Code. + +## Example + +First we need to define a `package.json` for the extension that wants to use the API: + +```jsonc +{ + "name": "...", + ... + // depend on the Python extension + "extensionDependencies": [ + "ms-python.python" + ], + // Depend on the Python extension facade npm module to get easier API access to the + // core extension. + "dependencies": { + "@vscode/python-extension": "...", + "@types/vscode": "..." 
+ }, +} +``` + +Update `"@types/vscode"` to [a recent version](https://code.visualstudio.com/updates/) of VS Code, say `"^1.81.0"` for VS Code version `"1.81"`, in case there are any conflicts. + +The actual source code to get the active environment to run some script could look like this: + +```typescript +// Import the API +import { PythonExtension } from '@vscode/python-extension'; + +... + +// Load the Python extension API +const pythonApi: PythonExtension = await PythonExtension.api(); + +// This will return something like /usr/bin/python +const environmentPath = pythonApi.environments.getActiveEnvironmentPath(); + +// `environmentPath.path` carries the value of the setting. Note that this path may point to a folder and not the +// python binary. Depends entirely on how the env was created. +// E.g., `conda create -n myenv python` ensures the env has a python binary +// `conda create -n myenv` does not include a python binary. +// Also, the path specified may not be valid, use the following to get complete details for this environment if +// need be. + +const environment = await pythonApi.environments.resolveEnvironment(environmentPath); +if (environment) { + // run your script here. +} +``` + +Check out [the wiki](https://aka.ms/pythonEnvironmentApi) for many more examples and usage. 
diff --git a/extensions/positron-python/pythonExtensionApi/SECURITY.md b/extensions/positron-python/pythonExtensionApi/SECURITY.md new file mode 100644 index 00000000000..a050f362c15 --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 
+ +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). 
+ + diff --git a/extensions/positron-python/pythonExtensionApi/package-lock.json b/extensions/positron-python/pythonExtensionApi/package-lock.json new file mode 100644 index 00000000000..ef6914e0e78 --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/package-lock.json @@ -0,0 +1,155 @@ +{ + "name": "@vscode/python-extension", + "version": "1.0.5", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "@vscode/python-extension", + "version": "1.0.5", + "license": "MIT", + "devDependencies": { + "@types/vscode": "^1.78.0", + "source-map": "^0.8.0-beta.0", + "typescript": "5.0.4" + }, + "engines": { + "node": ">=18.17.1", + "vscode": "^1.78.0" + } + }, + "node_modules/@types/vscode": { + "version": "1.80.0", + "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.80.0.tgz", + "integrity": "sha512-qK/CmOdS2o7ry3k6YqU4zD3R2AYlJfbwBoSbKpBoP+GpXNE+0NEgJOli4n0bm0diK5kfBnchgCEj4igQz/44Hg==", + "dev": true + }, + "node_modules/lodash.sortby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", + "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==", + "dev": true + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/source-map": { + "version": "0.8.0-beta.0", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.8.0-beta.0.tgz", + "integrity": "sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==", + "dev": true, + "dependencies": { + "whatwg-url": "^7.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/tr46": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", + "integrity": "sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/typescript": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", + "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=12.20" + } + }, + "node_modules/webidl-conversions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", + "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==", + "dev": true + }, + "node_modules/whatwg-url": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", + "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", + "dev": true, + "dependencies": { + "lodash.sortby": "^4.7.0", + "tr46": "^1.0.1", + "webidl-conversions": "^4.0.2" + } + } + }, + "dependencies": { + "@types/vscode": { + "version": "1.80.0", + "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.80.0.tgz", + "integrity": "sha512-qK/CmOdS2o7ry3k6YqU4zD3R2AYlJfbwBoSbKpBoP+GpXNE+0NEgJOli4n0bm0diK5kfBnchgCEj4igQz/44Hg==", + "dev": true + }, + "lodash.sortby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", + "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==", + "dev": true + }, + "punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": 
"sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true + }, + "source-map": { + "version": "0.8.0-beta.0", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.8.0-beta.0.tgz", + "integrity": "sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==", + "dev": true, + "requires": { + "whatwg-url": "^7.0.0" + } + }, + "tr46": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", + "integrity": "sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "typescript": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", + "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", + "dev": true + }, + "webidl-conversions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", + "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==", + "dev": true + }, + "whatwg-url": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", + "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", + "dev": true, + "requires": { + "lodash.sortby": "^4.7.0", + "tr46": "^1.0.1", + "webidl-conversions": "^4.0.2" + } + } + } +} diff --git a/extensions/positron-python/pythonExtensionApi/package.json b/extensions/positron-python/pythonExtensionApi/package.json new file mode 100644 index 00000000000..9e58f1a2400 --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/package.json @@ -0,0 +1,43 @@ +{ + "name": "@vscode/python-extension", + "description": "An API facade for the Python extension in VS Code", + "version": 
"1.0.5", + "author": { + "name": "Microsoft Corporation" + }, + "keywords": [ + "Python", + "VSCode", + "API" + ], + "main": "./out/main.js", + "types": "./out/main.d.ts", + "engines": { + "node": ">=18.17.1", + "vscode": "^1.78.0" + }, + "license": "MIT", + "homepage": "https://github.com/microsoft/vscode-python/tree/main/pythonExtensionApi", + "repository": { + "type": "git", + "url": "https://github.com/Microsoft/vscode-python" + }, + "bugs": { + "url": "https://github.com/Microsoft/vscode-python/issues" + }, + "devDependencies": { + "typescript": "5.0.4", + "@types/vscode": "^1.78.0", + "source-map": "^0.8.0-beta.0" + }, + "scripts": { + "prepublishOnly": "echo \"⛔ Can only publish from a secure pipeline ⛔\" && node ../build/fail", + "prepack": "npm run all:publish", + "compile": "node ./node_modules/typescript/lib/tsc.js -b ./tsconfig.json", + "clean": "node ../node_modules/rimraf/bin.js out", + "lint": "node ../node_modules/eslint/bin/eslint.js --ext ts src", + "all": "npm run clean && npm run compile", + "formatTypings": "node ../node_modules/eslint/bin/eslint.js --fix ./out/main.d.ts", + "all:publish": "git clean -xfd . && npm install && npm run compile && npm run formatTypings" + } +} diff --git a/extensions/positron-python/pythonExtensionApi/src/main.ts b/extensions/positron-python/pythonExtensionApi/src/main.ts new file mode 100644 index 00000000000..4de554bf5a2 --- /dev/null +++ b/extensions/positron-python/pythonExtensionApi/src/main.ts @@ -0,0 +1,349 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { CancellationToken, Event, Uri, WorkspaceFolder, extensions } from 'vscode'; + +/* + * Do not introduce any breaking changes to this API. + * This is the public API for other extensions to interact with this extension. + */ +export interface PythonExtension { + /** + * Promise indicating whether all parts of the extension have completed loading or not. 
+     */
+    ready: Promise<void>;
+    debug: {
+        /**
+         * Generate an array of strings for commands to pass to the Python executable to launch the debugger for remote debugging.
+         * Users can append another array of strings of what they want to execute along with relevant arguments to Python.
+         * E.g `['/Users/..../pythonVSCode/pythonFiles/lib/python/debugpy', '--listen', 'localhost:57039', '--wait-for-client']`
+         * @param host
+         * @param port
+         * @param waitUntilDebuggerAttaches Defaults to `true`.
+         */
+        getRemoteLauncherCommand(host: string, port: number, waitUntilDebuggerAttaches: boolean): Promise<string[]>;
+
+        /**
+         * Gets the path to the debugger package used by the extension.
+         * @returns {Promise<string>}
+         */
+        getDebuggerPackagePath(): Promise<string | undefined>;
+    };
+
+    /**
+     * These APIs provide a way for extensions to work with by python environments available in the user's machine
+     * as found by the Python extension. See
+     * https://github.com/microsoft/vscode-python/wiki/Python-Environment-APIs for usage examples and more.
+     */
+    readonly environments: {
+        /**
+         * Returns the environment configured by user in settings. Note that this can be an invalid environment, use
+         * {@link resolveEnvironment} to get full details.
+         * @param resource : Uri of a file or workspace folder. This is used to determine the env in a multi-root
+         * scenario. If `undefined`, then the API returns what ever is set for the workspace.
+         */
+        getActiveEnvironmentPath(resource?: Resource): EnvironmentPath;
+        /**
+         * Sets the active environment path for the python extension for the resource. Configuration target will always
+         * be the workspace folder.
+         * @param environment : If string, it represents the full path to environment folder or python executable
+         * for the environment. Otherwise it can be {@link Environment} or {@link EnvironmentPath} itself.
+         * @param resource : [optional] File or workspace to scope to a particular workspace folder.
+         */
+        updateActiveEnvironmentPath(
+            environment: string | EnvironmentPath | Environment,
+            resource?: Resource,
+        ): Promise<void>;
+        /**
+         * This event is triggered when the active environment setting changes.
+         */
+        readonly onDidChangeActiveEnvironmentPath: Event<ActiveEnvironmentPathChangeEvent>;
+        /**
+         * Carries environments known to the extension at the time of fetching the property. Note this may not
+         * contain all environments in the system as a refresh might be going on.
+         *
+         * Only reports environments in the current workspace.
+         */
+        readonly known: readonly Environment[];
+        /**
+         * This event is triggered when the known environment list changes, like when an environment
+         * is found, existing environment is removed, or some details changed on an environment.
+         */
+        readonly onDidChangeEnvironments: Event<EnvironmentsChangeEvent>;
+        /**
+         * This API will trigger environment discovery, but only if it has not already happened in this VSCode session.
+         * Useful for making sure env list is up-to-date when the caller needs it for the first time.
+         *
+         * To force trigger a refresh regardless of whether a refresh was already triggered, see option
+         * {@link RefreshOptions.forceRefresh}.
+         *
+         * Note that if there is a refresh already going on then this returns the promise for that refresh.
+         * @param options Additional options for refresh.
+         * @param token A cancellation token that indicates a refresh is no longer needed.
+         */
+        refreshEnvironments(options?: RefreshOptions, token?: CancellationToken): Promise<void>;
+        /**
+         * Returns details for the given environment, or `undefined` if the env is invalid.
+         * @param environment : If string, it represents the full path to environment folder or python executable
+         * for the environment. Otherwise it can be {@link Environment} or {@link EnvironmentPath} itself.
+         */
+        resolveEnvironment(
+            environment: Environment | EnvironmentPath | string,
+        ): Promise<ResolvedEnvironment | undefined>;
+        /**
+         * Returns the environment variables used by the extension for a resource, which includes the custom
+         * variables configured by user in `.env` files.
+         * @param resource : Uri of a file or workspace folder. This is used to determine the env in a multi-root
+         * scenario. If `undefined`, then the API returns what ever is set for the workspace.
+         */
+        getEnvironmentVariables(resource?: Resource): EnvironmentVariables;
+        /**
+         * This event is fired when the environment variables for a resource change. Note it's currently not
+         * possible to detect if environment variables in the system change, so this only fires if custom
+         * environment variables are updated in `.env` files.
+         */
+        readonly onDidEnvironmentVariablesChange: Event<EnvironmentVariablesChangeEvent>;
+    };
+}
+
+export type RefreshOptions = {
+    /**
+     * When `true`, force trigger a refresh regardless of whether a refresh was already triggered. Note this can be expensive so
+     * it's best to only use it if user manually triggers a refresh.
+     */
+    forceRefresh?: boolean;
+};
+
+/**
+ * Details about the environment. Note the environment folder, type and name never changes over time.
+ */
+export type Environment = EnvironmentPath & {
+    /**
+     * Carries details about python executable.
+     */
+    readonly executable: {
+        /**
+         * Uri of the python interpreter/executable. Carries `undefined` in case an executable does not belong to
+         * the environment.
+         */
+        readonly uri: Uri | undefined;
+        /**
+         * Bitness if known at this moment.
+         */
+        readonly bitness: Bitness | undefined;
+        /**
+         * Value of `sys.prefix` in sys module if known at this moment.
+         */
+        readonly sysPrefix: string | undefined;
+    };
+    /**
+     * Carries details if it is an environment, otherwise `undefined` in case of global interpreters and others.
+     */
+    readonly environment:
+        | {
+              /**
+               * Type of the environment.
+ */ + readonly type: EnvironmentType; + /** + * Name to the environment if any. + */ + readonly name: string | undefined; + /** + * Uri of the environment folder. + */ + readonly folderUri: Uri; + /** + * Any specific workspace folder this environment is created for. + */ + readonly workspaceFolder: WorkspaceFolder | undefined; + } + | undefined; + /** + * Carries Python version information known at this moment, carries `undefined` for envs without python. + */ + readonly version: + | (VersionInfo & { + /** + * Value of `sys.version` in sys module if known at this moment. + */ + readonly sysVersion: string | undefined; + }) + | undefined; + /** + * Tools/plugins which created the environment or where it came from. First value in array corresponds + * to the primary tool which manages the environment, which never changes over time. + * + * Array is empty if no tool is responsible for creating/managing the environment. Usually the case for + * global interpreters. + */ + readonly tools: readonly EnvironmentTools[]; +}; + +/** + * Derived form of {@link Environment} where certain properties can no longer be `undefined`. Meant to represent an + * {@link Environment} with complete information. + */ +export type ResolvedEnvironment = Environment & { + /** + * Carries complete details about python executable. + */ + readonly executable: { + /** + * Uri of the python interpreter/executable. Carries `undefined` in case an executable does not belong to + * the environment. + */ + readonly uri: Uri | undefined; + /** + * Bitness of the environment. + */ + readonly bitness: Bitness; + /** + * Value of `sys.prefix` in sys module. + */ + readonly sysPrefix: string; + }; + /** + * Carries complete Python version information, carries `undefined` for envs without python. + */ + readonly version: + | (ResolvedVersionInfo & { + /** + * Value of `sys.version` in sys module if known at this moment. 
+ */ + readonly sysVersion: string; + }) + | undefined; +}; + +export type EnvironmentsChangeEvent = { + readonly env: Environment; + /** + * * "add": New environment is added. + * * "remove": Existing environment in the list is removed. + * * "update": New information found about existing environment. + */ + readonly type: 'add' | 'remove' | 'update'; +}; + +export type ActiveEnvironmentPathChangeEvent = EnvironmentPath & { + /** + * Workspace folder the environment changed for. + */ + readonly resource: WorkspaceFolder | undefined; +}; + +/** + * Uri of a file inside a workspace or workspace folder itself. + */ +export type Resource = Uri | WorkspaceFolder; + +export type EnvironmentPath = { + /** + * The ID of the environment. + */ + readonly id: string; + /** + * Path to environment folder or path to python executable that uniquely identifies an environment. Environments + * lacking a python executable are identified by environment folder paths, whereas other envs can be identified + * using python executable path. + */ + readonly path: string; +}; + +/** + * Tool/plugin where the environment came from. It can be {@link KnownEnvironmentTools} or custom string which + * was contributed. + */ +export type EnvironmentTools = KnownEnvironmentTools | string; +/** + * Tools or plugins the Python extension currently has built-in support for. Note this list is expected to shrink + * once tools have their own separate extensions. + */ +export type KnownEnvironmentTools = + | 'Conda' + | 'Pipenv' + | 'Poetry' + | 'VirtualEnv' + | 'Venv' + | 'VirtualEnvWrapper' + | 'Pyenv' + | 'Unknown'; + +/** + * Type of the environment. It can be {@link KnownEnvironmentTypes} or custom string which was contributed. + */ +export type EnvironmentType = KnownEnvironmentTypes | string; +/** + * Environment types the Python extension is aware of. 
Note this list is expected to shrink once tools have their + * own separate extensions, in which case they're expected to provide the type themselves. + */ +export type KnownEnvironmentTypes = 'VirtualEnvironment' | 'Conda' | 'Unknown'; + +/** + * Carries bitness for an environment. + */ +export type Bitness = '64-bit' | '32-bit' | 'Unknown'; + +/** + * The possible Python release levels. + */ +export type PythonReleaseLevel = 'alpha' | 'beta' | 'candidate' | 'final'; + +/** + * Release information for a Python version. + */ +export type PythonVersionRelease = { + readonly level: PythonReleaseLevel; + readonly serial: number; +}; + +export type VersionInfo = { + readonly major: number | undefined; + readonly minor: number | undefined; + readonly micro: number | undefined; + readonly release: PythonVersionRelease | undefined; +}; + +export type ResolvedVersionInfo = { + readonly major: number; + readonly minor: number; + readonly micro: number; + readonly release: PythonVersionRelease; +}; + +/** + * A record containing readonly keys. + */ +export type EnvironmentVariables = { readonly [key: string]: string | undefined }; + +export type EnvironmentVariablesChangeEvent = { + /** + * Workspace folder the environment variables changed for. + */ + readonly resource: WorkspaceFolder | undefined; + /** + * Updated value of environment variables. + */ + readonly env: EnvironmentVariables; +}; + +export const PVSC_EXTENSION_ID = 'ms-python.python'; + +// eslint-disable-next-line @typescript-eslint/no-namespace +export namespace PythonExtension { + /** + * Returns the API exposed by the Python extension in VS Code. 
+     */
+    export async function api(): Promise<PythonExtension> {
+        const extension = extensions.getExtension(PVSC_EXTENSION_ID);
+        if (extension === undefined) {
+            throw new Error(`Python extension is not installed or is disabled`);
+        }
+        if (!extension.isActive) {
+            await extension.activate();
+        }
+        const pythonApi: PythonExtension = extension.exports;
+        return pythonApi;
+    }
+}
diff --git a/extensions/positron-python/pythonExtensionApi/tsconfig.json b/extensions/positron-python/pythonExtensionApi/tsconfig.json
new file mode 100644
index 00000000000..9ab7617023d
--- /dev/null
+++ b/extensions/positron-python/pythonExtensionApi/tsconfig.json
@@ -0,0 +1,34 @@
+{
+    "compilerOptions": {
+        "baseUrl": ".",
+        "paths": {
+            "*": ["types/*"]
+        },
+        "module": "commonjs",
+        "target": "es2018",
+        "outDir": "./out",
+        "lib": [
+            "es6",
+            "es2018",
+            "dom",
+            "ES2019",
+            "ES2020"
+        ],
+        "sourceMap": true,
+        "rootDir": "src",
+        "experimentalDecorators": true,
+        "allowSyntheticDefaultImports": true,
+        "strict": true,
+        "noImplicitAny": true,
+        "noImplicitThis": true,
+        "noUnusedLocals": true,
+        "noUnusedParameters": true,
+        "noFallthroughCasesInSwitch": true,
+        "resolveJsonModule": true,
+        "declaration": true
+    },
+    "exclude": [
+        "node_modules",
+        "out"
+    ]
+}
diff --git a/extensions/positron-python/pythonFiles/.env b/extensions/positron-python/pythonFiles/.env
new file mode 100644
index 00000000000..8ae3557bcd8
--- /dev/null
+++ b/extensions/positron-python/pythonFiles/.env
@@ -0,0 +1 @@
+PYTHONPATH=./lib/python
diff --git a/extensions/positron-python/pythonFiles/.vscode/launch.json b/extensions/positron-python/pythonFiles/.vscode/launch.json
new file mode 100644
index 00000000000..c525016b309
--- /dev/null
+++ b/extensions/positron-python/pythonFiles/.vscode/launch.json
@@ -0,0 +1,15 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug unit tests", + "type": "python", + "request": "test", + "console": "integratedTerminal", + "justMyCode": false + } + ] +} diff --git a/extensions/positron-python/pythonFiles/.vscode/settings.json b/extensions/positron-python/pythonFiles/.vscode/settings.json new file mode 100644 index 00000000000..480631710e8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/.vscode/settings.json @@ -0,0 +1,16 @@ +{ + "files.exclude": { + "**/__pycache__/**": true, + "**/**/*.pyc": true + }, + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter", + "editor.formatOnSave": true, + "editor.rulers": [100], + }, + "python.testing.pytestArgs": [ + "tests" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, +} diff --git a/extensions/positron-python/pythonFiles/Notebooks intro.ipynb b/extensions/positron-python/pythonFiles/Notebooks intro.ipynb new file mode 100644 index 00000000000..850d7f5a86f --- /dev/null +++ b/extensions/positron-python/pythonFiles/Notebooks intro.ipynb @@ -0,0 +1,153 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Creating a new notebook" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Open the command palette with the shortcut: `Ctrl/Command` + `Shift` + `P`\r\n", + "2. Search for the command `Create New Blank Notebook`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to get back to the start page" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Open the command palette with the shortcut: `Ctrl/Command` + `Shift` + `P`\r\n", + "\r\n", + "2. 
Search for the command `Python: Open Start Page`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You are currently viewing what we call our Notebook Editor. It is an interactive document based on Jupyter Notebooks that supports the intermixing of code, outputs and markdown documentation. \r\n", + "\r\n", + "This cell is a markdown cell. To edit the text in this cell, simply double click on the cell to change it into edit mode.\r\n", + "\r\n", + "The next cell below is a code cell. You can switch a cell between code and markdown by clicking on the code ![code icon](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/codeIcon.PNG) /markdown ![markdown icon](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/markdownIcon.PNG) icons or using the keyboard shortcut `M` and `Y` respectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('hello world')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* To execute the code in the cell above, click on the cell to select it and then either press the play ![play](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/playIcon.PNG) button in the cell toolbar, or use the keyboard shortcut `Ctrl/Command` + `Enter`.\r\n", + "* To edit the code, just click in cell and start editing.\r\n", + "* To add a new cell below, click the `Add Cell` icon ![add cell](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/addIcon.PNG) at the bottom left of the cell or enter command mode with the `ESC` Key and then use the keyboard shortcut `B` to create the new cell below.\r\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Variable 
explorer**\r\n", + "\r\n", + "To view all your active variables and their current values in the notebook, click on the variable explorer icon ![variable explorer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/variableExplorerIcon.PNG) in the top toolbar.\r\n", + "\r\n", + "![Variable Explorer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/variableexplorer.png)\r\n", + "\r\n", + "**Data Viewer**\r\n", + "\r\n", + "To view your data frame in a more visual \"Excel\" like format, open the variable explorer and to the left of any dataframe object, you will see the data viewer icon ![data viewer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/dataViewerIcon.PNG) which you can click to open the data viewer.\r\n", + "\r\n", + "![Data Viewer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/dataviewer.gif)\r\n", + "\r\n", + "**Convert to Python File**\r\n", + "\r\n", + "To export your notebook to a Python file (.py), click on the `Convert to Python script` icon ![Export icon](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/exportIcon.PNG) in the top toolbar \r\n", + "\r\n", + "![Export](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/savetopythonfile.png)\r\n", + "\r\n", + "**Plot Viewer**\r\n", + "\r\n", + "If you have a graph (such as matplotlib) in your output, you'll notice if you hover over the graph, the `Plot Viewer` icon ![Plot Viewer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/plotViewerIcon.PNG) will appear in the top left. 
Click the icon to open up the graph in the Plotviewer which allows you to zoom on your plots and export it in formats such as png and jpeg.\r\n", + "\r\n", + "![Plot Viewer](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/plotviewer.gif)\r\n", + "\r\n", + "**Switching Kernels**\r\n", + "\r\n", + "The notebook editor will detect all kernels in your system by default. To change your notebook kernel, click on the kernel status in the top toolbar at the far right. For example, your kernel status may say \"Python 3: Idle\". This will open up the kernel selector where you can choose your desired kernel.\r\n", + "\r\n", + "![Switching Kernels](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/kernelchange.gif)\r\n", + "\r\n", + "**Remote Jupyter Server**\r\n", + "\r\n", + "To connect to a remote Jupyter server, open the command prompt and search for the command `Specify remote or local Jupyter server for connections`. Then select `Existing` and enter the remote Jupyter server URL. 
Afterwards, you'll be prompted to reload the window and the Notebook will be opened connected to the remote Jupyter server.\r\n", + "\r\n", + "![Remote](https://raw.githubusercontent.com/microsoft/vscode-python/main/images/remoteserver.gif)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-Rh3-Vt9Nev9" + }, + "source": [ + "# More Resources" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- [Data science tutorial for Visual Studio Code](https://code.visualstudio.com/docs/python/data-science-tutorial)\r\n", + "- [Jupyter Notebooks in Visual Studio Code documentation](https://code.visualstudio.com/docs/python/jupyter-support)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.6 64-bit", + "metadata": { + "interpreter": { + "hash": "5c7437588f5ad65b3fb2510dff59138dda524824913550626013373b675d5274" + } + }, + "name": "python3" + }, + "language_info": { + "version": "3.8.6-final" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/extensions/positron-python/pythonFiles/create_conda.py b/extensions/positron-python/pythonFiles/create_conda.py new file mode 100644 index 00000000000..15320a8a1ce --- /dev/null +++ b/extensions/positron-python/pythonFiles/create_conda.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import argparse +import os +import pathlib +import subprocess +import sys +from typing import Optional, Sequence, Union + +CONDA_ENV_NAME = ".conda" +CWD = pathlib.Path.cwd() + + +class VenvError(Exception): + pass + + +def parse_args(argv: Sequence[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser() + parser.add_argument( + "--python", + action="store", + help="Python version to install in the virtual environment.", + default=f"{sys.version_info.major}.{sys.version_info.minor}", + ) + parser.add_argument( + "--install", + action="store_true", + default=False, + help="Install packages into the virtual environment.", + ) + parser.add_argument( + "--git-ignore", + action="store_true", + default=False, + help="Add .gitignore to the newly created virtual environment.", + ) + parser.add_argument( + "--name", + default=CONDA_ENV_NAME, + type=str, + help="Name of the virtual environment.", + metavar="NAME", + action="store", + ) + return parser.parse_args(argv) + + +def file_exists(path: Union[str, pathlib.PurePath]) -> bool: + return os.path.exists(path) + + +def conda_env_exists(name: Union[str, pathlib.PurePath]) -> bool: + return os.path.exists(CWD / name) + + +def run_process(args: Sequence[str], error_message: str) -> None: + try: + print("Running: " + " ".join(args)) + subprocess.run(args, cwd=os.getcwd(), check=True) + except subprocess.CalledProcessError: + raise VenvError(error_message) + + +def get_conda_env_path(name: str) -> str: + return os.fspath(CWD / name) + + +def install_packages(env_path: str) -> None: + yml = os.fspath(CWD / "environment.yml") + if file_exists(yml): + print(f"CONDA_INSTALLING_YML: {yml}") + run_process( + [ + sys.executable, + "-m", + "conda", + "env", + "update", + "--prefix", + env_path, + "--file", + yml, + ], + "CREATE_CONDA.FAILED_INSTALL_YML", + ) + print("CREATE_CONDA.INSTALLED_YML") + + +def add_gitignore(name: str) -> None: + git_ignore = os.fspath(CWD / name / ".gitignore") + if not file_exists(git_ignore): 
+ print(f"Creating: {git_ignore}") + with open(git_ignore, "w") as f: + f.write("*") + + +def main(argv: Optional[Sequence[str]] = None) -> None: + if argv is None: + argv = [] + args = parse_args(argv) + + if conda_env_exists(args.name): + env_path = get_conda_env_path(args.name) + print(f"EXISTING_CONDA_ENV:{env_path}") + else: + run_process( + [ + sys.executable, + "-m", + "conda", + "create", + "--yes", + "--prefix", + args.name, + f"python={args.python}", + ], + "CREATE_CONDA.ENV_FAILED_CREATION", + ) + env_path = get_conda_env_path(args.name) + print(f"CREATED_CONDA_ENV:{env_path}") + if args.git_ignore: + add_gitignore(args.name) + + if args.install: + install_packages(env_path) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/extensions/positron-python/pythonFiles/create_microvenv.py b/extensions/positron-python/pythonFiles/create_microvenv.py new file mode 100644 index 00000000000..10eae38ab97 --- /dev/null +++ b/extensions/positron-python/pythonFiles/create_microvenv.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import argparse +import os +import pathlib +import subprocess +import sys +from typing import Optional, Sequence + +VENV_NAME = ".venv" +LIB_ROOT = pathlib.Path(__file__).parent / "lib" / "python" +CWD = pathlib.Path.cwd() + + +class MicroVenvError(Exception): + pass + + +def run_process(args: Sequence[str], error_message: str) -> None: + try: + print("Running: " + " ".join(args)) + subprocess.run(args, cwd=os.getcwd(), check=True) + except subprocess.CalledProcessError: + raise MicroVenvError(error_message) + + +def parse_args(argv: Sequence[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser() + + parser.add_argument( + "--name", + default=VENV_NAME, + type=str, + help="Name of the virtual environment.", + metavar="NAME", + action="store", + ) + return parser.parse_args(argv) + + +def create_microvenv(name: str): + run_process( + [sys.executable, os.fspath(LIB_ROOT / "microvenv.py"), name], + "CREATE_MICROVENV.MICROVENV_FAILED_CREATION", + ) + + +def main(argv: Optional[Sequence[str]] = None) -> None: + if argv is None: + argv = [] + args = parse_args(argv) + + print("CREATE_MICROVENV.CREATING_MICROVENV") + create_microvenv(args.name) + print("CREATE_MICROVENV.CREATED_MICROVENV") + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/extensions/positron-python/pythonFiles/create_venv.py b/extensions/positron-python/pythonFiles/create_venv.py new file mode 100644 index 00000000000..092286f986c --- /dev/null +++ b/extensions/positron-python/pythonFiles/create_venv.py @@ -0,0 +1,250 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import argparse +import importlib.util as import_util +import json +import os +import pathlib +import subprocess +import sys +import urllib.request as url_lib +from typing import List, Optional, Sequence, Union + +VENV_NAME = ".venv" +CWD = pathlib.Path.cwd() +MICROVENV_SCRIPT_PATH = pathlib.Path(__file__).parent / "create_microvenv.py" + + +class VenvError(Exception): + pass + + +def parse_args(argv: Sequence[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser() + + parser.add_argument( + "--requirements", + action="append", + default=[], + help="Install additional dependencies into the virtual environment.", + ) + + parser.add_argument( + "--toml", + action="store", + default=None, + help="Install additional dependencies from sources like `pyproject.toml` into the virtual environment.", + ) + parser.add_argument( + "--extras", + action="append", + default=[], + help="Install specific package groups from `pyproject.toml` into the virtual environment.", + ) + + parser.add_argument( + "--git-ignore", + action="store_true", + default=False, + help="Add .gitignore to the newly created virtual environment.", + ) + parser.add_argument( + "--name", + default=VENV_NAME, + type=str, + help="Name of the virtual environment.", + metavar="NAME", + action="store", + ) + parser.add_argument( + "--stdin", + action="store_true", + default=False, + help="Read arguments from stdin.", + ) + return parser.parse_args(argv) + + +def is_installed(module: str) -> bool: + return import_util.find_spec(module) is not None + + +def file_exists(path: Union[str, pathlib.PurePath]) -> bool: + return os.path.exists(path) + + +def venv_exists(name: str) -> bool: + return os.path.exists(CWD / name) and file_exists(get_venv_path(name)) + + +def run_process(args: Sequence[str], error_message: str) -> None: + try: + print("Running: " + " ".join(args)) + subprocess.run(args, cwd=os.getcwd(), check=True) + except subprocess.CalledProcessError: + raise VenvError(error_message) + + +def 
get_venv_path(name: str) -> str: + # See `venv` doc here for more details on binary location: + # https://docs.python.org/3/library/venv.html#creating-virtual-environments + if sys.platform == "win32": + return os.fspath(CWD / name / "Scripts" / "python.exe") + else: + return os.fspath(CWD / name / "bin" / "python") + + +def install_requirements(venv_path: str, requirements: List[str]) -> None: + if not requirements: + return + + for requirement in requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirement}") + run_process( + [venv_path, "-m", "pip", "install", "-r", requirement], + "CREATE_VENV.PIP_FAILED_INSTALL_REQUIREMENTS", + ) + print("CREATE_VENV.PIP_INSTALLED_REQUIREMENTS") + + +def install_toml(venv_path: str, extras: List[str]) -> None: + args = "." if len(extras) == 0 else f".[{','.join(extras)}]" + run_process( + [venv_path, "-m", "pip", "install", "-e", args], + "CREATE_VENV.PIP_FAILED_INSTALL_PYPROJECT", + ) + print("CREATE_VENV.PIP_INSTALLED_PYPROJECT") + + +def upgrade_pip(venv_path: str) -> None: + print("CREATE_VENV.UPGRADING_PIP") + run_process( + [venv_path, "-m", "pip", "install", "--upgrade", "pip"], + "CREATE_VENV.UPGRADE_PIP_FAILED", + ) + print("CREATE_VENV.UPGRADED_PIP") + + +def add_gitignore(name: str) -> None: + git_ignore = CWD / name / ".gitignore" + if not file_exists(git_ignore): + print("Creating: " + os.fspath(git_ignore)) + with open(git_ignore, "w") as f: + f.write("*") + + +def download_pip_pyz(name: str): + url = "https://bootstrap.pypa.io/pip/pip.pyz" + print("CREATE_VENV.DOWNLOADING_PIP") + + try: + with url_lib.urlopen(url) as response: + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + with open(pip_pyz_path, "wb") as out_file: + data = response.read() + out_file.write(data) + out_file.flush() + except Exception: + raise VenvError("CREATE_VENV.DOWNLOAD_PIP_FAILED") + + +def install_pip(name: str): + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + executable = get_venv_path(name) + 
print("CREATE_VENV.INSTALLING_PIP") + run_process( + [executable, pip_pyz_path, "install", "pip"], + "CREATE_VENV.INSTALL_PIP_FAILED", + ) + + +def get_requirements_from_args(args: argparse.Namespace) -> List[str]: + requirements = [] + if args.stdin: + data = json.loads(sys.stdin.read()) + requirements = data.get("requirements", []) + if args.requirements: + requirements.extend(args.requirements) + return requirements + + +def main(argv: Optional[Sequence[str]] = None) -> None: + if argv is None: + argv = [] + args = parse_args(argv) + + use_micro_venv = False + venv_installed = is_installed("venv") + pip_installed = is_installed("pip") + ensure_pip_installed = is_installed("ensurepip") + distutils_installed = is_installed("distutils") + + if not venv_installed: + if sys.platform == "win32": + raise VenvError("CREATE_VENV.VENV_NOT_FOUND") + else: + use_micro_venv = True + if not distutils_installed: + print("Install `python3-distutils` package or equivalent for your OS.") + print("On Debian/Ubuntu: `sudo apt install python3-distutils`") + raise VenvError("CREATE_VENV.DISTUTILS_NOT_INSTALLED") + + if venv_exists(args.name): + # A virtual environment with same name exists. + # We will use the existing virtual environment. + venv_path = get_venv_path(args.name) + print(f"EXISTING_VENV:{venv_path}") + else: + if use_micro_venv: + # `venv` was not found but on this platform we can use `microvenv` + run_process( + [ + sys.executable, + os.fspath(MICROVENV_SCRIPT_PATH), + "--name", + args.name, + ], + "CREATE_VENV.MICROVENV_FAILED_CREATION", + ) + elif not pip_installed or not ensure_pip_installed: + # `venv` was found but `pip` or `ensurepip` was not found. + # We create a venv without `pip` in it. We will later install `pip`. + run_process( + [sys.executable, "-m", "venv", "--without-pip", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + else: + # Both `venv` and `pip` were found. 
So create a .venv normally + run_process( + [sys.executable, "-m", "venv", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + + venv_path = get_venv_path(args.name) + print(f"CREATED_VENV:{venv_path}") + + if args.git_ignore: + add_gitignore(args.name) + + # At this point we have a .venv. Now we handle installing `pip`. + if pip_installed and ensure_pip_installed: + # We upgrade pip if it is already installed. + upgrade_pip(venv_path) + else: + # `pip` was not found, so we download it and install it. + download_pip_pyz(args.name) + install_pip(args.name) + + requirements = get_requirements_from_args(args) + if requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirements}") + install_requirements(venv_path, requirements) + + if args.toml: + print(f"VENV_INSTALLING_PYPROJECT: {args.toml}") + install_toml(venv_path, args.extras) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/extensions/positron-python/pythonFiles/deactivate/bash/deactivate b/extensions/positron-python/pythonFiles/deactivate/bash/deactivate new file mode 100755 index 00000000000..f6dd33425d1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/deactivate/bash/deactivate @@ -0,0 +1,44 @@ +# Same as deactivate in "/bin/activate" +deactivate () { + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! 
"${1:-}" = "nondestructive" ] ; then + unset -f deactivate + fi +} + +# Get the directory of the current script +SCRIPT_DIR=$(dirname "$0") +# Construct the path to envVars.txt relative to the script directory +ENV_FILE="$SCRIPT_DIR/envVars.txt" + +# Read the JSON file and set the variables +TEMP_PS1=$(grep '^PS1=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PATH=$(grep '^PATH=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PYTHONHOME=$(grep '^PYTHONHOME=' $ENV_FILE | cut -d '=' -f 2) +# Initialize the variables required by deactivate function +_OLD_VIRTUAL_PS1="${TEMP_PS1:-}" +_OLD_VIRTUAL_PATH="$TEMP_PATH" +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${TEMP_PYTHONHOME:-}" +fi +deactivate +bash diff --git a/extensions/positron-python/pythonFiles/deactivate/fish/deactivate b/extensions/positron-python/pythonFiles/deactivate/fish/deactivate new file mode 100755 index 00000000000..3a9d50ccde2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/deactivate/fish/deactivate @@ -0,0 +1,44 @@ +# Same as deactivate in "/bin/activate" +deactivate () { + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! 
"${1:-}" = "nondestructive" ] ; then + unset -f deactivate + fi +} + +# Get the directory of the current script +SCRIPT_DIR=$(dirname "$0") +# Construct the path to envVars.txt relative to the script directory +ENV_FILE="$SCRIPT_DIR/envVars.txt" + +# Read the JSON file and set the variables +TEMP_PS1=$(grep '^PS1=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PATH=$(grep '^PATH=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PYTHONHOME=$(grep '^PYTHONHOME=' $ENV_FILE | cut -d '=' -f 2) +# Initialize the variables required by deactivate function +_OLD_VIRTUAL_PS1="${TEMP_PS1:-}" +_OLD_VIRTUAL_PATH="$TEMP_PATH" +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${TEMP_PYTHONHOME:-}" +fi +deactivate +fish diff --git a/extensions/positron-python/pythonFiles/deactivate/powershell/deactivate.ps1 b/extensions/positron-python/pythonFiles/deactivate/powershell/deactivate.ps1 new file mode 100644 index 00000000000..49365e0fbef --- /dev/null +++ b/extensions/positron-python/pythonFiles/deactivate/powershell/deactivate.ps1 @@ -0,0 +1,11 @@ +# Load dotenv-style file and restore environment variables +Get-Content -Path "$PSScriptRoot\envVars.txt" | ForEach-Object { + # Split each line into key and value at the first '=' + $parts = $_ -split '=', 2 + if ($parts.Count -eq 2) { + $key = $parts[0].Trim() + $value = $parts[1].Trim() + # Set the environment variable + Set-Item -Path "env:$key" -Value $value + } +} diff --git a/extensions/positron-python/pythonFiles/deactivate/zsh/deactivate b/extensions/positron-python/pythonFiles/deactivate/zsh/deactivate new file mode 100755 index 00000000000..8b059318f98 --- /dev/null +++ b/extensions/positron-python/pythonFiles/deactivate/zsh/deactivate @@ -0,0 +1,44 @@ +# Same as deactivate in "/bin/activate" +deactivate () { + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export 
PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + unset -f deactivate + fi +} + +# Get the directory of the current script +SCRIPT_DIR=$(dirname "$0") +# Construct the path to envVars.txt relative to the script directory +ENV_FILE="$SCRIPT_DIR/envVars.txt" + +# Read the JSON file and set the variables +TEMP_PS1=$(grep '^PS1=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PATH=$(grep '^PATH=' $ENV_FILE | cut -d '=' -f 2) +TEMP_PYTHONHOME=$(grep '^PYTHONHOME=' $ENV_FILE | cut -d '=' -f 2) +# Initialize the variables required by deactivate function +_OLD_VIRTUAL_PS1="${TEMP_PS1:-}" +_OLD_VIRTUAL_PATH="$TEMP_PATH" +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${TEMP_PYTHONHOME:-}" +fi +deactivate +zsh diff --git a/extensions/positron-python/pythonFiles/download_get_pip.py b/extensions/positron-python/pythonFiles/download_get_pip.py new file mode 100644 index 00000000000..b8238d60f26 --- /dev/null +++ b/extensions/positron-python/pythonFiles/download_get_pip.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import json +import os +import pathlib +import urllib.request as url_lib +from packaging.version import parse as version_parser + +EXTENSION_ROOT = pathlib.Path(__file__).parent.parent +GET_PIP_DEST = EXTENSION_ROOT / "pythonFiles" +PIP_PACKAGE = "pip" +PIP_VERSION = "latest" # Can be "latest", or specific version "23.1.2" + + +def _get_package_data(): + json_uri = "https://pypi.org/pypi/{0}/json".format(PIP_PACKAGE) + # Response format: https://warehouse.readthedocs.io/api-reference/json/#project + # Release metadata format: https://github.com/pypa/interoperability-peps/blob/master/pep-0426-core-metadata.rst + with url_lib.urlopen(json_uri) as response: + return json.loads(response.read()) + + +def _download_and_save(root, version): + root = os.getcwd() if root is None or root == "." else root + url = f"https://raw.githubusercontent.com/pypa/get-pip/{version}/public/get-pip.py" + print(url) + with url_lib.urlopen(url) as response: + data = response.read() + get_pip_file = pathlib.Path(root) / "get-pip.py" + get_pip_file.write_bytes(data) + + +def main(root): + data = _get_package_data() + + if PIP_VERSION == "latest": + use_version = max(data["releases"].keys(), key=version_parser) + else: + use_version = PIP_VERSION + + _download_and_save(root, use_version) + + +if __name__ == "__main__": + main(GET_PIP_DEST) diff --git a/extensions/positron-python/pythonFiles/get_output_via_markers.py b/extensions/positron-python/pythonFiles/get_output_via_markers.py new file mode 100644 index 00000000000..00dd57065b3 --- /dev/null +++ b/extensions/positron-python/pythonFiles/get_output_via_markers.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import runpy +import sys + +# Sometimes executing scripts can print out stuff before the actual output is +# printed. For eg. when activating conda. Hence, printing out markers to make +# it more resilient to pull the output. 
+print(">>>PYTHON-EXEC-OUTPUT") + +module = sys.argv[1] +try: + if module == "-c": + ns = {} + code = sys.argv[2] + del sys.argv[2] + del sys.argv[0] + exec(code, ns, ns) + elif module.startswith("-m"): + moduleName = sys.argv[2] + sys.argv = sys.argv[2:] # It should begin with the module name. + runpy.run_module(moduleName, run_name="__main__", alter_sys=True) + elif module.endswith(".py"): + sys.argv = sys.argv[1:] + runpy.run_path(module, run_name="__main__") + elif module.startswith("-"): + raise NotImplementedError(sys.argv) + else: + runpy.run_module(module, run_name="__main__", alter_sys=True) +finally: + print("<< Optional[Requirement]: + try: + req = Requirement(line.strip("\\")) + if req.marker is None: + return req + elif req.marker.evaluate(): + return req + except Exception: + pass + return None + + +def process_requirements(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + for n, line in enumerate(req_file.read_text(encoding="utf-8").splitlines()): + if line.startswith(("#", "-", " ")) or line == "": + continue + + req = parse_requirements(line) + if req: + try: + # Check if package is installed + metadata(req.name) + except Exception: + diagnostics.append( + { + "line": n, + "character": 0, + "endLine": n, + "endCharacter": len(req.name), + "package": req.name, + "code": "not-installed", + "severity": SEVERITY, + } + ) + return diagnostics + + +def get_pos(lines: List[str], text: str) -> Tuple[int, int, int, int]: + for n, line in enumerate(lines): + index = line.find(text) + if index >= 0: + return n, index, n, index + len(text) + return (0, 0, 0, 0) + + +def process_pyproject(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + try: + raw_text = req_file.read_text(encoding="utf-8") + pyproject = tomli.loads(raw_text) + except Exception: + return diagnostics + + lines = raw_text.splitlines() + reqs = pyproject.get("project", {}).get("dependencies", []) + for raw_req in reqs: + req = 
parse_requirements(raw_req) + n, start, _, end = get_pos(lines, raw_req) + if req: + try: + # Check if package is installed + metadata(req.name) + except Exception: + diagnostics.append( + { + "line": n, + "character": start, + "endLine": n, + "endCharacter": end, + "package": req.name, + "code": "not-installed", + "severity": SEVERITY, + } + ) + return diagnostics + + +def get_diagnostics(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + if not req_file.exists(): + return diagnostics + + if req_file.name == "pyproject.toml": + diagnostics = process_pyproject(req_file) + else: + diagnostics = process_requirements(req_file) + + return diagnostics + + +def main(): + args = parse_args() + diagnostics = get_diagnostics(pathlib.Path(args.FILEPATH)) + print(json.dumps(diagnostics, ensure_ascii=False)) + + +if __name__ == "__main__": + main() diff --git a/extensions/positron-python/pythonFiles/interpreterInfo.py b/extensions/positron-python/pythonFiles/interpreterInfo.py new file mode 100644 index 00000000000..f15da9e48ea --- /dev/null +++ b/extensions/positron-python/pythonFiles/interpreterInfo.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import json +import sys + +obj = {} +obj["versionInfo"] = tuple(sys.version_info) +obj["sysPrefix"] = sys.prefix +obj["sysVersion"] = sys.version +obj["is64Bit"] = sys.maxsize > 2**32 + +print(json.dumps(obj)) diff --git a/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.in b/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.in new file mode 100644 index 00000000000..826bcf1e158 --- /dev/null +++ b/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.in @@ -0,0 +1,18 @@ +# This file is used to generate requirements.txt. +# To update requirements.txt, run the following commands. 
+# Use Python 3.8 when creating the environment or using pip-tools +# 1) pip install pip-tools +# 2) pip-compile --generate-hashes --upgrade pythonFiles\jedilsp_requirements\requirements.in + +jedi-language-server>=0.34.3 +pygls>=0.10.3 +# --- Start Positron --- +docstring-to-markdown==0.13 +markdown-it-py +# TODO(seem): We're sticking to pydantic v1 since v2 depends on pydantic-core, which only ships +# wheels for CPython and PyPy, and we require impementation-agnostic wheels (via the 'pip install +# --implementation py' arg in scripts/vendoring.py). We inherited that requirement from upstream, +# but we could revisit it. +pydantic<2.0.0 +pygments +# --- End Positron --- diff --git a/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.txt b/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.txt new file mode 100644 index 00000000000..fdaa547f714 --- /dev/null +++ b/extensions/positron-python/pythonFiles/jedilsp_requirements/requirements.txt @@ -0,0 +1,109 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --generate-hashes pythonFiles/jedilsp_requirements/requirements.in +# +attrs==23.2.0 \ + --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ + --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 + # via + # cattrs + # lsprotocol +cattrs==23.2.3 \ + --hash=sha256:0341994d94971052e9ee70662542699a3162ea1e0c62f7ce1b4a57f563685108 \ + --hash=sha256:a934090d95abaa9e911dac357e3a8699e0b4b14f8529bcc7d2b1ad9d51672b9f + # via + # jedi-language-server + # lsprotocol +docstring-to-markdown==0.13 \ + --hash=sha256:3025c428638ececae920d6d26054546a20335af3504a145327e657e7ad7ce1ce \ + --hash=sha256:aa487059d0883e70e54da25c7b230e918d9e4d40f23d6dfaa2b73e4225b2d7dd + # via + # -r pythonFiles/jedilsp_requirements/requirements.in + # jedi-language-server +exceptiongroup==1.2.0 \ + 
--hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \ + --hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68 + # via cattrs +jedi==0.19.1 \ + --hash=sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd \ + --hash=sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0 + # via jedi-language-server +jedi-language-server==0.41.2 \ + --hash=sha256:865a93546b3711713eaca1a76b3a5d4aaacd4594c87f777aa8d54da37ae5aeca \ + --hash=sha256:bfa112d88c0bd21872522b96c64e8054854f767449bbb3d858be04fb2cda0606 + # via -r pythonFiles/jedilsp_requirements/requirements.in +lsprotocol==2023.0.0 \ + --hash=sha256:c9d92e12a3f4ed9317d3068226592860aab5357d93cf5b2451dc244eee8f35f2 \ + --hash=sha256:e85fc87ee26c816adca9eb497bb3db1a7c79c477a11563626e712eaccf926a05 + # via + # jedi-language-server + # pygls +markdown-it-py==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via -r pythonFiles/jedilsp_requirements/requirements.in +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +parso==0.8.3 \ + --hash=sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0 \ + --hash=sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75 + # via jedi +pydantic==1.10.14 \ + --hash=sha256:08b6ec0917c30861e3fe71a93be1648a2aa4f62f866142ba21670b24444d7fd8 \ + --hash=sha256:0fbb503bbbbab0c588ed3cd21975a1d0d4163b87e360fec17a792f7d8c4ff29f \ + --hash=sha256:1245f4f61f467cb3dfeced2b119afef3db386aec3d24a22a1de08c65038b255f \ + --hash=sha256:13e86a19dca96373dcf3190fcb8797d40a6f12f154a244a8d1e8e03b8f280593 \ + --hash=sha256:21efacc678a11114c765eb52ec0db62edffa89e9a562a94cbf8fa10b5db5c046 \ + 
--hash=sha256:23d47a4b57a38e8652bcab15a658fdb13c785b9ce217cc3a729504ab4e1d6bc9 \ + --hash=sha256:24a7679fab2e0eeedb5a8924fc4a694b3bcaac7d305aeeac72dd7d4e05ecbebf \ + --hash=sha256:282613a5969c47c83a8710cc8bfd1e70c9223feb76566f74683af889faadc0ea \ + --hash=sha256:336709883c15c050b9c55a63d6c7ff09be883dbc17805d2b063395dd9d9d0022 \ + --hash=sha256:412ab4a3f6dbd2bf18aefa9f79c7cca23744846b31f1d6555c2ee2b05a2e14ca \ + --hash=sha256:466669501d08ad8eb3c4fecd991c5e793c4e0bbd62299d05111d4f827cded64f \ + --hash=sha256:46f17b832fe27de7850896f3afee50ea682220dd218f7e9c88d436788419dca6 \ + --hash=sha256:49a46a0994dd551ec051986806122767cf144b9702e31d47f6d493c336462597 \ + --hash=sha256:4ae57b4d8e3312d486e2498d42aed3ece7b51848336964e43abbf9671584e67f \ + --hash=sha256:53e3819bd20a42470d6dd0fe7fc1c121c92247bca104ce608e609b59bc7a77ee \ + --hash=sha256:596f12a1085e38dbda5cbb874d0973303e34227b400b6414782bf205cc14940c \ + --hash=sha256:646b2b12df4295b4c3148850c85bff29ef6d0d9621a8d091e98094871a62e5c7 \ + --hash=sha256:798a3d05ee3b71967844a1164fd5bdb8c22c6d674f26274e78b9f29d81770c4e \ + --hash=sha256:7f4fcec873f90537c382840f330b90f4715eebc2bc9925f04cb92de593eae054 \ + --hash=sha256:82d886bd3c3fbeaa963692ef6b643159ccb4b4cefaf7ff1617720cbead04fd1d \ + --hash=sha256:8e3a76f571970fcd3c43ad982daf936ae39b3e90b8a2e96c04113a369869dc87 \ + --hash=sha256:8ee853cd12ac2ddbf0ecbac1c289f95882b2d4482258048079d13be700aa114c \ + --hash=sha256:9d578ac4bf7fdf10ce14caba6f734c178379bd35c486c6deb6f49006e1ba78a7 \ + --hash=sha256:a42c7d17706911199798d4c464b352e640cab4351efe69c2267823d619a937e5 \ + --hash=sha256:aad4e10efa5474ed1a611b6d7f0d130f4aafadceb73c11d9e72823e8f508e663 \ + --hash=sha256:ad8c2bc677ae5f6dbd3cf92f2c7dc613507eafe8f71719727cbc0a7dec9a8c01 \ + --hash=sha256:bc3ed06ab13660b565eed80887fcfbc0070f0aa0691fbb351657041d3e874efe \ + --hash=sha256:bfb113860e9288d0886e3b9e49d9cf4a9d48b441f52ded7d96db7819028514cc \ + --hash=sha256:c37c28449752bb1f47975d22ef2882d70513c546f8f37201e0fec3a97b816eee \ + 
--hash=sha256:c66609e138c31cba607d8e2a7b6a5dc38979a06c900815495b2d90ce6ded35b4 \ + --hash=sha256:d604be0f0b44d473e54fdcb12302495fe0467c56509a2f80483476f3ba92b33c \ + --hash=sha256:d986e115e0b39604b9eee3507987368ff8148222da213cd38c359f6f57b3b347 \ + --hash=sha256:dba49d52500c35cfec0b28aa8b3ea5c37c9df183ffc7210b10ff2a415c125c4a \ + --hash=sha256:e897c9f35281f7889873a3e6d6b69aa1447ceb024e8495a5f0d02ecd17742a7f \ + --hash=sha256:f9f674b5c3bebc2eba401de64f29948ae1e646ba2735f884d1594c5f675d6f2a \ + --hash=sha256:fa7790e94c60f809c95602a26d906eba01a0abee9cc24150e4ce2189352deb1b + # via -r pythonFiles/jedilsp_requirements/requirements.in +pygls==1.2.1 \ + --hash=sha256:04f9b9c115b622dcc346fb390289066565343d60245a424eca77cb429b911ed8 \ + --hash=sha256:7dcfcf12b6f15beb606afa46de2ed348b65a279c340ef2242a9a35c22eeafe94 + # via + # -r pythonFiles/jedilsp_requirements/requirements.in + # jedi-language-server +pygments==2.17.2 \ + --hash=sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c \ + --hash=sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367 + # via -r pythonFiles/jedilsp_requirements/requirements.in +typing-extensions==4.9.0 \ + --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ + --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd + # via + # cattrs + # jedi-language-server + # pydantic diff --git a/extensions/positron-python/pythonFiles/linter.py b/extensions/positron-python/pythonFiles/linter.py new file mode 100644 index 00000000000..58ad9397f58 --- /dev/null +++ b/extensions/positron-python/pythonFiles/linter.py @@ -0,0 +1,56 @@ +import subprocess +import sys + + +linter_settings = { + "pylint": { + "args": ["--reports=n", "--output-format=json"], + }, + "flake8": { + "args": ["--format", "%(row)d,%(col)d,%(code).1s,%(code)s:%(text)s"], + }, + "bandit": { + "args": [ + "-f", + "custom", + "--msg-template", + "{line},{col},{severity},{test_id}:{msg}", + "-n", + "-1", + ], 
+ }, + "mypy": {"args": []}, + "prospector": { + "args": ["--absolute-paths", "--output-format=json"], + }, + "pycodestyle": { + "args": ["--format", "%(row)d,%(col)d,%(code).1s,%(code)s:%(text)s"], + }, + "pydocstyle": { + "args": [], + }, + "pylama": {"args": ["--format=parsable"]}, +} + + +def main(): + invoke = sys.argv[1] + if invoke == "-m": + linter = sys.argv[2] + args = ( + [sys.executable, "-m", linter] + + linter_settings[linter]["args"] + + sys.argv[3:] + ) + else: + linter = sys.argv[2] + args = [sys.argv[3]] + linter_settings[linter]["args"] + sys.argv[4:] + + if hasattr(subprocess, "run"): + subprocess.run(args, encoding="utf-8", stdout=sys.stdout, stderr=sys.stderr) + else: + subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/extensions/positron-python/pythonFiles/normalizeSelection.py b/extensions/positron-python/pythonFiles/normalizeSelection.py new file mode 100644 index 00000000000..f0397b7db3a --- /dev/null +++ b/extensions/positron-python/pythonFiles/normalizeSelection.py @@ -0,0 +1,335 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import ast +import json +import re +import sys +import textwrap +from typing import Iterable + + +def split_lines(source): + """ + Split selection lines in a version-agnostic way. + + Python grammar only treats \r, \n, and \r\n as newlines. + But splitlines() in Python 3 has a much larger list: for example, it also includes \v, \f. + As such, this function will split lines across all Python versions. + """ + return re.split(r"[\n\r]+", source) + + +def _get_statements(selection): + """ + Process a multiline selection into a list of its top-level statements. + This will remove empty newlines around and within the selection, dedent it, + and split it using the result of `ast.parse()`. + """ + + # Remove blank lines within the selection to prevent the REPL from thinking the block is finished. 
+ lines = (line for line in split_lines(selection) if line.strip() != "") + + # Dedent the selection and parse it using the ast module. + # Note that leading comments in the selection will be discarded during parsing. + source = textwrap.dedent("\n".join(lines)) + tree = ast.parse(source) + + # We'll need the dedented lines to rebuild the selection. + lines = split_lines(source) + + # Get the line ranges for top-level blocks returned from parsing the dedented text + # and split the selection accordingly. + # tree.body is a list of AST objects, which we rely on to extract top-level statements. + # If we supported Python 3.8+ only we could use the lineno and end_lineno attributes of each object + # to get the boundaries of each block. + # However, earlier Python versions only have the lineno attribute, which is the range start position (1-indexed). + # Therefore, to retrieve the end line of each block in a version-agnostic way we need to do + # `end = next_block.lineno - 1` + # for all blocks except the last one, which will just run until the last line. + ends = [] + for node in tree.body[1:]: + line_end = node.lineno - 1 + # Special handling of decorators: + # In Python 3.8 and higher, decorators are not taken into account in the value returned by lineno, + # and we have to use the length of the decorator_list array to compute the actual start line. + # Before that, lineno takes into account decorators, so this offset check is unnecessary. + # Also, not all AST objects can have decorators. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. 
+ line_end -= len(getattr(node, "decorator_list")) + ends.append(line_end) + ends.append(len(lines)) + + for node, end in zip(tree.body, ends): + # Given this selection: + # 1: if (m > 0 and + # 2: n < 3): + # 3: print('foo') + # 4: value = 'bar' + # + # The first block would have lineno = 1, and the second block lineno = 4 + start = node.lineno - 1 + + # Special handling of decorators similar to what's above. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. + start -= len(getattr(node, "decorator_list")) + block = "\n".join(lines[start:end]) + + # If the block is multiline, add an extra newline character at its end. + # This way, when joining blocks back together, there will be a blank line between each multiline statement + # and no blank lines between single-line statements, or it would look like this: + # >>> x = 22 + # >>> + # >>> total = x + 30 + # >>> + # Note that for the multiline parentheses case this newline is redundant, + # since the closing parenthesis terminates the statement already. + # This means that for this pattern we'll end up with: + # >>> x = [ + # ... 1 + # ... ] + # >>> + # >>> y = [ + # ... 2 + # ...] + if end - start > 1: + block += "\n" + + yield block + + +def normalize_lines(selection): + """ + Normalize the text selection received from the extension. + + If it is a single line selection, dedent it and append a newline and + send it back to the extension. + Otherwise, sanitize the multiline selection before returning it: + split it in a list of top-level statements + and add newlines between each of them so the REPL knows where each block ends. + """ + try: + # Parse the selection into a list of top-level blocks. + # We don't differentiate between single and multiline statements + # because it's not a perf bottleneck, + # and the overhead from splitting and rejoining strings in the multiline case is one-off. 
+ statements = _get_statements(selection) + + # Insert a newline between each top-level statement, and append a newline to the selection. + source = "\n".join(statements) + "\n" + if selection[-2] == "}" or selection[-2] == "]": + source = source[:-1] + except Exception: + # If there's a problem when parsing statements, + # append a blank line to end the block and send it as-is. + source = selection + "\n\n" + + return source + + +top_level_nodes = [] +min_key = None + + +def check_exact_exist(top_level_nodes, start_line, end_line): + exact_nodes = [] + for node in top_level_nodes: + if node.lineno == start_line and node.end_lineno == end_line: + exact_nodes.append(node) + + return exact_nodes + + +def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): + """ + Intended to traverse through a user's given file content and find, collect all appropriate lines + that should be sent to the REPL in case of smart selection. + This could be exact statement such as just a single line print statement, + or a multiline dictionary, or differently styled multi-line list comprehension, etc. + Then call the normalize_lines function to normalize our smartly selected code block. + """ + parsed_file_content = None + + try: + parsed_file_content = ast.parse(wholeFileContent) + except Exception: + # Handle case where user is attempting to run code where file contains deprecated Python code. + # Let typescript side know and show warning message. + return { + "normalized_smart_result": "deprecated", + "which_line_next": 0, + } + + smart_code = "" + should_run_top_blocks = [] + + # Purpose of this loop is to fetch and collect all the + # AST top level nodes, and its node.body as child nodes. + # Individual nodes will contain information like + # the start line, end line and get source segment information + # that will be used to smartly select, and send normalized code. 
+ for node in ast.iter_child_nodes(parsed_file_content): + top_level_nodes.append(node) + + ast_types_with_nodebody = ( + ast.Module, + ast.Interactive, + ast.Expression, + ast.FunctionDef, + ast.AsyncFunctionDef, + ast.ClassDef, + ast.For, + ast.AsyncFor, + ast.While, + ast.If, + ast.With, + ast.AsyncWith, + ast.Try, + ast.Lambda, + ast.IfExp, + ast.ExceptHandler, + ) + if isinstance(node, ast_types_with_nodebody) and isinstance(node.body, Iterable): + for child_nodes in node.body: + top_level_nodes.append(child_nodes) + + exact_nodes = check_exact_exist(top_level_nodes, start_line, end_line) + + # Just return the exact top level line, if present. + if len(exact_nodes) > 0: + which_line_next = 0 + for same_line_node in exact_nodes: + should_run_top_blocks.append(same_line_node) + smart_code += f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n" + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": smart_code, + "which_line_next": which_line_next, + # --- Start Positron --- + # Return additional info required by a Positron statement range provider. + "start_line": should_run_top_blocks[0].lineno, + "start_character": should_run_top_blocks[0].col_offset, + "end_line": should_run_top_blocks[-1].end_lineno, + "end_character": should_run_top_blocks[-1].end_col_offset, + # --- End Positron --- + } + + # For each of the nodes in the parsed file content, + # add the appropriate source code line(s) to be sent to the REPL, dependent on + # user is trying to send and execute single line/statement or multiple with smart selection. + for top_node in ast.iter_child_nodes(parsed_file_content): + if start_line == top_node.lineno and end_line == top_node.end_lineno: + should_run_top_blocks.append(top_node) + + smart_code += f"{ast.get_source_segment(wholeFileContent, top_node)}\n" + break # If we found exact match, don't waste computation in parsing extra nodes. 
+ elif start_line >= top_node.lineno and end_line <= top_node.end_lineno: + # Case to apply smart selection for multiple line. + # This is the case for when we have to add multiple lines that should be included in the smart send. + # For example: + # 'my_dictionary': { + # 'Audi': 'Germany', + # 'BMW': 'Germany', + # 'Genesis': 'Korea', + # } + # with the mouse cursor at 'BMW': 'Germany', should send all of the lines that pertains to my_dictionary. + + should_run_top_blocks.append(top_node) + + smart_code += str(ast.get_source_segment(wholeFileContent, top_node)) + smart_code += "\n" + + # --- Start Positron --- + # If we get here, we may still be between top-level nodes -- try to find the next one. + if not should_run_top_blocks: + for top_node in ast.iter_child_nodes(parsed_file_content): + if top_node.lineno > start_line: + should_run_top_blocks.append(top_node) + smart_code += f"{ast.get_source_segment(wholeFileContent, top_node)}\n" + break + # --- End Positron --- + + normalized_smart_result = normalize_lines(smart_code) + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": normalized_smart_result, + "which_line_next": which_line_next, + # --- Start Positron --- + # Return additional info required by a Positron statement range provider. + "start_line": should_run_top_blocks[0].lineno, + "start_character": should_run_top_blocks[0].col_offset, + "end_line": should_run_top_blocks[-1].end_lineno, + "end_character": should_run_top_blocks[-1].end_col_offset, + # --- End Positron --- + } + + +# Look at the last top block added, find lineno for the next upcoming block, +# This will be used in calculating lineOffset to move cursor in VS Code. 
+def get_next_block_lineno(which_line_next): + last_ran_lineno = int(which_line_next[-1].end_lineno) + next_lineno = int(which_line_next[-1].end_lineno) + + for reverse_node in top_level_nodes: + if reverse_node.lineno > last_ran_lineno: + next_lineno = reverse_node.lineno + break + return next_lineno + + +if __name__ == "__main__": + # Content is being sent from the extension as a JSON object. + # Decode the data from the raw bytes. + stdin = sys.stdin if sys.version_info < (3,) else sys.stdin.buffer + raw = stdin.read() + contents = json.loads(raw.decode("utf-8")) + # Empty highlight means user has not explicitly selected specific text. + empty_Highlight = contents.get("emptyHighlight", False) + + # We also get the activeEditor selection start line and end line from the typescript VS Code side. + # Remember to add 1 to each of the received since vscode starts line counting from 0 . + vscode_start_line = contents["startLine"] + 1 + vscode_end_line = contents["endLine"] + 1 + + # Send the normalized code back to the extension in a JSON object. + data = None + which_line_next = 0 + + if ( + empty_Highlight + and contents.get("smartSendExperimentEnabled") + and contents.get("smartSendSettingsEnabled") + ): + result = traverse_file( + contents["wholeFileContent"], + vscode_start_line, + vscode_end_line, + not empty_Highlight, + ) + normalized = result["normalized_smart_result"] + which_line_next = result["which_line_next"] + if normalized == "deprecated": + data = json.dumps({"normalized": normalized}) + else: + data = json.dumps( + # --- Start Positron --- + # Return additional info required by a Positron statement range provider. 
+ { + "normalized": normalized, + "nextBlockLineno": result["which_line_next"], + "startLine": result["start_line"], + "endLine": result["end_line"], + "startCharacter": result["start_character"], + "endCharacter": result["end_character"], + } + # --- End Positron --- + ) + else: + normalized = normalize_lines(contents["code"]) + data = json.dumps({"normalized": normalized}) + + stdout = sys.stdout if sys.version_info < (3,) else sys.stdout.buffer + stdout.write(data.encode("utf-8")) + stdout.close() diff --git a/extensions/positron-python/pythonFiles/positron/data-science-requirements.txt b/extensions/positron-python/pythonFiles/positron/data-science-requirements.txt new file mode 100644 index 00000000000..cad010f85b6 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/data-science-requirements.txt @@ -0,0 +1,7 @@ +pandas +polars +matplotlib +numpy +torch +fastapi +sqlalchemy diff --git a/extensions/positron-python/pythonFiles/positron/pinned-test-requirements.txt b/extensions/positron-python/pythonFiles/positron/pinned-test-requirements.txt new file mode 100644 index 00000000000..c6aaa5b24b5 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/pinned-test-requirements.txt @@ -0,0 +1,15 @@ +fastcore==1.5.29 +ipykernel==6.29.3 +ipywidgets==8.1.2 +matplotlib==3.8.2; python_version >= '3.9' +matplotlib==3.7.4; python_version < '3.9' +numpy==1.26.3; python_version >= '3.9' +numpy==1.24.4; python_version < '3.9' +pandas==2.2.0; python_version >= '3.9' +pandas==2.0.3; python_version < '3.9' +pytest==8.0.2 +pytest-asyncio==0.23.5 +pytest-mock==3.12.0 +polars==0.20.13 +torch==2.1.2; python_version < '3.12' +sqlalchemy==2.0.28 diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/__init__.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/__init__.py new file mode 100644 index 00000000000..680fa06b67c --- /dev/null +++ 
b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/__init__.py @@ -0,0 +1,3 @@ +# +# Copyright (C) 2023 Posit Software, PBC. All rights reserved. +# diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/_pydoc.css b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/_pydoc.css new file mode 100644 index 00000000000..8e5f0a81b50 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/_pydoc.css @@ -0,0 +1,162 @@ +/* + Copyright (C) 2023 Posit Software, PBC. All rights reserved. + CSS file for Help Page +*/ + +:root { + /* Provide fallbacks if we're running outside of Positron */ + --editor-font-family: var(--vscode-editor-font-family, "SF Mono", Monaco, Menlo, Courier, monospace); + --font-family: var(--vscode-font-family, -apple-system, BlinkMacSystemFont, sans-serif); + --foreground: var(--vscode-foreground, black); + --theme-foreground: var(--vscode-theme-foreground, black); + --text-code-block-background: var(--vscode-text-code-block-background, white); + --text-code-block-border: #ddd; +} + +body { + font-family: var(--vscode-font-family, -apple-system, BlinkMacSystemFont, sans-serif); + font-size: var(--vscode-font-size, 13px); + background: var(--vscode-editor-background, white); + color: var(--vscode-editor-foreground, black); +} + +a { + color: var(--vscode-textLink-foreground); +} + + +/* Autosummary tables (e.g. attribute lists in class docs) */ + +table.autosummary { + width: 100%; + border-spacing: 0; + border-collapse: collapse; + border: 0; +} + +table.autosummary tr td { + padding: 0.75rem 0; +} + +table.autosummary tr:not(:first-child) { + border-top: 1px solid var(--vscode-textBlockQuote-border); +} + +table.autosummary tr td:first-child { + padding-left: 0.75rem; + padding-right: 0.75rem; + white-space: nowrap; +} + +/* Multicolumn tables (e.g. 
modules in the root index page) */ + +td.multicolumn { + width: 25%; + vertical-align: bottom; +} + +/* Code blocks */ + +pre code { + /* Fix vertical space between elements, see: https://stackoverflow.com/questions/5078239/how-to-remove-the-space-between-inline-inline-block-elements/5078350 */ + display: flex; + flex-direction: column; +} + +code, .code, div.highlight { + font-family: var(--vscode-editor-font-family); + font-size: var(--vscode-editor-font-size, 12px); +} + +div.highlight pre { + margin: 0; +} + +div.highlight { + padding: 0.7rem; + background-color: var(--vscode-textBlockQuote-background); + border: 1px solid var(--vscode-textBlockQuote-border); + border-radius: .25rem; + line-height: 125%; + overflow-x: auto; +} + +div.package-version{ + font-size: 0.6rem; + text-align: right; +} + +/* + Pygments (syntax highlighting) + Source: https://github.com/richleland/pygments-css/blob/master/default.css +*/ + +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #408080; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #BC7A00 } /* Comment.Preproc */ +.highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #408080; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* 
Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #7D9029 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #A0A000 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer 
*/ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { color: #BA2121 } /* Literal.String.Char */ +.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #BB6688 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/access_keys.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/access_keys.py new file mode 100644 index 00000000000..b98f6063d87 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/access_keys.py @@ -0,0 +1,64 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. 
+# + +import json +from typing import Any, Dict, Hashable, cast + +from .inspectors import INSPECTOR_CLASSES, PositronInspector, get_inspector +from .utils import JsonData + + +def encode_access_key(key: Any) -> str: + # If it's not hashable, raise an error. + if not isinstance(key, Hashable): + raise TypeError(f"Key {key} is not hashable.") + + # If it's a blank string, return it as-is. + if isinstance(key, str) and key == "": + return key + + # Get the key's inspector and serialize the key. + json_data = get_inspector(key).to_json() + # Pass separators to json.dumps to remove whitespace after "," and ":". + return json.dumps(json_data, separators=(",", ":")) + + +# Since access keys are serialized to JSON, we can't use get_inspector to find the inspector +# corresponding to a serialized access key. We instead use the key's type's qualname, but need this +# dict to map known and supported qualnames to keys that are accepted by get_inspector. +_ACCESS_KEY_QUALNAME_TO_INSPECTOR_KEY: Dict[str, str] = { + "int": "number", + "float": "number", + "complex": "number", + "bool": "boolean", + "str": "string", + "range": "collection", + "type": "class", +} + + +def decode_access_key(access_key: str) -> Any: + # If it's a blank string, return it as-is. + if access_key == "": + return access_key + + # Deserialize the access key. + json_data: JsonData = json.loads(access_key) + + # Validate the json data structure. + if ( + not isinstance(json_data, dict) + or not isinstance(json_data["type"], str) + or not isinstance(json_data["data"], (dict, list, str, int, float, bool, type(None))) + ): + raise ValueError(f"Unexpected json data structure: {json_data}") + + # Get the inspector for this type. 
+ # TODO(pyright): cast shouldn't be necessary, recheck in a future version of pyright + type_name = cast(str, json_data["type"]) + inspector_key = _ACCESS_KEY_QUALNAME_TO_INSPECTOR_KEY.get(type_name, type_name) + inspector_cls = INSPECTOR_CLASSES.get(inspector_key, PositronInspector) + + # Reconstruct the access key's original object using the deserialized JSON data. + return inspector_cls.from_json(json_data) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections.py new file mode 100644 index 00000000000..8bf793dfd34 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections.py @@ -0,0 +1,529 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# +from __future__ import annotations + +import logging +import uuid +from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypedDict, Union + +import comm + +from .connections_comm import ( + ConnectionsBackendMessageContent, + ContainsDataRequest, + GetIconRequest, + ListFieldsRequest, + ListObjectsRequest, + ObjectSchema, + PreviewObjectRequest, +) +from .positron_comm import CommMessage, JsonRpcErrorCode, PositronComm +from .third_party import pd_, sqlalchemy_ +from .utils import JsonData, JsonRecord, safe_isinstance + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + from comm.base_comm import BaseComm + + from .positron_ipkernel import PositronIPyKernel + + +logger = logging.getLogger(__name__) + + +class ConnectionObjectInfo(TypedDict): + icon: Optional[str] + contains: Union[Dict[str, "ConnectionObjectInfo"], Optional[str]] + + +class ConnectionObject(TypedDict): + name: str + kind: str + + +class ConnectionObjectFields(TypedDict): + name: str + dtype: str + + +class Connection: + """ + Base class representing a connection to a data source. 
+ """ + + type: str + host: str + display_name: Optional[str] = None + icon: Optional[str] = None + conn: Any = None + actions: Any = None + + def disconnect(self) -> None: + "Callback executed when the connection is closed in the UI." + raise NotImplementedError() + + def list_object_types(self) -> Dict[str, ConnectionObjectInfo]: + """ + Returns a dictionary of object types and their properties. + + We expect the `contains` to be the string `"data"` if the object + contains data (eg is a table or a view). `contains` can also + be a dictionary listing sub objects in the hirarchy in this same + format, but this is currently not used. + + The `icon` property is the path to an icon to be used by the UI. + """ + raise NotImplementedError() + + def list_objects(self, path: List[ObjectSchema]) -> List[ConnectionObject]: + """ + Returns the list of objects at the given path. + + The returned object is a list of dictionaries with the: + - name: The name of the object. + - kind: The kind of the object. + + Args: + path: The path to the object. + """ + raise NotImplementedError() + + def list_fields(self, path: List[ObjectSchema]) -> List[ConnectionObjectFields]: + """ + Returns the list of fields for the given object. + + The returned object is a list of dictionaries with the: + - name: The name of the field. + - dtype: The data type of the field. + + Args: + path: The path to the object. + """ + raise NotImplementedError() + + def preview_object(self, path: List[ObjectSchema]) -> Any: + """ + Returns a small sample of the object's data for previewing. + + The returned object must be a pandas dataframe or other types of + objects that can be previewed with Positron's Data Explorer. + + Args: + path: The path to the object. + """ + raise NotImplementedError() + + +class ConnectionsService: + """ + A service that manages connections to data sources. 
+ """ + + def __init__(self, kernel: PositronIPyKernel, comm_target_name: str): + self.comms: Dict[str, PositronComm] = {} + self.comm_id_to_connection: Dict[str, Connection] = {} + self._kernel = kernel + self._comm_target_name = comm_target_name + + def register_connection(self, connection: Any) -> str: + """ + Opens a connection to the given data source. + + Args: + connection: A subclass of Connection implementing the + necessary methods. + """ + + if not isinstance(connection, Connection): + connection = self._wrap_connection(connection) + + # check if there's already a connection registered with the same type and host + # just like RStudio we use the `type` and `host` properties to identify the connection + # and we don't allow multiple connections to the same data source. + # https://github.com/rstudio/rstudio/blob/2344a0bf04657a13c36053eb04bcc47616a623dc/src/cpp/session/modules/SessionConnections.R#L52-L53 + for comm_id, conn in self.comm_id_to_connection.items(): + if conn.type == connection.type and conn.host == connection.host: + logger.info( + "Connection to host '%s' of type '%s' already opened with comm_id '%s'", + conn.host, + conn.type, + comm_id, + ) + return comm_id + + comm_id = str(uuid.uuid4()) + base_comm = comm.create_comm( + target_name=self._comm_target_name, + comm_id=comm_id, + data={"name": connection.display_name}, + ) + + self.comm_id_to_connection[comm_id] = connection + self.on_comm_open(base_comm) + return comm_id + + def on_comm_open(self, comm: BaseComm): + comm_id = comm.comm_id + comm.on_close(lambda msg: self._close_connection(comm_id)) + connections_comm = PositronComm(comm) + connections_comm.on_msg(self.handle_msg, ConnectionsBackendMessageContent) + self.comms[comm_id] = connections_comm + + def _wrap_connection(self, obj: Any) -> Connection: + # we don't want to import sqlalchemy for that + type_name = type(obj).__name__ + + if safe_isinstance(obj, "sqlite3", "Connection"): + return SQLite3Connection(obj) + elif 
safe_isinstance(obj, "sqlalchemy", "Engine"): + return SQLAlchemyConnection(obj) + + raise ValueError(f"Unsupported connection type {type_name}") + + def _close_connection(self, comm_id: str): + + try: + # calling disconnect can fail if the connection has already been closed or + # if it's called from a different thread. + # however, this shound't be fatal as we won't use it anymore in the connections + # pane. + self.comm_id_to_connection[comm_id].disconnect() + except Exception as err: + logger.warning(err, exc_info=True) + + try: + self.comms[comm_id].close() + except Exception as err: + logger.warning(err, exc_info=True) + + del self.comms[comm_id] + del self.comm_id_to_connection[comm_id] + + def shutdown(self): + """ + Closes all comms and runs the `disconnect` callback. + """ + for comm_id in self.comms.keys(): + self._close_connection(comm_id) + + self.comms = {} # implicitly deleting comms + self.comm_id_to_connection = {} + + def handle_msg( + self, msg: CommMessage[ConnectionsBackendMessageContent], raw_msg: JsonRecord + ) -> None: + """ + Handles messages from the frontend. + """ + + try: + return self._handle_msg(msg, raw_msg) + except Exception as err: + # Any exception when handling messages is forwarded to the frontend which + # will display an error message in the UI if fatal. + + try: + comm_id = msg.content.comm_id + except AttributeError: + logger.error( + "Failed to process positron.connection request. No comm_id found in the message." 
+ ) + return + + logger.warning(err, exc_info=True) + self.comms[comm_id].send_error( + JsonRpcErrorCode.INTERNAL_ERROR, + f"Failed process positron.connection request: {err}", + ) + + def _handle_msg( + self, msg: CommMessage[ConnectionsBackendMessageContent], raw_msg: JsonRecord + ) -> None: + comm_id = msg.content.comm_id + request = msg.content.data + connection = self.comm_id_to_connection[comm_id] + comm = self.comms[comm_id] + + result: JsonData = None + if isinstance(request, ContainsDataRequest): + result = self.handle_contains_data_request(connection, request) + elif isinstance(request, ListObjectsRequest): + # both list_objects_request and list_fields_request return list of + # TypedDict objects that only contain strings. But pyright is not + # able to infer that. + result = self.handle_list_objects_request(connection, request) # type: ignore + elif isinstance(request, ListFieldsRequest): + result = self.handle_list_fields_request(connection, request) # type: ignore + elif isinstance(request, GetIconRequest): + result = self.handle_get_icon_request(connection, request) + elif isinstance(request, PreviewObjectRequest): + self.handle_preview_object_request(connection, request) + result = None + else: + raise NotImplementedError(f"Unhandled request: {request}") + + comm.send_result(result) + + def handle_contains_data_request(self, conn: Connection, request: ContainsDataRequest) -> bool: + path = request.params.path + if len(path) == 0: + return False + + object_types: Dict[str, Any] = conn.list_object_types() + contains = object_types[path[-1].kind].get("contains", "not_data") + return isinstance(contains, str) and contains == "data" + + def handle_get_icon_request(self, conn: Connection, request: GetIconRequest) -> str: + path = request.params.path + + icon = None + if len(path) == 0: + icon = getattr(conn, "icon", None) + else: + object_types: Dict[str, Any] = conn.list_object_types() + icon = object_types[path[-1].kind].get("icon", "") + + if icon is 
None: + return "" + return icon + + def handle_list_objects_request( + self, conn: Connection, request: ListObjectsRequest + ) -> List[ConnectionObject]: + return conn.list_objects(request.params.path) + + def handle_list_fields_request( + self, conn: Connection, request: ListFieldsRequest + ) -> List[ConnectionObjectFields]: + return conn.list_fields(request.params.path) + + def handle_preview_object_request( + self, conn: Connection, request: PreviewObjectRequest + ) -> None: + res = conn.preview_object(request.params.path) + title = request.params.path[-1].name + self._kernel.data_explorer_service.register_table(res, title) + + +class SQLite3Connection(Connection): + """ + Support for sqlite3 connections to databases. + """ + + def __init__(self, conn: sqlite3.Connection): + self.conn = conn + self.display_name = "SQLite Connection" + self.host = self._find_path(conn) + self.type = "SQLite" + + def _find_path(self, conn: sqlite3.Connection): + """ + Find the path to the database file or the in-memory database. + The path is used as the `host` property and is important to indentify + a unique sqlite3 connection. + """ + cursor = conn.cursor() + cursor.execute("PRAGMA database_list;") + # this returns a tuple containing row_number, databasename and filepath + row = cursor.fetchone() + return row[2] + + def list_objects(self, path: List[ObjectSchema]): + if len(path) == 0: + # we are at the root of the database, thus we return the list of attached 'databases' + # in general there's only `main` and `temp` but it seems users can attach other + # dbs to the connection + res = self.conn.cursor().execute("PRAGMA database_list;") + schemas: List[ConnectionObject] = [] + for _, name, _ in res.fetchall(): + schemas.append(ConnectionObject({"name": name, "kind": "schema"})) + return schemas + + if len(path) == 1: + # we must have a schema on the path. 
and we return the list of tables and views + # in that schema + schema = path[0] + if schema.kind != "schema": + raise ValueError( + f"Invalid path. Expected it to include a schema, but got '{schema.kind}'", + f"Path: {path}", + ) + + # https://www.sqlite.org/schematab.html + res = self.conn.cursor().execute( + f""" + SELECT name, type FROM {schema.name}.sqlite_schema WHERE type IN ('table', 'view'); + """ + ) + + tables: List[ConnectionObject] = [] + for name, kind in res.fetchall(): + # We drop the internal schema objects as defined in: + # https://www.sqlite.org/fileformat.html#internal_schema_objects + # ie, objects that start with `sqlite_` + if name.startswith("sqlite_"): + continue + tables.append(ConnectionObject({"name": name, "kind": kind})) + + return tables + + # there is no additional hierarchies in SQLite databases. If we get to this point + # it means the path is invalid. + raise ValueError(f"Path length must be at most 1, but got {len(path)}. Path: {path}") + + def list_fields(self, path: List[ObjectSchema]): + if len(path) != 2: + raise ValueError(f"Path length must be 2, but got {len(path)}. Path: {path}") + + schema, table = path + if schema.kind != "schema" or table.kind not in ["table", "view"]: + raise ValueError( + "Path must include a schema and a table/view in this order.", f"Path: {path}" + ) + + # https://www.sqlite.org/pragma.html#pragma_table_info + res = self.conn.cursor().execute(f"PRAGMA {schema.name}.table_info({table.name});") + fields: List[ConnectionObjectFields] = [] + for _, name, dtype, _, _, _ in res.fetchall(): + fields.append(ConnectionObjectFields({"name": name, "dtype": dtype})) + + return fields + + def disconnect(self): + self.conn.close() + + def preview_object(self, path: List[ObjectSchema]): + + if pd_ is None: + raise ModuleNotFoundError("Pandas is required for previewing SQLite tables.") + + if len(path) != 2: + raise ValueError(f"Path length must be 2, but got {len(path)}. 
Path: {path}") + + schema, table = path + if schema.kind != "schema" or table.kind not in ["table", "view"]: + raise ValueError( + "Path must include a schema and a table/view in this order.", f"Path: {path}" + ) + + return pd_.read_sql( + f"SELECT * FROM {schema.name}.{table.name} LIMIT 1000;", + self.conn, + ) + + def list_object_types(self): + return { + "table": ConnectionObjectInfo({"contains": "data", "icon": None}), + "view": ConnectionObjectInfo({"contains": "data", "icon": None}), + "schema": ConnectionObjectInfo({"contains": None, "icon": None}), + "database": ConnectionObjectInfo({"contains": None, "icon": None}), + } + + +class SQLAlchemyConnection(Connection): + """ + Support for SQLAlchemy connections to databases. + """ + + def __init__(self, conn): + + self.conn: sqlalchemy.Engine = conn + self.display_name = f"SQLAlchemy ({conn.name})" + self.host = conn.url + self.type = "SQLAlchemy" + + def list_objects(self, path: List[ObjectSchema]): + + if sqlalchemy_ is None: + raise ModuleNotFoundError( + "SQLAlchemy is required for listing objects in SQLAlchemy connections." + ) + + if len(path) == 0: + # we at the root of the database so we return the list of schemas + schemas = sqlalchemy_.inspect(self.conn).get_schema_names() + return [ConnectionObject({"name": name, "kind": "schema"}) for name in schemas] + + if len(path) == 1: + # we must have a schema on the path. and we return the list of tables and views + # in that schema + schema = path[0] + if schema.kind != "schema": + raise ValueError( + f"Invalid path. 
Expected it to include a schema, but got '{schema.kind}'", + f"Path: {path}", + ) + + tables = sqlalchemy_.inspect(self.conn).get_table_names(schema.name) + views = sqlalchemy_.inspect(self.conn).get_view_names(schema.name) + return [ConnectionObject({"name": name, "kind": "table"}) for name in tables] + [ + ConnectionObject({"name": name, "kind": "view"}) for name in views + ] + + raise ValueError(f"Path length must be at most 1, but got {len(path)}. Path: {path}") + + def list_fields(self, path: List[ObjectSchema]): + + if sqlalchemy_ is None: + raise ModuleNotFoundError( + "SQLAlchemy is required for listing fields in SQLAlchemy connections." + ) + + self._check_table_path(path) + + schema, table = path + fields = sqlalchemy_.inspect(self.conn).get_columns( + schema_name=schema.name, table_name=table.name + ) + return [ + ConnectionObjectFields({"name": field["name"], "dtype": str(field["type"])}) + for field in fields + ] + + def list_object_types(self): + return { + "table": ConnectionObjectInfo({"contains": "data", "icon": None}), + "view": ConnectionObjectInfo({"contains": "data", "icon": None}), + "schema": ConnectionObjectInfo({"contains": None, "icon": None}), + "database": ConnectionObjectInfo({"contains": None, "icon": None}), + } + + def preview_object(self, path: List[ObjectSchema]): + + if sqlalchemy_ is None: + raise ModuleNotFoundError( + "SQLAlchemy is required for previewing objects in SQLAlchemy connections." 
+ ) + + if pd_ is None: + raise ModuleNotFoundError("Pandas is required for previewing SQLAlchemy tables.") + + self._check_table_path(path) + schema, table = path + + table = sqlalchemy_.Table( + table.name, sqlalchemy_.MetaData(), autoload_with=self.conn, schema=schema.name + ) + stmt = sqlalchemy_.sql.select(table).limit(1000) + # using conn.connect() is safer then using the conn directly and is also supported + # with older pandas versions such as 1.5 + return pd_.read_sql(stmt, self.conn.connect()) + + def disconnect(self): + self.conn.dispose() + + def _check_table_path(self, path: List[ObjectSchema]): + if len(path) != 2: + raise ValueError( + f"Invalid path. Length path ({len(path)}) expected to be 2.", f"Path: {path}" + ) + + schema, table = path + if schema.kind != "schema" or table.kind not in ["table", "view"]: + raise ValueError( + "Invalid path. Expected path to contain a schema and a table/view.", + f"But got schema.kind={schema.kind} and table.kind={table.kind}", + ) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections_comm.py new file mode 100644 index 00000000000..d5838605d41 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/connections_comm.py @@ -0,0 +1,250 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +# +# AUTO-GENERATED from connections.json; do not edit. 
+# + +# flake8: noqa + +# For forward declarations +from __future__ import annotations + +import enum +from typing import Any, List, Literal, Optional, Union + +from ._vendor.pydantic import BaseModel, Field + + +class ObjectSchema(BaseModel): + """ + ObjectSchema in Schemas + """ + + name: str = Field( + description="Name of the underlying object", + ) + + kind: str = Field( + description="The object type (table, catalog, schema)", + ) + + +class FieldSchema(BaseModel): + """ + FieldSchema in Schemas + """ + + name: str = Field( + description="Name of the field", + ) + + dtype: str = Field( + description="The field data type", + ) + + +@enum.unique +class ConnectionsBackendRequest(str, enum.Enum): + """ + An enumeration of all the possible requests that can be sent to the backend connections comm. + """ + + # List objects within a data source + ListObjects = "list_objects" + + # List fields of an object + ListFields = "list_fields" + + # Check if an object contains data + ContainsData = "contains_data" + + # Get icon of an object + GetIcon = "get_icon" + + # Preview object data + PreviewObject = "preview_object" + + +class ListObjectsParams(BaseModel): + """ + List objects within a data source, such as schemas, catalogs, tables + and views. + """ + + path: List[ObjectSchema] = Field( + description="The path to object that we want to list children.", + ) + + +class ListObjectsRequest(BaseModel): + """ + List objects within a data source, such as schemas, catalogs, tables + and views. + """ + + params: ListObjectsParams = Field( + description="Parameters to the ListObjects method", + ) + + method: Literal[ConnectionsBackendRequest.ListObjects] = Field( + description="The JSON-RPC method name (list_objects)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ListFieldsParams(BaseModel): + """ + List fields of an object, such as columns of a table or view. 
+ """ + + path: List[ObjectSchema] = Field( + description="The path to object that we want to list fields.", + ) + + +class ListFieldsRequest(BaseModel): + """ + List fields of an object, such as columns of a table or view. + """ + + params: ListFieldsParams = Field( + description="Parameters to the ListFields method", + ) + + method: Literal[ConnectionsBackendRequest.ListFields] = Field( + description="The JSON-RPC method name (list_fields)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ContainsDataParams(BaseModel): + """ + Check if an object contains data, such as a table or view. + """ + + path: List[ObjectSchema] = Field( + description="The path to object that we want to check if it contains data.", + ) + + +class ContainsDataRequest(BaseModel): + """ + Check if an object contains data, such as a table or view. + """ + + params: ContainsDataParams = Field( + description="Parameters to the ContainsData method", + ) + + method: Literal[ConnectionsBackendRequest.ContainsData] = Field( + description="The JSON-RPC method name (contains_data)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class GetIconParams(BaseModel): + """ + Get icon of an object, such as a table or view. + """ + + path: List[ObjectSchema] = Field( + description="The path to object that we want to get the icon.", + ) + + +class GetIconRequest(BaseModel): + """ + Get icon of an object, such as a table or view. + """ + + params: GetIconParams = Field( + description="Parameters to the GetIcon method", + ) + + method: Literal[ConnectionsBackendRequest.GetIcon] = Field( + description="The JSON-RPC method name (get_icon)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class PreviewObjectParams(BaseModel): + """ + Preview object data, such as a table or view. 
+ """ + + path: List[ObjectSchema] = Field( + description="The path to object that we want to preview.", + ) + + +class PreviewObjectRequest(BaseModel): + """ + Preview object data, such as a table or view. + """ + + params: PreviewObjectParams = Field( + description="Parameters to the PreviewObject method", + ) + + method: Literal[ConnectionsBackendRequest.PreviewObject] = Field( + description="The JSON-RPC method name (preview_object)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ConnectionsBackendMessageContent(BaseModel): + comm_id: str + data: Union[ + ListObjectsRequest, + ListFieldsRequest, + ContainsDataRequest, + GetIconRequest, + PreviewObjectRequest, + ] = Field(..., discriminator="method") + + +ObjectSchema.update_forward_refs() + +FieldSchema.update_forward_refs() + +ListObjectsParams.update_forward_refs() + +ListObjectsRequest.update_forward_refs() + +ListFieldsParams.update_forward_refs() + +ListFieldsRequest.update_forward_refs() + +ContainsDataParams.update_forward_refs() + +ContainsDataRequest.update_forward_refs() + +GetIconParams.update_forward_refs() + +GetIconRequest.update_forward_refs() + +PreviewObjectParams.update_forward_refs() + +PreviewObjectRequest.update_forward_refs() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer.py new file mode 100644 index 00000000000..56bca99fcf2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer.py @@ -0,0 +1,744 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. 
+# + +# flake8: ignore E203 +# pyright: reportOptionalMemberAccess=false + +import logging +import operator +import uuid +from typing import ( + TYPE_CHECKING, + Callable, + Dict, + List, + Optional, + Sequence, + Set, + Tuple, +) + +import comm + +from .access_keys import decode_access_key +from .data_explorer_comm import ( + ColumnFilter, + ColumnFilterCompareOp, + ColumnSchema, + ColumnSchemaTypeDisplay, + ColumnSortKey, + DataExplorerBackendMessageContent, + DataExplorerFrontendEvent, + FilterResult, + GetColumnProfileProfileType, + GetColumnProfileRequest, + GetDataValuesRequest, + GetSchemaRequest, + GetStateRequest, + SchemaUpdateParams, + SetColumnFiltersRequest, + SetSortColumnsRequest, + TableData, + TableSchema, + TableShape, + TableState, +) +from .positron_comm import CommMessage, PositronComm +from .third_party import pd_ + +if TYPE_CHECKING: + import pandas as pd + + # import polars as pl + # import pyarrow as pa + + +logger = logging.getLogger(__name__) + + +PathKey = Tuple[str, ...] + + +class DataExplorerTableView: + """ + Interface providing a consistent wrapper around different data + frame / table types for the data explorer for serving requests from + the front end. 
This includes pandas.DataFrame, polars.DataFrame, + pyarrow.Table, and any others + """ + + def __init__( + self, + table, + filters: Optional[List[ColumnFilter]], + sort_keys: Optional[List[ColumnSortKey]], + ): + # Note: we must not ever modify the user's data + self.table = table + + self.filters = filters if filters is not None else [] + self.sort_keys = sort_keys if sort_keys is not None else [] + + self._need_recompute = len(self.filters) > 0 or len(self.sort_keys) > 0 + + def invalidate_computations(self): + raise NotImplementedError + + def ui_should_update_schema(self, new_table): + raise NotImplementedError + + def ui_should_update_data(self, new_table): + raise NotImplementedError + + def _recompute_if_needed(self) -> bool: + if self._need_recompute: + self._recompute() + self._need_recompute = False + return True + else: + return False + + def _recompute(self): + raise NotImplementedError + + def get_schema(self, request: GetSchemaRequest): + return self._get_schema(request.params.start_index, request.params.num_columns).dict() + + def get_data_values(self, request: GetDataValuesRequest): + self._recompute_if_needed() + return self._get_data_values( + request.params.row_start_index, + request.params.num_rows, + request.params.column_indices, + ).dict() + + def set_column_filters(self, request: SetColumnFiltersRequest): + return self._set_column_filters(request.params.filters) + + def set_sort_columns(self, request: SetSortColumnsRequest): + self.sort_keys = request.params.sort_keys + + if not self._recompute_if_needed(): + # If a re-filter is pending, then it will automatically + # trigger a sort + self._sort_data() + + def get_column_profile(self, request: GetColumnProfileRequest): + self._recompute_if_needed() + return self._get_column_profile(request.params.profile_type, request.params.column_index) + + def get_state(self, request: GetStateRequest): + return self._get_state().dict() + + def _get_schema(self, column_start: int, num_columns: int) -> 
TableSchema: + raise NotImplementedError + + def _get_data_values( + self, + row_start: int, + num_rows: int, + column_indices: Sequence[int], + ) -> TableData: + raise NotImplementedError + + def _set_column_filters(self, filters: List[ColumnFilter]) -> FilterResult: + raise NotImplementedError + + def _sort_data(self) -> None: + raise NotImplementedError + + def _get_column_profile( + self, + profile_type: GetColumnProfileProfileType, + column_index: int, + ) -> None: + raise NotImplementedError + + def _get_state(self) -> TableState: + raise NotImplementedError + + +def _pandas_format_values(col): + import pandas.io.formats.format as fmt + + try: + return fmt.format_array(col._values, None, leading_space=False) + except Exception: + logger.warning(f"Failed to format column '{col.name}'") + return col.astype(str).tolist() + + +class PandasView(DataExplorerTableView): + TYPE_DISPLAY_MAPPING = { + "integer": "number", + "int8": "number", + "int16": "number", + "int32": "number", + "int64": "number", + "uint8": "number", + "uint16": "number", + "uint32": "number", + "uint64": "number", + "floating": "number", + "float16": "number", + "float32": "number", + "float64": "number", + "mixed-integer": "number", + "mixed-integer-float": "number", + "mixed": "unknown", + "decimal": "number", + "complex": "number", + "categorical": "categorical", + "boolean": "boolean", + "datetime64": "datetime", + "datetime64[ns]": "datetime", + "datetime": "datetime", + "date": "date", + "time": "time", + "bytes": "string", + "string": "string", + } + + def __init__( + self, + table, + filters: Optional[List[ColumnFilter]], + sort_keys: Optional[List[ColumnSortKey]], + ): + super().__init__(table, filters, sort_keys) + + self._dtypes = None + + # Maintain a mapping of column index to inferred dtype for any + # object columns, to avoid recomputing. 
If the underlying + # object is changed, this needs to be reset + self._inferred_dtypes = {} + + # NumPy array of selected ("true") indices using filters. If + # there are also sort keys, we first filter the unsorted data, + # and then sort the filtered data only, for the optimistic + # case that a low-selectivity filters yields less data to sort + self.filtered_indices = None + + # NumPy array of selected AND reordered indices + # (e.g. including any sorting). If there are no sort keys and + # only filters, then this should be the same as + # self.filtered_indices + self.view_indices = None + + def invalidate_computations(self): + self.filtered_indices = self.view_indices = None + self._need_recompute = True + + def ui_should_update_schema(self, new_table) -> Tuple[bool, bool]: + # Add smarter logic here later, but for now always update the + # schema + + if self.table.columns.equals(new_table.columns): + update_schema = False + for i in range(len(self.table.columns)): + if self.table.iloc[:, i].dtype != new_table.iloc[:, i].dtype: + update_schema = True + break + else: + update_schema = True + + discard_state = update_schema + return update_schema, discard_state + + def ui_should_update_data(self, new_table): + # If the variables service says the variable has been updated + # or is uncertain + return True + + def _recompute(self): + # Resetting the column filters will trigger filtering AND + # sorting + self._set_column_filters(self.filters) + + @property + def dtypes(self): + if self._dtypes is None: + self._dtypes = self.table.dtypes + return self._dtypes + + def _get_schema(self, column_start: int, num_columns: int) -> TableSchema: + from pandas.api.types import infer_dtype + + # TODO: pandas MultiIndex columns + # TODO: time zone for datetimetz datetime64[ns] types + columns_slice = self.table.columns[column_start : column_start + num_columns] + dtypes_slice = self.dtypes.iloc[column_start : column_start + num_columns] + column_schemas = [] + + for i, (c, 
dtype) in enumerate(zip(columns_slice, dtypes_slice)): + if dtype == object: + column_index = i + column_start + if column_index not in self._inferred_dtypes: + self._inferred_dtypes[column_index] = infer_dtype( + self.table.iloc[:, column_index] + ) + type_name = self._inferred_dtypes[column_index] + else: + # TODO: more sophisticated type mapping + type_name = str(dtype) + + type_display = self.TYPE_DISPLAY_MAPPING.get(type_name, "unknown") + + col_schema = ColumnSchema( + column_name=str(c), + type_name=type_name, + type_display=ColumnSchemaTypeDisplay(type_display), + ) + column_schemas.append(col_schema) + + return TableSchema(columns=column_schemas) + + def _get_data_values( + self, row_start: int, num_rows: int, column_indices: Sequence[int] + ) -> TableData: + formatted_columns = [] + + column_indices = sorted(column_indices) + + # TODO(wesm): This value formatting strategy produces output + # that is not the same as what users see in the console. I + # will have to look for the right pandas function that deals + # with value formatting + columns = [] + for i in column_indices: + # The UI has requested data beyond the end of the table, + # so we stop here + if i >= len(self.table.columns): + break + columns.append(self.table.iloc[:, i]) + + formatted_columns = [] + + if self.view_indices is not None: + # If the table is either filtered or sorted, use a slice + # the view_indices to select the virtual range of values for the grid + view_slice = self.view_indices[row_start : row_start + num_rows] + columns = [col.take(view_slice) for col in columns] + indices = self.table.index.take(view_slice) + else: + # No filtering or sorting, just slice directly + indices = self.table.index[row_start : row_start + num_rows] + columns = [col.iloc[row_start : row_start + num_rows] for col in columns] + + formatted_columns = [_pandas_format_values(col) for col in columns] + + # Currently, we format MultiIndex in its flat tuple + # representation. 
In the future we will return multiple lists + # of row labels to be formatted more nicely in the UI + if isinstance(self.table.index, pd_.MultiIndex): + indices = indices.to_flat_index() + row_labels = [_pandas_format_values(indices)] + return TableData(columns=formatted_columns, row_labels=row_labels) + + def _update_view_indices(self): + if len(self.sort_keys) == 0: + self.view_indices = self.filtered_indices + else: + # If we have just applied a new filter, we now resort to + # reflect the filtered_indices that have just been updated + self._sort_data() + + def _set_column_filters(self, filters) -> FilterResult: + self.filters = filters + + if len(filters) == 0: + # Simply reset if empty filter set passed + self.filtered_indices = None + self._update_view_indices() + return FilterResult(selected_num_rows=len(self.table)) + + # Evaluate all the filters and AND them together + combined_mask = None + for filt in filters: + single_mask = _pandas_eval_filter(self.table, filt) + if combined_mask is None: + combined_mask = single_mask + else: + combined_mask &= single_mask + + self.filtered_indices = combined_mask.nonzero()[0] + + # Update the view indices, re-sorting if needed + self._update_view_indices() + return FilterResult(selected_num_rows=len(self.filtered_indices)) + + def _sort_data(self) -> None: + from pandas.core.sorting import lexsort_indexer, nargsort + + if len(self.sort_keys) == 1: + key = self.sort_keys[0] + column = self.table.iloc[:, key.column_index] + if self.filtered_indices is not None: + # pandas's univariate null-friendly argsort (computes + # the sorting indices). 
Mergesort is needed to make it + # stable + sort_indexer = nargsort( + column.take(self.filtered_indices), + kind="mergesort", + ascending=key.ascending, + ) + # Reorder the filtered_indices to provide the + # filtered, sorted virtual view for future data + # requests + self.view_indices = self.filtered_indices.take(sort_indexer) + else: + # Data is not filtered + self.view_indices = nargsort(column, kind="mergesort", ascending=key.ascending) + elif len(self.sort_keys) > 1: + # Multiple sorting keys + cols_to_sort = [] + directions = [] + for key in self.sort_keys: + column = self.table.iloc[:, key.column_index] + if self.filtered_indices is not None: + column = column.take(self.filtered_indices) + cols_to_sort.append(column) + directions.append(key.ascending) + + # lexsort_indexer uses np.lexsort and so is always stable + sort_indexer = lexsort_indexer(cols_to_sort, directions) + if self.filtered_indices is not None: + # Create the filtered, sorted virtual view indices + self.view_indices = self.filtered_indices.take(sort_indexer) + else: + self.view_indices = sort_indexer + else: + # This will be None if the data is unfiltered + self.view_indices = self.filtered_indices + + def _get_column_profile( + self, profile_type: GetColumnProfileProfileType, column_index: int + ) -> None: + pass + + def _get_state(self) -> TableState: + return TableState( + table_shape=TableShape(num_rows=self.table.shape[0], num_columns=self.table.shape[1]), + filters=self.filters, + sort_keys=self.sort_keys, + ) + + +COMPARE_OPS = { + ColumnFilterCompareOp.Gt: operator.gt, + ColumnFilterCompareOp.GtEq: operator.ge, + ColumnFilterCompareOp.Lt: operator.lt, + ColumnFilterCompareOp.LtEq: operator.le, + ColumnFilterCompareOp.Eq: operator.eq, + ColumnFilterCompareOp.NotEq: operator.ne, +} + + +def _pandas_eval_filter(df: "pd.DataFrame", filt: ColumnFilter): + col = df.iloc[:, filt.column_index] + mask = None + if filt.filter_type == "compare": + if filt.compare_op not in COMPARE_OPS: + raise 
ValueError(f"Unsupported filter type: {filt.compare_op}") + op = COMPARE_OPS[filt.compare_op] + # Let pandas decide how to coerce the string we got from the UI + dummy = pd_.Series([filt.compare_value]).astype(col.dtype) + + # pandas comparison filters return False for null values + mask = op(col, dummy.iloc[0]) + elif filt.filter_type == "isnull": + mask = col.isnull() + elif filt.filter_type == "notnull": + mask = col.notnull() + elif filt.filter_type == "set_membership": + boxed_values = pd_.Series(filt.set_member_values).astype(col.dtype) + # IN + mask = col.isin(boxed_values) + if not filt.set_member_inclusive: + # NOT-IN + mask = ~mask + elif filt.filter_type == "search": + raise NotImplementedError + + # TODO(wesm): is it possible for there to be null values in the mask? + return mask.to_numpy() + + +class PolarsView(DataExplorerTableView): + pass + + +class PyArrowView(DataExplorerTableView): + pass + + +def _get_table_view(table, filters=None, sort_keys=None): + return PandasView(table, filters, sort_keys) + + +def _value_type_is_supported(value): + return isinstance(value, pd_.DataFrame) + + +class DataExplorerService: + def __init__(self, comm_target: str) -> None: + self.comm_target = comm_target + + # Maps comm_id for each dataset being viewed to PositronComm + self.comms: Dict[str, PositronComm] = {} + self.table_views: Dict[str, DataExplorerTableView] = {} + + # Maps from variable path to set of comm_ids serving DE + # requests. The user could have multiple DE windows open + # referencing the same dataset. 
+ self.path_to_comm_ids: Dict[PathKey, Set[str]] = {} + + # Mapping from comm_id to the corresponding variable path, if any + self.comm_id_to_path: Dict[str, PathKey] = {} + + # Called when comm closure is initiated from the backend + self._close_callback = None + + def shutdown(self) -> None: + for comm_id in list(self.comms.keys()): + self._close_explorer(comm_id) + + def register_table( + self, + table, + title, + variable_path: Optional[List[str]] = None, + comm_id=None, + ): + """ + Set up a new comm and data explorer table query wrapper to + handle requests and manage state. + + Parameters + ---------- + table : table-like object + title : str + Display name in UI + variable_path : List[str], default None + If the data explorer references an assigned variable in + the user namespace, we track it so that namespace changes + (variable deletions or assignments) can reflect the + appropriate change on active data explorer tabs and make + sure e.g. that we do not hold onto memory inappropriately. + comm_id : str, default None + A specific comm identifier to use, otherwise generate a + random uuid. 
+ + Returns + ------- + comm_id : str + The associated (generated or passed in) comm_id + """ + if type(table).__name__ != "DataFrame": + raise TypeError(type(table)) + + if comm_id is None: + comm_id = str(uuid.uuid4()) + + self.table_views[comm_id] = _get_table_view(table) + + base_comm = comm.create_comm( + target_name=self.comm_target, + comm_id=comm_id, + data={"title": title}, + ) + + def close_callback(msg): + # Notify via callback that the comm_id has closed + if self._close_callback: + self._close_callback(comm_id) + + self._close_explorer(comm_id) + + base_comm.on_close(close_callback) + + if variable_path is not None: + if not isinstance(variable_path, list): + raise ValueError(variable_path) + + key = tuple(variable_path) + self.comm_id_to_path[comm_id] = key + + if key in self.path_to_comm_ids: + self.path_to_comm_ids[key].add(comm_id) + else: + self.path_to_comm_ids[key] = {comm_id} + + wrapped_comm = PositronComm(base_comm) + wrapped_comm.on_msg(self.handle_msg, DataExplorerBackendMessageContent) + self.comms[comm_id] = wrapped_comm + + def _close_explorer(self, comm_id: str): + try: + # This is idempotent, so if the comm is already closed, we + # can call this again. This will also notify the UI with + # the comm_close event + self.comms[comm_id].close() + except Exception as err: + logger.warning(err, exc_info=True) + pass + + del self.comms[comm_id] + del self.table_views[comm_id] + + if comm_id in self.comm_id_to_path: + path = self.comm_id_to_path[comm_id] + self.path_to_comm_ids[path].remove(comm_id) + del self.comm_id_to_path[comm_id] + + def on_comm_closed(self, callback: Callable[[str], None]): + """ + Register a callback to invoke when a comm was closed in the backend. 
+ """ + self._close_callback = callback + + def variable_has_active_explorers(self, variable_name): + # Check if any data explorer has been opened with the indicated + # variable as a path prefix + return len(self.get_paths_for_variable(variable_name)) > 0 + + def get_paths_for_variable(self, variable_name): + result = [] + for path, comm_ids in self.path_to_comm_ids.items(): + key = decode_access_key(path[0]) + if key == variable_name and len(comm_ids) > 0: + # An active data explorer shares a path prefix + result.append(path) + continue + return result + + def handle_variable_deleted(self, variable_name): + """ + If a variable with active data explorers is deleted, we must + shut down and delete unneeded state and object references + stored here. + """ + affected_paths = self.get_paths_for_variable(variable_name) + for path in affected_paths: + for comm_id in list(self.path_to_comm_ids[path]): + self._close_explorer(comm_id) + + def handle_variable_updated(self, variable_name, new_variable): + affected_paths = self.get_paths_for_variable(variable_name) + for path in affected_paths: + for comm_id in list(self.path_to_comm_ids[path]): + self._update_explorer_for_comm(comm_id, path, new_variable) + + def _update_explorer_for_comm(self, comm_id: str, path: PathKey, new_variable): + """ + If a variable is updated, we have to handle the different scenarios: + + * The variable type is the same and the schema is the same, + but the data is possibly different (e.g. if the object is + mutable and large, this will happen every time the user + performs an action). Depending on whether the object + reference has changed, we can reason about what state needs + to be invalidated on a case by case basis (for example: + sort/filter indices will need to be recomputed generally). + * The variable type is the same and the schema is + different. 
Depending on whether the schema or column names + are different, we may signal the UI to do a "soft" update + (leaving the cursor position and UI state as is) or a hard + update (resetting everything to its initial state). We will + have to do some work to decide whether to preserve filters + and sorts (if the sorts and filters are still valid after + the schema change). + * The variable type is different but still supported in the + data explorer. + * The variable type is different and NOT supported in the data + explorer. + """ + from .variables import _resolve_value_from_path + + comm = self.comms[comm_id] + table_view = self.table_views[comm_id] + + # When detecting namespace assignments or changes, the first + # level of the path has already been resolved. If there is a + # data explorer open for a nested value, then we need to use + # the same variables inspection logic to resolve it here. + if len(path) > 1: + is_found, new_table = _resolve_value_from_path(new_variable, path[1:]) + if not is_found: + raise KeyError(f"Path {', '.join(path)} not found in value") + else: + new_table = new_variable + + if not _value_type_is_supported(new_table): + # If a variable has been assigned a type that is not + # supported in the existing data explorer tab, we should + # tear down everything here and let the comm_closed event + # signal the UI to make the explorer that the user may be + # looking at invalid. + return self._close_explorer(comm_id) + + def _fire_data_update(): + comm.send_event(DataExplorerFrontendEvent.DataUpdate.value, {}) + + def _fire_schema_update(discard_state=False): + msg = SchemaUpdateParams(discard_state=discard_state) + comm.send_event(DataExplorerFrontendEvent.SchemaUpdate.value, msg.dict()) + + if type(new_table) is not type(table_view.table): # noqa: E721 + # Data type has changed. For now, we will signal the UI to + # reset its entire state: sorting keys, filters, etc. and + # start over. 
At some point we can return here and + # selectively preserve state if we feel it is safe enough + # to do so. + self.table_views[comm_id] = _get_table_view(new_table) + return _fire_schema_update(discard_state=True) + + # New value for data explorer is the same. For now, we just + # invalidate the stored computatations and fire a data update, + # but we'll come back here and improve this for immutable / + # copy-on-write tables like Arrow and Polars + # + # TODO: address pathological pandas case where columns have + # been modified + if new_table is table_view.table: + # The object references are the same, but we were probably + # unsure about whether the data has been mutated, so we + # invalidate the view's cached computations + # (e.g. filter/sort indices) so they get recomputed + table_view.invalidate_computations() + return _fire_data_update() + + ( + should_update_schema, + should_discard_state, + ) = table_view.ui_should_update_schema(new_table) + + # The schema is the same, but the data has changed. We'll just + # set a new table view and preserve the view state and be done + # with it. + self.table_views[comm_id] = _get_table_view( + new_table, + filters=table_view.filters, + sort_keys=table_view.sort_keys, + ) + + if should_update_schema: + _fire_schema_update(discard_state=should_discard_state) + else: + _fire_data_update() + + def handle_msg(self, msg: CommMessage[DataExplorerBackendMessageContent], raw_msg): + """ + Handle messages received from the client via the + positron.data_explorer comm. 
+ """ + comm_id = msg.content.comm_id + request = msg.content.data + + comm = self.comms[comm_id] + table = self.table_views[comm_id] + + result = getattr(table, request.method.value)(request) + comm.send_result(result) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer_comm.py new file mode 100644 index 00000000000..25d1133d4d9 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/data_explorer_comm.py @@ -0,0 +1,653 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +# +# AUTO-GENERATED from data_explorer.json; do not edit. +# + +# flake8: noqa + +# For forward declarations +from __future__ import annotations + +import enum +from typing import Any, List, Literal, Optional, Union + +from ._vendor.pydantic import BaseModel, Field + + +@enum.unique +class GetColumnProfileProfileType(str, enum.Enum): + """ + Possible values for ProfileType in GetColumnProfile + """ + + Freqtable = "freqtable" + + Histogram = "histogram" + + +@enum.unique +class ColumnSchemaTypeDisplay(str, enum.Enum): + """ + Possible values for TypeDisplay in ColumnSchema + """ + + Number = "number" + + Boolean = "boolean" + + String = "string" + + Date = "date" + + Datetime = "datetime" + + Time = "time" + + Array = "array" + + Struct = "struct" + + Unknown = "unknown" + + +@enum.unique +class ColumnFilterFilterType(str, enum.Enum): + """ + Possible values for FilterType in ColumnFilter + """ + + Isnull = "isnull" + + Notnull = "notnull" + + Compare = "compare" + + SetMembership = "set_membership" + + Search = "search" + + +@enum.unique +class ColumnFilterCompareOp(str, enum.Enum): + """ + Possible values for CompareOp in ColumnFilter + """ + + Eq = "=" + + NotEq = "!=" + + Lt = "<" + + LtEq = "<=" + + Gt = ">" + + GtEq = ">=" + + +@enum.unique +class ColumnFilterSearchType(str, enum.Enum): + """ + Possible 
values for SearchType in ColumnFilter + """ + + Contains = "contains" + + Startswith = "startswith" + + Endswith = "endswith" + + Regex = "regex" + + +class TableSchema(BaseModel): + """ + The schema for a table-like object + """ + + columns: List[ColumnSchema] = Field( + description="Schema for each column in the table", + ) + + +class TableData(BaseModel): + """ + Table values formatted as strings + """ + + columns: List[List[str]] = Field( + description="The columns of data", + ) + + row_labels: Optional[List[List[str]]] = Field( + default=None, + description="Zero or more arrays of row labels", + ) + + +class FilterResult(BaseModel): + """ + The result of applying filters to a table + """ + + selected_num_rows: int = Field( + description="Number of rows in table after applying filters", + ) + + +class ProfileResult(BaseModel): + """ + Result of computing column profile + """ + + null_count: int = Field( + description="Number of null values in column", + ) + + min_value: Optional[str] = Field( + default=None, + description="Minimum value as string computed as part of histogram", + ) + + max_value: Optional[str] = Field( + default=None, + description="Maximum value as string computed as part of histogram", + ) + + mean_value: Optional[str] = Field( + default=None, + description="Average value as string computed as part of histogram", + ) + + histogram_bin_sizes: Optional[List[int]] = Field( + default=None, + description="Absolute count of values in each histogram bin", + ) + + histogram_bin_width: Optional[float] = Field( + default=None, + description="Absolute floating-point width of a histogram bin", + ) + + histogram_quantiles: Optional[List[ColumnQuantileValue]] = Field( + default=None, + description="Quantile values computed from histogram bins", + ) + + freqtable_counts: Optional[List[FreqtableCounts]] = Field( + default=None, + description="Counts of distinct values in column", + ) + + freqtable_other_count: Optional[int] = Field( + default=None, + 
description="Number of other values not accounted for in counts", + ) + + +class FreqtableCounts(BaseModel): + """ + Items in FreqtableCounts + """ + + value: str = Field( + description="Stringified value", + ) + + count: int = Field( + description="Number of occurrences of value", + ) + + +class TableState(BaseModel): + """ + The current backend table state + """ + + table_shape: TableShape = Field( + description="Provides number of rows and columns in table", + ) + + filters: List[ColumnFilter] = Field( + description="The set of currently applied filters", + ) + + sort_keys: List[ColumnSortKey] = Field( + description="The set of currently applied sorts", + ) + + +class TableShape(BaseModel): + """ + Provides number of rows and columns in table + """ + + num_rows: int = Field( + description="Numbers of rows in the unfiltered dataset", + ) + + num_columns: int = Field( + description="Number of columns in the unfiltered dataset", + ) + + +class ColumnSchema(BaseModel): + """ + Schema for a column in a table + """ + + column_name: str = Field( + description="Name of column as UTF-8 string", + ) + + type_name: str = Field( + description="Exact name of data type used by underlying table", + ) + + type_display: ColumnSchemaTypeDisplay = Field( + description="Canonical Positron display name of data type", + ) + + description: Optional[str] = Field( + default=None, + description="Column annotation / description", + ) + + children: Optional[List[ColumnSchema]] = Field( + default=None, + description="Schema of nested child types", + ) + + precision: Optional[int] = Field( + default=None, + description="Precision for decimal types", + ) + + scale: Optional[int] = Field( + default=None, + description="Scale for decimal types", + ) + + timezone: Optional[str] = Field( + default=None, + description="Time zone for timestamp with time zone", + ) + + type_size: Optional[int] = Field( + default=None, + description="Size parameter for fixed-size types (list, binary)", + ) + + +class 
ColumnFilter(BaseModel): + """ + Specifies a table row filter based on a column's values + """ + + filter_id: str = Field( + description="Unique identifier for this filter", + ) + + filter_type: ColumnFilterFilterType = Field( + description="Type of filter to apply", + ) + + column_index: int = Field( + description="Column index to apply filter to", + ) + + compare_op: Optional[ColumnFilterCompareOp] = Field( + default=None, + description="String representation of a binary comparison", + ) + + compare_value: Optional[str] = Field( + default=None, + description="A stringified column value for a comparison filter", + ) + + set_member_values: Optional[List[str]] = Field( + default=None, + description="Array of column values for a set membership filter", + ) + + set_member_inclusive: Optional[bool] = Field( + default=None, + description="Filter by including only values passed (true) or excluding (false)", + ) + + search_type: Optional[ColumnFilterSearchType] = Field( + default=None, + description="Type of search to perform", + ) + + search_term: Optional[str] = Field( + default=None, + description="String value/regex to search for in stringified data", + ) + + search_case_sensitive: Optional[bool] = Field( + default=None, + description="If true, do a case-sensitive search, otherwise case-insensitive", + ) + + +class ColumnQuantileValue(BaseModel): + """ + An exact or approximate quantile value from a column + """ + + q: float = Field( + description="Quantile number (percentile). E.g. 
1 for 1%, 50 for median", + ) + + value: str = Field( + description="Stringified quantile value", + ) + + exact: bool = Field( + description="Whether value is exact or approximate (computed from binned data or sketches)", + ) + + +class ColumnSortKey(BaseModel): + """ + Specifies a column to sort by + """ + + column_index: int = Field( + description="Column index to sort by", + ) + + ascending: bool = Field( + description="Sort order, ascending (true) or descending (false)", + ) + + +@enum.unique +class DataExplorerBackendRequest(str, enum.Enum): + """ + An enumeration of all the possible requests that can be sent to the backend data_explorer comm. + """ + + # Request schema + GetSchema = "get_schema" + + # Get a rectangle of data values + GetDataValues = "get_data_values" + + # Set column filters + SetColumnFilters = "set_column_filters" + + # Set or clear sort-by-column(s) + SetSortColumns = "set_sort_columns" + + # Get a column profile + GetColumnProfile = "get_column_profile" + + # Get the state + GetState = "get_state" + + +class GetSchemaParams(BaseModel): + """ + Request full schema for a table-like object + """ + + start_index: int = Field( + description="First column schema to fetch (inclusive)", + ) + + num_columns: int = Field( + description="Number of column schemas to fetch from start index. 
May extend beyond end of table", + ) + + +class GetSchemaRequest(BaseModel): + """ + Request full schema for a table-like object + """ + + params: GetSchemaParams = Field( + description="Parameters to the GetSchema method", + ) + + method: Literal[DataExplorerBackendRequest.GetSchema] = Field( + description="The JSON-RPC method name (get_schema)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class GetDataValuesParams(BaseModel): + """ + Request a rectangular subset of data with values formatted as strings + """ + + row_start_index: int = Field( + description="First row to fetch (inclusive)", + ) + + num_rows: int = Field( + description="Number of rows to fetch from start index. May extend beyond end of table", + ) + + column_indices: List[int] = Field( + description="Indices to select, which can be a sequential, sparse, or random selection", + ) + + +class GetDataValuesRequest(BaseModel): + """ + Request a rectangular subset of data with values formatted as strings + """ + + params: GetDataValuesParams = Field( + description="Parameters to the GetDataValues method", + ) + + method: Literal[DataExplorerBackendRequest.GetDataValues] = Field( + description="The JSON-RPC method name (get_data_values)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class SetColumnFiltersParams(BaseModel): + """ + Set or clear column filters on table, replacing any previous filters + """ + + filters: List[ColumnFilter] = Field( + description="Zero or more filters to apply", + ) + + +class SetColumnFiltersRequest(BaseModel): + """ + Set or clear column filters on table, replacing any previous filters + """ + + params: SetColumnFiltersParams = Field( + description="Parameters to the SetColumnFilters method", + ) + + method: Literal[DataExplorerBackendRequest.SetColumnFilters] = Field( + description="The JSON-RPC method name (set_column_filters)", + ) + + jsonrpc: str = 
Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class SetSortColumnsParams(BaseModel): + """ + Set or clear the columns(s) to sort by, replacing any previous sort + columns + """ + + sort_keys: List[ColumnSortKey] = Field( + description="Pass zero or more keys to sort by. Clears any existing keys", + ) + + +class SetSortColumnsRequest(BaseModel): + """ + Set or clear the columns(s) to sort by, replacing any previous sort + columns + """ + + params: SetSortColumnsParams = Field( + description="Parameters to the SetSortColumns method", + ) + + method: Literal[DataExplorerBackendRequest.SetSortColumns] = Field( + description="The JSON-RPC method name (set_sort_columns)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class GetColumnProfileParams(BaseModel): + """ + Requests a statistical summary or data profile for a column + """ + + profile_type: GetColumnProfileProfileType = Field( + description="The type of analytical column profile", + ) + + column_index: int = Field( + description="Column index to compute profile for", + ) + + +class GetColumnProfileRequest(BaseModel): + """ + Requests a statistical summary or data profile for a column + """ + + params: GetColumnProfileParams = Field( + description="Parameters to the GetColumnProfile method", + ) + + method: Literal[DataExplorerBackendRequest.GetColumnProfile] = Field( + description="The JSON-RPC method name (get_column_profile)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class GetStateRequest(BaseModel): + """ + Request the current table state (applied filters and sort columns) + """ + + method: Literal[DataExplorerBackendRequest.GetState] = Field( + description="The JSON-RPC method name (get_state)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class DataExplorerBackendMessageContent(BaseModel): + 
comm_id: str + data: Union[ + GetSchemaRequest, + GetDataValuesRequest, + SetColumnFiltersRequest, + SetSortColumnsRequest, + GetColumnProfileRequest, + GetStateRequest, + ] = Field(..., discriminator="method") + + +@enum.unique +class DataExplorerFrontendEvent(str, enum.Enum): + """ + An enumeration of all the possible events that can be sent to the frontend data_explorer comm. + """ + + # Reset after a schema change + SchemaUpdate = "schema_update" + + # Clear cache and request fresh data + DataUpdate = "data_update" + + +class SchemaUpdateParams(BaseModel): + """ + Reset after a schema change + """ + + discard_state: bool = Field( + description="If true, the UI should discard the filter/sort state.", + ) + + +TableSchema.update_forward_refs() + +TableData.update_forward_refs() + +FilterResult.update_forward_refs() + +ProfileResult.update_forward_refs() + +FreqtableCounts.update_forward_refs() + +TableState.update_forward_refs() + +TableShape.update_forward_refs() + +ColumnSchema.update_forward_refs() + +ColumnFilter.update_forward_refs() + +ColumnQuantileValue.update_forward_refs() + +ColumnSortKey.update_forward_refs() + +GetSchemaParams.update_forward_refs() + +GetSchemaRequest.update_forward_refs() + +GetDataValuesParams.update_forward_refs() + +GetDataValuesRequest.update_forward_refs() + +SetColumnFiltersParams.update_forward_refs() + +SetColumnFiltersRequest.update_forward_refs() + +SetSortColumnsParams.update_forward_refs() + +SetSortColumnsRequest.update_forward_refs() + +GetColumnProfileParams.update_forward_refs() + +GetColumnProfileRequest.update_forward_refs() + +GetStateRequest.update_forward_refs() + +SchemaUpdateParams.update_forward_refs() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/__init__.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/__init__.py new file mode 100644 index 00000000000..2af04c4814f --- /dev/null +++ 
b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/__init__.py @@ -0,0 +1,15 @@ +# +# Copyright (C) 2023 Posit Software, PBC. All rights reserved. +# +from .._vendor.docstring_to_markdown.google import google_to_markdown, looks_like_google +from .._vendor.docstring_to_markdown.rst import rst_to_markdown +from .epytext import epytext_to_markdown, looks_like_epytext + + +def convert_docstring(docstring: str) -> str: + if looks_like_google(docstring): + return google_to_markdown(docstring) + if looks_like_epytext(docstring): + return epytext_to_markdown(docstring) + + return rst_to_markdown(docstring) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/epytext.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/epytext.py new file mode 100644 index 00000000000..044d0510966 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/docstrings/epytext.py @@ -0,0 +1,290 @@ +# +# Copyright (C) 2023 Posit Software, PBC. All rights reserved. 
+# + +import re +from textwrap import dedent +from typing import List + +# from markdown_to_docstring.google.ESCAPE_RULES +ESCAPE_RULES = { + # Avoid Markdown in magic methods or filenames like __init__.py + r"__(?P<text>\S+)__": r"\_\_\g<text>\_\_", +} + +EPYTEXT_FIELDS: List[str] = [ + "@param", + "@type", + "@keyword", + "@ivar", # class instance variable + "@cvar", # static class + "@var", + "@group", + "@sort", + "@todo", + "@return", + "@rtype", # return type + "@raise", + "@see", + "@note", + "@attention", + "@bug", + "@warning", + "@version", + "@deprecated", + "@since", + "@change", + "@permission", + "@requires", + "@precondition", + "@postcondition", + "@invariant", + "@author", + "@organization", + "@copyright", + "@license", + "@contact", + "@summary", +] + + +# adapted from docstring_to_markdown.google.Section +class Section: + def __init__(self, name: str, content: str) -> None: + # --- Start Positron --- + name = dedent(name) + + # epytext is in the style of "@something: " or "@something name: " + # in either case, we can split on the first " " + split = name[1:].split(" ", 1) + + self.name = split[0].capitalize() if split[0].endswith(":") else split[0].capitalize() + ":" + self.content = "" + + # epytext should only ever have 1 arg name per section + self.arg_name = "" + # add any information from the first line to the rest of the content + + self._parse(split[1] + content) + # --- End Positron --- + + def _parse(self, content: str) -> None: + content = content.rstrip("\n") + + parts = [] + cur_part = [] + + for line in content.split("\n"): + # --- Start Positron --- + line = line.replace(" ", " ", 1) + line = line.replace("\t", " ", 1) + # --- End Positron --- + + if line.startswith(" "): + # Continuation from a multiline description + cur_part.append(line) + continue + + if cur_part: + # Leaving multiline description + parts.append(cur_part) + cur_part = [line] + else: + # Entering new description part + cur_part.append(line) + + # Last part + 
parts.append(cur_part) + # Format section + for part in parts: + indentation = "" + skip_first = False + + if ":" in part[0]: + spl = part[0].split(":") + + arg = spl[0] + # --- Start Positron --- + self.arg_name = arg + # --- End Positron --- + + description = ":".join(spl[1:]).lstrip() + # --- Start Positron --- + # indentation rules are different in epytext + # indentation = (len(arg) + 6) * " " + # --- End Positron --- + + if description: + # --- Start Positron --- + # arg and description are on the same line + # for epytext docstrings + self.content += "- `{}`: {}".format(arg, description).rstrip() + skip_first = True + else: + self.content += " {}\n".format(arg) + else: + self.content += "{}\n".format(part[0]) + # --- End Positron --- + + for n, line in enumerate(part[1:]): + if skip_first and n == 0: + # This ensures that indented params get moved to the + # previous line + # --- Start Positron --- + # previous lines lose spaces between words + self.content += " {}\n".format(line.lstrip()) + # --- End Positron --- + continue + + self.content += "{}{}\n".format(indentation, line.lstrip()) + + # remove trailing whitespaces and trailing newlines + self.content = self.content.rstrip("\n").rstrip() + + def as_markdown(self) -> str: + return "#### {}\n\n{}\n\n".format(self.name, self.content) + + +# similar to docstring_to_markdown.google.GoogleDocstring +# --- Start Positron --- +class EpytextDocstring: + # --- End Positron --- + def __init__(self, docstring: str) -> None: + self.sections: List[Section] = [] + self.description: str = "" + + self._parse(docstring) + + def _parse(self, docstring: str) -> None: + self.sections = [] + self.description = "" + + buf = "" + cur_section = "" + for line in docstring.split("\n"): + if is_section(line): + # Entering new section + if cur_section: + # Leaving previous section, save it and reset buffer + self.sections.append(Section(cur_section, buf)) + buf = "" + + # --- Start Positron --- + # Remember currently parsed 
section + cur_section = line.rstrip() + # --- End Positron --- + continue + + # Parse section content + if cur_section: + buf += line + "\n" + else: + # Before setting cur_section, we're parsing the function description + self.description += line + "\n" + + # Last section + self.sections.append(Section(cur_section, buf)) + + # --- Start Positron --- + # other docstring styles have all section entries combined, where epytext + # has a section per parameter/type, so we have to aggregate the sections + def combine_sections(self): + # have to have all the types first + self.sections.sort(key=custom_sort_key) + + unique_sections = {} + type_sections = {} + # Iterate through the list of Section objects + for section in self.sections: + name = section.name + content = section.content + + if name == "Type:": + type_sections[section.arg_name] = content.split(f"`{section.arg_name}`: ", 1)[1] + elif name == "Rtype:": + unique_sections["Return:"].content = ( + f"({content.rstrip()}) {unique_sections['Return:'].content}" + ) + else: + matching_type = type_sections.get(str(section.arg_name)) + + if matching_type: + content_split = content.split(":", 1) + # replace the : we split on, add type name, then content + section.content = ( + f"- `{section.arg_name}` ({matching_type.rstrip()}):{content_split[1]}" + ) + if name in unique_sections: + # Append the description if the section heading is already present + unique_sections[name].content += "\n" + section.content + else: + unique_sections[name] = section + + # Convert back to a list of Sections + unique_sections_list = list(unique_sections.values()) + + return unique_sections_list + + # --- End Positron --- + + def as_markdown(self) -> str: + text = self.description + + # --- Start Positron --- + unique_sections = self.combine_sections() + # --- End Positron --- + + for section in unique_sections: + text += section.as_markdown() + + return text.rstrip("\n") + "\n" # Only keep one last newline + + +# --- Start Positron --- +def 
custom_sort_key(section): + if section.name == "Type:": + return 0 + if section.name == "Rtype": + return 2 + else: + return 1 + + +# --- End Positron --- + + +# adapted from docstring_to_markdown.looks_like_google +# --- Start Positron --- +def looks_like_epytext(value: str) -> bool: + for field in EPYTEXT_FIELDS: + if re.search(r"{}".format(field), value): + # --- End Positron --- + return True + + return False + + +# adapted from docstring_to_markdown.google.is_section +def is_section(line: str) -> bool: + # --- Start Positron --- + for field in EPYTEXT_FIELDS: + if re.search(r"{}".format(field), line): + # --- End Positron --- + return True + + return False + + +# adapted from docstring_to_markdown.google.google_to_markdown +# --- Start Positron --- +def epytext_to_markdown(text: str, extract_signature: bool = True) -> str: + # --- End Positron --- + # Escape parts we don't want to render + for pattern, replacement in ESCAPE_RULES.items(): + text = re.sub(pattern, replacement, text) + + # --- Start Positron --- + docstring = EpytextDocstring(text) + # --- End Positron --- + + return docstring.as_markdown() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help.py new file mode 100644 index 00000000000..fd2dcd72723 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help.py @@ -0,0 +1,145 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. 
#

from __future__ import annotations

import logging
import pydoc
from typing import TYPE_CHECKING, Any, Optional, Union

from .help_comm import (
    HelpBackendMessageContent,
    HelpFrontendEvent,
    ShowHelpKind,
    ShowHelpParams,
    ShowHelpTopicRequest,
)
from .positron_comm import CommMessage, PositronComm
from .pydoc import start_server
from .utils import JsonRecord, get_qualname

if TYPE_CHECKING:
    from comm.base_comm import BaseComm

logger = logging.getLogger(__name__)


def help(topic="help"):
    """
    Show help for the given topic.

    Examples
    --------

    Show help for the `help` function itself:

    >>> help()

    Show help for a type:

    >>> import pandas
    >>> help(pandas.DataFrame)

    A string import path works too:

    >>> help("pandas.DataFrame")

    Show help for a type given an instance:

    >>> df = pandas.DataFrame()
    >>> help(df)
    """
    # Imported at call time rather than module level — presumably to avoid a
    # circular import with the kernel module. NOTE(review): the module name is
    # `positron_ipkernel` (no second "y"), unlike the package name; verify
    # against the package layout before "fixing" it.
    from .positron_ipkernel import PositronIPyKernel

    if PositronIPyKernel.initialized():
        kernel = PositronIPyKernel.instance()
        kernel.help_service.show_help(topic)
    else:
        raise Exception("Unexpected error. No PositronIPyKernel has been initialized.")


class HelpService:
    """
    Manages the help server and submits help-related events to the `FrontendService`.
    """

    # Not sure why, but some qualified names cause errors in pydoc. Manually replace these with
    # names that are known to work.
    _QUALNAME_OVERRIDES = {
        "pandas.core.frame": "pandas",
        "pandas.core.series": "pandas",
    }

    def __init__(self):
        # Comm used to talk to the frontend; set in on_comm_open.
        self._comm: Optional[PositronComm] = None
        # Background pydoc server thread; set by start().
        self._pydoc_thread = None

    def on_comm_open(self, comm: BaseComm, msg: JsonRecord) -> None:
        # Wrap the raw comm and route incoming messages to handle_msg,
        # validated against HelpBackendMessageContent.
        self._comm = PositronComm(comm)
        self._comm.on_msg(self.handle_msg, HelpBackendMessageContent)

    def handle_msg(self, msg: CommMessage[HelpBackendMessageContent], raw_msg: JsonRecord) -> None:
        """
        Handle messages received from the client via the positron.help comm.
        """
        request = msg.content.data

        if isinstance(request, ShowHelpTopicRequest):
            # Acknowledge the request before showing the topic.
            if self._comm is not None:
                self._comm.send_result(data=True)
            self.show_help(request.params.topic)

        else:
            logger.warning(f"Unhandled request: {request}")

    def shutdown(self) -> None:
        # shutdown pydoc
        if self._pydoc_thread is not None and self._pydoc_thread.serving:
            logger.info("Stopping pydoc server thread")
            self._pydoc_thread.stop()
            logger.info("Pydoc server thread stopped")
        # shutdown comm
        if self._comm is not None:
            try:
                self._comm.close()
            except Exception:
                # Best-effort close; failures during shutdown are deliberately
                # swallowed.
                pass

    def start(self):
        # Launch the pydoc HTTP server on a background thread (see .pydoc).
        self._pydoc_thread = start_server()

    def show_help(self, request: Optional[Union[str, Any]]) -> None:
        # Bail out early when the pydoc server isn't running — there is no URL
        # to point the frontend at.
        if self._pydoc_thread is None or not self._pydoc_thread.serving:
            logger.warning("Ignoring help request, the pydoc server is not serving")
            return

        # Map from the object to the URL for the pydoc server.
        # We first use pydoc.resolve, which lets us handle an object or an import path.
        result = None
        try:
            result = pydoc.resolve(thing=request)
        except ImportError:
            pass

        if result is None:
            # We could not resolve to an object, try to get help for the request as a string.
            key = request
        else:
            # We resolved to an object.
            obj = result[0]
            key = get_qualname(obj)

        # Not sure why, but some qualified names cause errors in pydoc. Manually replace these with
        # names that are known to work.
        for old, new in self._QUALNAME_OVERRIDES.items():
            # Only the first matching override is applied.
            if key.startswith(old):
                key = key.replace(old, new)
                break

        url = f"{self._pydoc_thread.url}get?key={key}"

        # Submit the event to the frontend service
        event = ShowHelpParams(content=url, kind=ShowHelpKind.Url, focus=True)
        if self._comm is not None:
            self._comm.send_event(name=HelpFrontendEvent.ShowHelp.value, payload=event.dict())
diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help_comm.py
new file mode 100644
index 00000000000..a909a8b5460
--- /dev/null
+++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/help_comm.py
@@ -0,0 +1,115 @@
#
# Copyright (C) 2024 Posit Software, PBC. All rights reserved.
#

#
# AUTO-GENERATED from help.json; do not edit.
#

# flake8: noqa

# For forward declarations
from __future__ import annotations

import enum
from typing import Any, List, Literal, Optional, Union

from ._vendor.pydantic import BaseModel, Field


@enum.unique
class ShowHelpKind(str, enum.Enum):
    """
    Possible values for Kind in ShowHelp
    """

    Html = "html"

    Markdown = "markdown"

    Url = "url"


@enum.unique
class HelpBackendRequest(str, enum.Enum):
    """
    An enumeration of all the possible requests that can be sent to the backend help comm.
    """

    # Look for and, if found, show a help topic.
    ShowHelpTopic = "show_help_topic"


class ShowHelpTopicParams(BaseModel):
    """
    Requests that the help backend look for a help topic and, if found,
    show it. If the topic is found, it will be shown via a Show Help
    notification. If the topic is not found, no notification will be
    delivered.
    """

    topic: str = Field(
        description="The help topic to show",
    )


class ShowHelpTopicRequest(BaseModel):
    """
    Requests that the help backend look for a help topic and, if found,
    show it. If the topic is found, it will be shown via a Show Help
    notification. If the topic is not found, no notification will be
    delivered.
    """

    params: ShowHelpTopicParams = Field(
        description="Parameters to the ShowHelpTopic method",
    )

    method: Literal[HelpBackendRequest.ShowHelpTopic] = Field(
        description="The JSON-RPC method name (show_help_topic)",
    )

    jsonrpc: str = Field(
        default="2.0",
        description="The JSON-RPC version specifier",
    )


class HelpBackendMessageContent(BaseModel):
    comm_id: str
    data: ShowHelpTopicRequest


@enum.unique
class HelpFrontendEvent(str, enum.Enum):
    """
    An enumeration of all the possible events that can be sent to the frontend help comm.
    """

    # Request to show help in the frontend
    ShowHelp = "show_help"


class ShowHelpParams(BaseModel):
    """
    Request to show help in the frontend
    """

    content: str = Field(
        description="The help content to show",
    )

    kind: ShowHelpKind = Field(
        description="The type of content to show",
    )

    focus: bool = Field(
        description="Whether to focus the Help pane when the content is displayed.",
    )


ShowHelpTopicParams.update_forward_refs()

ShowHelpTopicRequest.update_forward_refs()

ShowHelpParams.update_forward_refs()
diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/inspectors.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/inspectors.py
new file mode 100644
index 00000000000..62ec7d57a27
--- /dev/null
+++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/inspectors.py
@@ -0,0 +1,970 @@
#
# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved.
+# +from __future__ import annotations + +import copy +import datetime +import inspect +import logging +import numbers +import pydoc +import re +import sys +import types +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping, MutableSequence, MutableSet, Sequence, Set +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + Dict, + FrozenSet, + Generic, + Iterable, + Optional, + Sized, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from .third_party import np_, pd_, torch_ +from .utils import JsonData, get_qualname, not_none, pretty_format + +if TYPE_CHECKING: + import numpy as np + import pandas as pd + import polars as pl + + try: # temporary try/except for python 3.12 + import torch # type: ignore [reportMissingImports] + except ImportError: + pass + +# General display settings +TRUNCATE_AT: int = 1024 +PRINT_WIDTH: int = 100 + +# Array-specific display settings +ARRAY_THRESHOLD = 20 +ARRAY_EDGEITEMS = 9 + +logger = logging.getLogger(__name__) + +# +# Base inspector +# + +T = TypeVar("T") + + +class PositronInspector(Generic[T]): + """ + Base inspector for any type + """ + + def __init__(self, value: T) -> None: + self.value = value + + def get_display_name(self, key: str) -> str: + return str(key) + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + return pretty_format(self.value, print_width, truncate_at) + + def get_display_type(self) -> str: + type_name = type(self.value).__name__ + + if isinstance(self.value, Sized): + length = self.get_length() + return f"{type_name} [{length}]" + + return type_name + + def get_kind(self) -> str: + return _get_kind(self.value) + + def get_type_info(self) -> str: + return get_qualname(type(self.value)) + + def get_length(self) -> int: + return len(self.value) if isinstance(self.value, Sized) else 0 + + def get_size(self) -> int: + return sys.getsizeof(self.value) + + def 
has_children(self) -> bool: + return self.get_length() > 0 + + def has_child(self, key: Any) -> bool: + return False + + def get_child(self, key: Any) -> Any: + raise TypeError(f"get_child() is not implemented for type: {type(self.value)}") + + def get_items(self) -> Iterable[Tuple[Any, Any]]: + return [] + + def has_viewer(self) -> bool: + return False + + def is_mutable(self) -> bool: + return False + + def get_comparison_cost(self) -> int: + return self.get_size() + + def equals(self, value: T) -> bool: + try: + return self.value == value + except ValueError: + # If a collection has a nested value that does not support + # bool(x == y) (like NumPy arrays or other array-like + # objects), this will error + return False + + def copy(self) -> T: + # TODO: Need to add unit tests for the deepcopy case + if self.is_mutable(): + return copy.deepcopy(self.value) + else: + return copy.copy(self.value) + + def to_html(self) -> str: + return repr(self.value) + + def to_plaintext(self) -> str: + return repr(self.value) + + def to_json(self) -> JsonData: + return dict(type=self.type_to_json(), data=self.value_to_json()) + + def type_to_json(self) -> str: + return self.get_type_info() + + def value_to_json(self) -> JsonData: + raise NotImplementedError( + f"value_to_json() is not implemented for this type. type: {type(self.value)}" + ) + + @classmethod + def from_json(cls, json_data: JsonData) -> T: + if not isinstance(json_data, dict): + raise ValueError(f"Expected json_data to be dict, got {json_data}") + + if not isinstance(json_data["type"], str): + raise ValueError(f"Expected json_data['type'] to be str, got {json_data['type']}") + + # TODO(pyright): cast shouldn't be necessary, recheck in a future version of pyright + return cls.value_from_json(cast(str, json_data["type"]), json_data["data"]) + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> T: + raise NotImplementedError( + f"value_from_json() is not implemented for this type. 
type_name: {type_name}, data: {data}" + ) + + +# +# Scalars +# + + +class BooleanInspector(PositronInspector[bool]): + def get_kind(self) -> str: + return "boolean" + + def value_to_json(self) -> JsonData: + return self.value + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> bool: + if not isinstance(data, bool): + raise ValueError(f"Expected data to be bool, got {data}") + + return data + + +class BytesInspector(PositronInspector[bytes]): + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + # Ignore print_width for strings + return super().get_display_value(None, truncate_at) + + def get_kind(self) -> str: + return "bytes" + + def has_children(self) -> bool: + return False + + def value_to_json(self) -> str: + return self.value.decode() + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> bytes: + if not isinstance(data, str): + raise ValueError(f"Expected data to be str, got {data}") + + return data.encode() + + +### object + + +class ObjectInspector(PositronInspector[T], ABC): + def has_child(self, key: str) -> bool: + return hasattr(self.value, key) + + def get_length(self) -> int: + if isinstance(self.value, property): + return 0 + return len([p for p in dir(self.value) if not (p.startswith("_"))]) + + def get_child(self, key: str) -> Any: + return getattr(self.value, key) + + def get_items(self) -> Iterable[Tuple[str, Any]]: + for key in dir(self.value): + if key.startswith("_"): + continue + try: + yield key, self.get_child(key) + except AttributeError: + pass + + +class ClassInspector(ObjectInspector[type]): + def get_kind(self) -> str: + return "class" + + def value_to_json(self) -> JsonData: + return str(self.value) + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> type: + if not isinstance(data, str): + raise ValueError(f"Expected data to be str, got {data}") + + pattern = "(?<= str: + 
return "string" + + def has_children(self) -> bool: + return False + + def value_to_json(self) -> JsonData: + return self.value + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> str: + if not isinstance(data, str): + raise ValueError(f"Expected data to be str, got {data}") + + return data + + +Timestamp = TypeVar("Timestamp", datetime.datetime, "pd.Timestamp") + + +class _BaseTimestampInspector(PositronInspector[Timestamp], ABC): + CLASS: Type[Timestamp] + + @classmethod + @abstractmethod + def value_from_isoformat(cls, string: str) -> Timestamp: + pass + + def value_to_json(self) -> JsonData: + return self.value.isoformat() + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> Timestamp: + if not isinstance(data, str): + raise ValueError(f"Expected data to be str, got {data}") + + return cls.value_from_isoformat(data) + + +class DatetimeInspector(_BaseTimestampInspector[datetime.datetime]): + CLASS_QNAME = "datetime.datetime" + + @classmethod + def value_from_isoformat(cls, string: str) -> datetime.datetime: + return datetime.datetime.fromisoformat(string) + + +class PandasTimestampInspector(_BaseTimestampInspector["pd.Timestamp"]): + CLASS_QNAME = "pandas._libs.tslibs.timestamps.Timestamp" + + @classmethod + def value_from_isoformat(cls, string: str) -> pd.Timestamp: + return not_none(pd_).Timestamp.fromisoformat(string) + + +# +# Collections +# + +CollectionT = Union[range, FrozenSet, Sequence, Set, Tuple] +CT = TypeVar("CT", CollectionT, "np.ndarray", "torch.Tensor") + + +class _BaseCollectionInspector(PositronInspector[CT], ABC): + def get_kind(self) -> str: + return "collection" + + def has_children(self) -> bool: + # For ranges, we don't visualize the children as they're + # implied as a contiguous set of integers in a range. + # For sets, we don't visualize the children as they're + # not subscriptable objects. 
+ if isinstance(self.value, (range, Set, FrozenSet)): + return False + + return super().has_children() + + def has_child(self, key: int) -> bool: + return key < self.get_length() + + def get_child(self, key: int) -> Any: + # Don't allow indexing into ranges or sets. + if isinstance(self.value, (range, Set, FrozenSet)): + raise TypeError(f"get_child() is not implemented for type: {type(self.value)}") + + # TODO(pyright): type should be narrowed to exclude frozen set, retry in a future version of pyright + return self.value[key] # type: ignore + + def get_items(self) -> Iterable[Tuple[int, Any]]: + # Treat collection items as children, with the index as the name + return enumerate(self.value) + + +# We don't use typing.Sequence here since it includes mappings, +# for which we have a separate inspector. + + +class CollectionInspector(_BaseCollectionInspector[CollectionT]): + def get_display_type(self) -> str: + # Display length for various collections and maps + # using the Python notation for the type + type_name = type(self.value).__name__ + length = self.get_length() + + if isinstance(self.value, Set): + return f"{type_name} {{{length}}}" + elif isinstance(self.value, tuple): + return f"{type_name} ({length})" + else: + return f"{type_name} [{length}]" + + def get_comparison_cost(self) -> int: + # Placeholder estimate. 
In practice this can be arbitrarily large + return 10 * self.get_length() + + def is_mutable(self) -> bool: + return isinstance(self.value, (MutableSequence, MutableSet)) + + def value_to_json(self) -> JsonData: + if isinstance(self.value, range): + return { + "start": self.value.start, + "stop": self.value.stop, + "step": self.value.step, + } + + return super().value_to_json() + + @classmethod + def value_from_json(cls, type_name: str, data: JsonData) -> CollectionT: + if type_name == "range": + if not isinstance(data, dict): + raise ValueError(f"Expected data to be dict, got {data}") + + if not isinstance(data["start"], int): + raise ValueError(f"Expected data['start'] to be int, got {data['start']}") + + if not isinstance(data["stop"], int): + raise ValueError(f"Expected data['stop'] to be int, got {data['stop']}") + + if not isinstance(data["step"], int): + raise ValueError(f"Expected data['step'] to be int, got {data['step']}") + + # TODO(pyright): cast shouldn't be necessary, recheck in a future version of pyright + return range( + cast(int, data["start"]), + cast(int, data["stop"]), + cast(int, data["step"]), + ) + + return super().value_from_json(type_name, data) + + +Array = TypeVar("Array", "np.ndarray", "torch.Tensor") + + +class _BaseArrayInspector(_BaseCollectionInspector[Array], ABC): + def get_kind(self) -> str: + return "collection" if self.value.ndim > 0 else "number" + + def get_display_type(self) -> str: + display_type = str(self.value.dtype) + + # Include shape information, only if it's not a scalar + shape = self.value.shape + if self.value.ndim == 1: + # Remove the trailing comma for 1D arrays + display_type = f"{display_type} ({shape[0]})" + elif self.value.ndim != 0: + display_type = f"{display_type} {tuple(shape)}" + + # Prepend the module name if it's not already there, to distinguish different types of + # arrays e.g. 
numpy versus pytorch + module = type(self.value).__module__ + if not display_type.startswith(module): + display_type = f"{module}.{display_type}" + + return display_type + + def get_comparison_cost(self) -> int: + # Placeholder estimate. In practice this can be arbitrarily + # large for object dtypes + return self.get_size() + + def get_size(self) -> int: + if self.value.ndim == 0: + return 0 + + num_elements = 1 + for dim in self.value.shape: + num_elements *= dim + + return num_elements * self.value.dtype.itemsize + + def get_length(self) -> int: + return self.value.shape[0] if self.value.ndim > 0 else 0 + + def is_mutable(self) -> bool: + return True + + +class NumpyNdarrayInspector(_BaseArrayInspector["np.ndarray"]): + CLASS_QNAME = "numpy.ndarray" + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + return ( + not_none(np_).array2string( + self.value, + max_line_width=print_width, + threshold=ARRAY_THRESHOLD, + edgeitems=ARRAY_EDGEITEMS, + separator=",", + ), + True, + ) + + def equals(self, value: np.ndarray) -> bool: + return not_none(np_).array_equal(self.value, value) + + def copy(self) -> np.ndarray: + return self.value.copy() + + +class TorchTensorInspector(_BaseArrayInspector["torch.Tensor"]): + CLASS_QNAME = "torch.Tensor" + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + # NOTE: + # Once https://github.com/pytorch/pytorch/commit/e03800a93af55ef61f2e610d65ac7194c0614edc + # is in a stable version we can use it to temporarily set print options + torch = not_none(torch_) + + new_options = { + "threshold": ARRAY_THRESHOLD, + "edgeitems": ARRAY_EDGEITEMS, + "linewidth": print_width, + } + options_obj = torch._tensor_str.PRINT_OPTS + original_options = {k: getattr(options_obj, k) for k in new_options} + + torch.set_printoptions(**new_options) + + display_value = str(self.value) + # 
Strip the surrounding `tensor(...)` + display_value = display_value[len("tensor(") : -len(")")] + + torch.set_printoptions(**original_options) + + return display_value, True + + def equals(self, value: torch.Tensor) -> bool: + return not_none(torch_).equal(self.value, value) + + def copy(self) -> torch.Tensor: + # Detach the tensor from any existing computation graphs to avoid gradients propagating + # through them. + # TODO: This creates a completely new tensor using new memory. Is there a more + # memory-efficient way to do this? + return self.value.detach().clone() + + def get_size(self) -> int: + if self.value.ndim == 0: + return self.value.element_size() + + num_elements = 1 + for dim in self.value.shape: + num_elements *= dim + + return num_elements * self.value.element_size() + + +# +# Maps +# + + +MT = TypeVar( + "MT", + Mapping, + "pd.DataFrame", + "pl.DataFrame", + "pd.Series", + "pl.Series", + "pd.Index", +) + + +class _BaseMapInspector(PositronInspector[MT], ABC): + def get_kind(self) -> str: + return "map" + + @abstractmethod + def get_keys(self) -> Collection[Any]: + pass + + def get_size(self) -> int: + result = 1 + for dim in getattr(self.value, "shape", [len(self.value)]): + result *= dim + + # Issue #2174: fudge factor, say 8 bytes per value as a rough + # estimate + return result * 8 + + def has_child(self, key: Any) -> bool: + return key in self.get_keys() + + def get_child(self, key: Any) -> Any: + return self.value[key] + + def get_items(self) -> Iterable[Tuple[Any, Any]]: + for key in self.get_keys(): + yield key, self.value[key] + + +class MapInspector(_BaseMapInspector[Mapping]): + def get_keys(self) -> Collection[Any]: + return self.value.keys() + + def is_mutable(self) -> bool: + return isinstance(self.value, MutableMapping) + + +Column = TypeVar("Column", "pd.Series", "pl.Series", "pd.Index") + + +class BaseColumnInspector(_BaseMapInspector[Column], ABC): + def get_child(self, key: Any) -> Any: + return self.value[key] + + def 
get_display_type(self) -> str: + return f"{self.value.dtype} [{self.get_length()}]" + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + # TODO(pyright): cast shouldn't be necessary, recheck in a future version of pyright + display_value = str(cast(Column, self.value[:100]).to_list()) + return (display_value, True) + + +class PandasSeriesInspector(BaseColumnInspector["pd.Series"]): + CLASS_QNAME = "pandas.core.series.Series" + + def get_keys(self) -> Collection[Any]: + return self.value.index + + def equals(self, value: pd.Series) -> bool: + return self.value.equals(value) + + def copy(self) -> pd.Series: + # Copies memory because pandas < 3.0 does not have + # copy-on-write. + return self.value.copy(deep=True) + + def to_html(self) -> str: + # TODO: Support HTML + return self.to_plaintext() + + def to_plaintext(self) -> str: + return self.value.to_csv(path_or_buf=None, sep="\t") + + +class PandasIndexInspector(BaseColumnInspector["pd.Index"]): + CLASS_QNAME = [ + "pandas.core.indexes.base.Index", + "pandas.core.indexes.datetimes.DatetimeIndex", + "pandas.core.indexes.range.RangeIndex", + "pandas.core.indexes.multi.MultiIndex", + "pandas.core.indexes.numeric.Int64Index", + ] + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + # RangeIndexes don't need to be truncated. + if isinstance(self.value, not_none(pd_).RangeIndex): + return str(self.value), False + + return super().get_display_value(print_width, truncate_at) + + def has_children(self) -> bool: + # For ranges, we don't visualize the children as they're + # implied as a contiguous set of integers in a range. 
+ if isinstance(self.value, not_none(pd_).RangeIndex): + return False + + return super().has_children() + + def get_keys(self) -> Collection[Any]: + return range(len(self.value)) + + def equals(self, value: pd.Index) -> bool: + return self.value.equals(value) + + def copy(self) -> pd.Index: + # Copies memory because pandas < 3.0 does not have + # copy-on-write. + return self.value.copy(deep=True) + + def to_html(self) -> str: + # TODO: Support HTML + return self.to_plaintext() + + def to_plaintext(self) -> str: + return self.value.to_series().to_csv(path_or_buf=None, sep="\t") + + +class PolarsSeriesInspector(BaseColumnInspector["pl.Series"]): + CLASS_QNAME = [ + "polars.series.series.Series", + "polars.internals.series.series.Series", + ] + + def get_keys(self) -> Collection[Any]: + return range(len(self.value)) + + def equals(self, value: pl.Series) -> bool: + return self.value.series_equal(value) + + def copy(self) -> pl.Series: + # Polars produces a shallow clone and does not copy any memory + # in this operation. + return self.value.clone() + + def to_html(self) -> str: + # TODO: Support HTML + return self.to_plaintext() + + def to_plaintext(self) -> str: + return self.value.to_frame().write_csv(file=None, separator="\t") + + +Table = TypeVar("Table", "pd.DataFrame", "pl.DataFrame") + + +class BaseTableInspector(_BaseMapInspector[Table], Generic[Table, Column], ABC): + """ + Base inspector for tabular data + """ + + def get_display_type(self) -> str: + type_name = type(self.value).__name__ + shape = self.value.shape + return f"{type_name} [{shape[0]}x{shape[1]}]" + + def get_kind(self) -> str: + return "table" + + def get_length(self) -> int: + # send number of columns. 
+ # number of rows per column is handled by ColumnInspector + return self.value.shape[1] + + def get_keys(self) -> Collection[Any]: + return self.value.columns + + def has_viewer(self) -> bool: + return True + + def is_mutable(self) -> bool: + return True + + +# +# Custom inspectors for specific types +# + + +class PandasDataFrameInspector(BaseTableInspector["pd.DataFrame", "pd.Series"]): + CLASS_QNAME = "pandas.core.frame.DataFrame" + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + display_value = get_qualname(self.value) + if hasattr(self.value, "shape"): + shape = self.value.shape + display_value = f"[{shape[0]} rows x {shape[1]} columns] {display_value}" + + return (display_value, True) + + def equals(self, value: pd.DataFrame) -> bool: + return self.value.equals(value) + + def copy(self) -> pd.DataFrame: + # Copies memory because pandas < 3.0 does not have + # copy-on-write. + return self.value.copy(deep=True) + + def to_html(self) -> str: + return self.value.to_html() + + def to_plaintext(self) -> str: + return self.value.to_csv(path_or_buf=None, sep="\t") + + +class PolarsDataFrameInspector(BaseTableInspector["pl.DataFrame", "pl.Series"]): + CLASS_QNAME = [ + "polars.dataframe.frame.DataFrame", + "polars.internals.dataframe.frame.DataFrame", + ] + + def get_display_value( + self, + print_width: Optional[int] = PRINT_WIDTH, + truncate_at: int = TRUNCATE_AT, + ) -> Tuple[str, bool]: + qualname = get_qualname(self.value) + shape = self.value.shape + display_value = f"[{shape[0]} rows x {shape[1]} columns] {qualname}" + return (display_value, True) + + def equals(self, value: pl.DataFrame) -> bool: + try: + return self.value.equals(value) + except AttributeError: # polars.DataFrame.equals was introduced in v0.19.16 + return self.value.frame_equal(value) + + def copy(self) -> pl.DataFrame: + # Polars produces a shallow clone and does not copy any memory + # in this 
operation. + return self.value.clone() + + def to_html(self) -> str: + return self.value._repr_html_() + + def to_plaintext(self) -> str: + return self.value.write_csv(file=None, separator="\t") + + +INSPECTOR_CLASSES: Dict[str, Type[PositronInspector]] = { + PandasDataFrameInspector.CLASS_QNAME: PandasDataFrameInspector, + PandasSeriesInspector.CLASS_QNAME: PandasSeriesInspector, + **dict.fromkeys(PandasIndexInspector.CLASS_QNAME, PandasIndexInspector), + PandasTimestampInspector.CLASS_QNAME: PandasTimestampInspector, + NumpyNdarrayInspector.CLASS_QNAME: NumpyNdarrayInspector, + TorchTensorInspector.CLASS_QNAME: TorchTensorInspector, + **dict.fromkeys(PolarsDataFrameInspector.CLASS_QNAME, PolarsDataFrameInspector), + **dict.fromkeys(PolarsSeriesInspector.CLASS_QNAME, PolarsSeriesInspector), + DatetimeInspector.CLASS_QNAME: DatetimeInspector, + "boolean": BooleanInspector, + "bytes": BytesInspector, + "class": ClassInspector, + "collection": CollectionInspector, + "function": FunctionInspector, + "map": MapInspector, + "number": NumberInspector, + "other": ObjectInspector, + "string": StringInspector, +} + +# +# Helper functions +# + + +def get_inspector(value: T) -> PositronInspector[T]: + # Look for a specific inspector by qualified classname + if isinstance(value, type): + qualname = str("type") + else: + qualname = get_qualname(value) + inspector_cls = INSPECTOR_CLASSES.get(qualname, None) + + if inspector_cls is None: + # Otherwise, look for an inspector by kind + kind = _get_kind(value) + inspector_cls = INSPECTOR_CLASSES.get(kind, None) + + # Otherwise, default to generic inspector + if inspector_cls is None: + inspector_cls = PositronInspector + + inspector = inspector_cls(value) + + return inspector + + +def _get_kind(value: Any) -> str: + if isinstance(value, str): + return "string" + elif isinstance(value, bool): + return "boolean" + elif isinstance(value, numbers.Number): + return "number" + elif isinstance(value, Mapping): + return "map" + elif 
isinstance(value, (bytes, bytearray, memoryview)): + return "bytes" + elif isinstance(value, (Sequence, Set)): + return "collection" + elif isinstance(value, (types.FunctionType, types.MethodType)): + return "function" + elif isinstance(value, type): + return "class" + elif value is not None: + return "other" + else: + return "empty" diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/jedi.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/jedi.py new file mode 100644 index 00000000000..cf5d9376698 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/jedi.py @@ -0,0 +1,337 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from typing import Any, Tuple + +from ._vendor.jedi import cache, debug +from ._vendor.jedi.api import Interpreter +from ._vendor.jedi.api.classes import Completion +from ._vendor.jedi.api.completion import ( + Completion as CompletionAPI, +) # Rename to avoid conflict with classes.Completion +from ._vendor.jedi.api.completion import ( + _extract_string_while_in_string, + _remove_duplicates, + filter_names, +) +from ._vendor.jedi.api.file_name import complete_file_name +from ._vendor.jedi.api.interpreter import MixedModuleContext +from ._vendor.jedi.api.strings import get_quote_ending +from ._vendor.jedi.cache import memoize_method +from ._vendor.jedi.file_io import KnownContentFileIO +from ._vendor.jedi.inference.base_value import HasNoContext +from ._vendor.jedi.inference.compiled import ExactValue +from ._vendor.jedi.inference.compiled.mixed import MixedName, MixedObject +from ._vendor.jedi.inference.compiled.value import CompiledName, CompiledValue +from ._vendor.jedi.inference.context import ValueContext +from ._vendor.jedi.inference.helpers import infer_call_of_leaf +from ._vendor.jedi.inference.value import ModuleValue +from ._vendor.jedi.parser_utils import cut_value_at_position +from .utils import safe_isinstance + +# +# We 
adapt code from the MIT-licensed jedi static analysis library to provide enhanced completions +# for data science users. Note that we've had to dip into jedi's private API to do that. Jedi is +# available at: +# +# https://github.com/davidhalter/jedi +# + +_sentinel = object() + + +class PositronMixedModuleContext(MixedModuleContext): + """ + A `jedi.api.interpreter.MixedModuleContext` that prefers values from the user's namespace over + static analysis. + + For example, given the namespace: `{"x": {"a": 0}}`, and the code: + + ``` + x = {"b": 0} + x[' + ``` + + Completing the line `x['` should return `a` and not `b`. + """ + + def get_filters(self, until_position=None, origin_scope=None): + filters = super().get_filters(until_position, origin_scope) + + # Store the first filter – which corresponds to static analysis of the source code. + merged_filter = next(filters) + + # Yield the remaining filters – which correspond to the user's namespaces. + yield from filters + + # Finally, yield the first filter. + yield merged_filter + + +class PositronInterpreter(Interpreter): + """ + A `jedi.Interpreter` that provides enhanced completions for data science users. + """ + + @cache.memoize_method + def _get_module_context(self): + if self.path is None: + file_io = None + else: + file_io = KnownContentFileIO(self.path, self._code) + tree_module_value = ModuleValue( + self._inference_state, + self._module_node, + file_io=file_io, + string_names=("__main__",), + code_lines=self._code_lines, + ) + # --- Start Positron --- + # Use our custom module context class. + return PositronMixedModuleContext( + tree_module_value, + self.namespaces, + ) + # --- End Positron --- + + def complete(self, line=None, column=None, *, fuzzy=False): + self._inference_state.reset_recursion_limitations() + with debug.increase_indent_cm("complete"): + # --- Start Positron --- + # Use our custom completion class. 
+ completion = PositronCompletion( + # --- End Positron --- + self._inference_state, + self._get_module_context(), + self._code_lines, + (line, column), + self.get_signatures, + fuzzy=fuzzy, + ) + return completion.complete() + + +class PositronCompletion(CompletionAPI): + # As is from jedi.api.completion.Completion, copied here to use our `complete_dict`. + def complete(self): + leaf = self._module_node.get_leaf_for_position( + self._original_position, include_prefixes=True + ) + string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position) + + prefixed_completions = complete_dict( + self._module_context, + self._code_lines, + start_leaf or leaf, + self._original_position, + None if string is None else quote + string, # type: ignore + fuzzy=self._fuzzy, + ) + + if string is not None and not prefixed_completions: + prefixed_completions = list( + complete_file_name( + self._inference_state, + self._module_context, + start_leaf, + quote, + string, + self._like_name, + self._signatures_callback, + self._code_lines, + self._original_position, + self._fuzzy, + ) + ) + if string is not None: + if not prefixed_completions and "\n" in string: + # Complete only multi line strings + prefixed_completions = self._complete_in_string(start_leaf, string) + return prefixed_completions + + cached_name, completion_names = self._complete_python(leaf) + + completions = list( + filter_names( + self._inference_state, + completion_names, + self.stack, + self._like_name, + self._fuzzy, + cached_name=cached_name, + ) + ) + + return ( + # Removing duplicates mostly to remove False/True/None duplicates. + _remove_duplicates(prefixed_completions, completions) + + sorted( + completions, + key=lambda x: (x.name.startswith("__"), x.name.startswith("_"), x.name.lower()), + ) + ) + + +class DictKeyName(CompiledName): + """ + A dictionary key with support for inferring its value. 
+ """ + + def __init__(self, inference_state, parent_value, key): + self._inference_state = inference_state + + try: + self.parent_context = parent_value.as_context() + except HasNoContext: + # If we're completing a dict literal, e.g. `{'a': 0}['`, then parent_value is a + # DictLiteralValue which does not override `as_context()`. + # Manually create the context instead. + self.parent_context = ValueContext(parent_value) + + self._parent_value = parent_value + self._key = key + self.string_name = str(key) + + # NOTE(seem): IIUC is_descriptor is used to return the api_type() 'instance' without an + # execution. If so, it should be safe to always set it to false, but I may have misread + # the jedi code. + self.is_descriptor = False + + @memoize_method + def infer_compiled_value(self) -> CompiledValue: + parent = self._parent_value + + # We actually want to override MixedObject.py__simple_getitem__ to include objects from + # popular data science libraries as allowed getitem types. However, it's simpler to special + # case here instead of vendoring all instantiations of MixedObject. + # START: MixedObject.py__simple_getitem__ + if isinstance(parent, MixedObject): + python_object = parent.compiled_value.access_handle.access._obj + if _is_allowed_getitem_type(python_object): + values = parent.compiled_value.py__simple_getitem__(self._key) + else: + values = parent._wrapped_value.py__simple_getitem__(self._key) + # END: MixedObject.py__simple_getitem__ + else: + values = parent.py__simple_getitem__(self._key) + + values = list(values) + + if len(values) != 1: + raise ValueError(f"Expected exactly one value, got {len(values)}") + value = values[0] + + # This may return an ExactValue which wraps a CompiledValue e.g. when completing a dict + # literal like: `{"a": 0}['`. 
+ # For some reason, ExactValue().get_signatures() returns an empty list, but + # ExactValue()._compiled_value.get_signatures() returns the correct signatures, + # so we return the wrapped compiled value instead. + if isinstance(value, ExactValue): + return value._compiled_value + + return value + + +# As is from jedi.api.completion.Completion, copied here to use our `_completions_for_dicts`. +def complete_dict(module_context, code_lines, leaf, position, string, fuzzy): + bracket_leaf = leaf + if bracket_leaf != "[": + bracket_leaf = leaf.get_previous_leaf() + + cut_end_quote = "" + if string: + cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True) + + if bracket_leaf == "[": + if string is None and leaf is not bracket_leaf: + string = cut_value_at_position(leaf, position) + + context = module_context.create_context(bracket_leaf) + + before_node = before_bracket_leaf = bracket_leaf.get_previous_leaf() # type: ignore + if before_node in (")", "]", "}"): + before_node = before_node.parent + if before_node.type in ("atom", "trailer", "name"): + values = infer_call_of_leaf(context, before_bracket_leaf) + return list( + _completions_for_dicts( + module_context.inference_state, + values, + "" if string is None else string, + cut_end_quote, + fuzzy=fuzzy, + ) + ) + return [] + + +# Adapted from jedi.api.strings._completions_for_dicts. +def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy): + # --- Start Positron --- + # Since we've modified _get_python_keys to return Names, sort by yielded value's string_name + # instead of the yielded value itself. + for name in sorted(_get_python_keys(inference_state, dicts), key=lambda x: repr(x.string_name)): + # --- End Positron --- + yield Completion( + inference_state, + name, + stack=None, + like_name_length=len(literal_string), + is_fuzzy=fuzzy, + ) + + +# Adapted from jedi.api.strings._get_python_keys. 
+def _get_python_keys(inference_state, dicts): + for dct in dicts: + # --- Start Positron --- + # Handle dict-like objects from popular data science libraries. + try: + obj = dct.compiled_value.access_handle.access._obj + except AttributeError: + pass + else: + if _is_allowed_getitem_type(obj): + if hasattr(obj, "columns"): + for key in obj.columns: + yield DictKeyName(inference_state, dct, key) + return + + # --- End Positron --- + if dct.array_type == "dict": + for key in dct.get_key_values(): + dict_key = key.get_safe_value(default=_sentinel) + if dict_key is not _sentinel: + # --- Start Positron --- + # Return a DictKeyName instead of a string. + yield DictKeyName(inference_state, dct, dict_key) + # --- End Positron --- + + +def _is_allowed_getitem_type(obj: Any) -> bool: + """ + Can we safely call `obj.__getitem__`? + """ + # Only trust builtin types and types from popular data science libraries. + # We specifically compare type(obj) instead of using isinstance because we don't want to trust + # subclasses of builtin types. + return ( + type(obj) in (str, list, tuple, bytes, bytearray, dict) + or safe_isinstance(obj, "pandas", "DataFrame") + or safe_isinstance(obj, "polars", "DataFrame") + ) + + +def get_python_object(completion: Completion) -> Tuple[Any, bool]: + """ + Get the Python object corresponding to a completion, and a boolean indicating whether an object + was found. 
+ """ + name = completion._name + if isinstance(name, (CompiledName, MixedName)): + value = name.infer_compiled_value() + if isinstance(value, CompiledValue): + obj = value.access_handle.access._obj + return obj, True + return None, False diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/lsp.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/lsp.py new file mode 100644 index 00000000000..ba0c12867a8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/lsp.py @@ -0,0 +1,74 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import logging +import urllib.parse +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple + +from comm.base_comm import BaseComm + +from .positron_jedilsp import POSITRON + +if TYPE_CHECKING: + from .positron_ipkernel import PositronIPyKernel + + +logger = logging.getLogger(__name__) + + +class LSPService: + """ + LSPService manages the positron.lsp comm and coordinates starting the LSP. + """ + + def __init__(self, kernel: "PositronIPyKernel"): + self._kernel = kernel + self._comm: Optional[BaseComm] = None + + def on_comm_open(self, comm: BaseComm, msg: Dict[str, Any]) -> None: + """ + Setup positron.lsp comm to receive messages. 
+ """ + self._comm = comm + + # Register the comm message handler + comm.on_msg(self._receive_message) + + # Parse the host and port from the comm open message + data = msg["content"]["data"] + client_address = data.get("client_address", None) + if client_address is None: + logger.warning(f"No client_address in LSP comm open message: {msg}") + return + + host, port = self._split_address(client_address) + if host is None or port is None: + logger.warning(f"Could not parse host and port from client address: {client_address}") + return + + # Start the language server thread + POSITRON.start(lsp_host=host, lsp_port=port, shell=self._kernel.shell, comm=comm) + + def _receive_message(self, msg: Dict[str, Any]) -> None: + """ + Handle messages received from the client via the positron.lsp comm. + """ + pass + + def shutdown(self) -> None: + # Stop the language server thread + POSITRON.stop() + + if self._comm is not None: + try: + self._comm.close() + except Exception: + pass + + def _split_address(self, client_address: str) -> Tuple[Optional[str], Optional[int]]: + """ + Split an address of the form "host:port" into a tuple of (host, port). + """ + result = urllib.parse.urlsplit("//" + client_address) + return (result.hostname, result.port) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plot_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plot_comm.py new file mode 100644 index 00000000000..fdeaa8dcd67 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plot_comm.py @@ -0,0 +1,102 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +# +# AUTO-GENERATED from plot.json; do not edit. 
+# + +# flake8: noqa + +# For forward declarations +from __future__ import annotations + +import enum +from typing import Any, List, Literal, Optional, Union + +from ._vendor.pydantic import BaseModel, Field + + +class PlotResult(BaseModel): + """ + A rendered plot + """ + + data: str = Field( + description="The plot data, as a base64-encoded string", + ) + + mime_type: str = Field( + description="The MIME type of the plot data", + ) + + +@enum.unique +class PlotBackendRequest(str, enum.Enum): + """ + An enumeration of all the possible requests that can be sent to the backend plot comm. + """ + + # Render a plot + Render = "render" + + +class RenderParams(BaseModel): + """ + Requests a plot to be rendered at a given height and width. The plot + data is returned in a base64-encoded string. + """ + + height: int = Field( + description="The requested plot height, in pixels", + ) + + width: int = Field( + description="The requested plot width, in pixels", + ) + + pixel_ratio: float = Field( + description="The pixel ratio of the display device", + ) + + +class RenderRequest(BaseModel): + """ + Requests a plot to be rendered at a given height and width. The plot + data is returned in a base64-encoded string. + """ + + params: RenderParams = Field( + description="Parameters to the Render method", + ) + + method: Literal[PlotBackendRequest.Render] = Field( + description="The JSON-RPC method name (render)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class PlotBackendMessageContent(BaseModel): + comm_id: str + data: RenderRequest + + +@enum.unique +class PlotFrontendEvent(str, enum.Enum): + """ + An enumeration of all the possible events that can be sent to the frontend plot comm. + """ + + # Notification that a plot has been updated on the backend. 
+ Update = "update" + + +PlotResult.update_forward_refs() + +RenderParams.update_forward_refs() + +RenderRequest.update_forward_refs() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plots.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plots.py new file mode 100644 index 00000000000..653f70048d1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/plots.py @@ -0,0 +1,218 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import codecs +import logging +import pickle +import uuid +from typing import Dict, List, Optional, Tuple + +import comm +from IPython.core.formatters import format_display_data + +from .plot_comm import PlotBackendMessageContent, PlotResult, RenderRequest +from .positron_comm import CommMessage, JsonRpcErrorCode, PositronComm +from .utils import JsonRecord +from .widget import _WIDGET_MIME_TYPE + +logger = logging.getLogger(__name__) + + +# Matplotlib Default Figure Size +DEFAULT_WIDTH_IN = 6.4 +DEFAULT_HEIGHT_IN = 4.8 +BASE_DPI = 96 + + +class PositronDisplayPublisherHook: + def __init__(self, target_name: str): + self.comms: Dict[str, PositronComm] = {} + self.figures: Dict[str, str] = {} + self.target_name = target_name + self.fignums: List[int] = [] + + def __call__(self, msg, *args, **kwargs) -> Optional[dict]: + if msg["msg_type"] == "display_data": + # If there is no image for our display, don't create a + # positron.plot comm and let the parent deal with the msg. + data = msg["content"]["data"] + if _WIDGET_MIME_TYPE in data: + # This is a widget, let the widget hook handle it + return msg + if "image/png" not in data: + return msg + + # Otherwise, try to pickle the current figure so that we + # can restore the context for future renderings. We construct + # a new plot comm to advise the client of the new figure. 
+ pickled = self._pickle_current_figure() + if pickled is not None: + id = str(uuid.uuid4()) + self.figures[id] = pickled + + # Creating a comm per plot figure allows the client + # to request new renderings of each plot at a later time, + # e.g. on resizing the plots view + self._create_comm(id) + + # Returning None implies our hook has processed the message + # and it stops the parent from sending the display_data via + # the standard iopub channel + return None + + return msg + + def _create_comm(self, comm_id: str) -> None: + """ + Create a new plot comm with the given id. + """ + plot_comm = PositronComm(comm.create_comm(target_name=self.target_name, comm_id=comm_id)) + self.comms[comm_id] = plot_comm + plot_comm.on_msg(self.handle_msg, PlotBackendMessageContent) + + def handle_msg(self, msg: CommMessage[PlotBackendMessageContent], raw_msg: JsonRecord) -> None: + """ + Handle client messages to render a plot figure. + """ + comm_id = msg.content.comm_id + request = msg.content.data + + figure_comm = self.comms.get(comm_id, None) + if figure_comm is None: + logger.warning(f"Plot figure comm {comm_id} not found") + return + + if isinstance(request, RenderRequest): + pickled = self.figures.get(comm_id, None) + if pickled is None: + figure_comm.send_error( + code=JsonRpcErrorCode.INVALID_PARAMS, message=f"Figure {comm_id} not found" + ) + return + + width_px = request.params.width or 0 + height_px = request.params.height or 0 + pixel_ratio = request.params.pixel_ratio or 1.0 + + if width_px != 0 and height_px != 0: + format_dict, md_dict = self._resize_pickled_figure( + pickled, width_px, height_px, pixel_ratio + ) + data = format_dict["image/png"] + output = PlotResult(data=data, mime_type="image/png").dict() + figure_comm.send_result(data=output, metadata=md_dict) + + else: + logger.warning(f"Unhandled request: {request}") + + def shutdown(self) -> None: + """ + Shutdown plot comms and release any resources. 
+ """ + for figure_comm in self.comms.values(): + try: + figure_comm.close() + except Exception: + pass + self.comms.clear() + self.figures.clear() + + # -- Private Methods -- + + def _pickle_current_figure(self) -> Optional[str]: + pickled = None + figure = None + + # Delay importing matplotlib until the kernel and shell has been initialized + # otherwise the graphics backend will be reset to the gui + import matplotlib.pyplot as plt + + # We turn off interactive mode before accessing the plot context + was_interactive = plt.isinteractive() + plt.ioff() + + # Check to see if there are any figures left in stack to display + # If not, get the number of figures to display from matplotlib + if len(self.fignums) == 0: + self.fignums = plt.get_fignums() + + # Get the current figure, remove from it from being called next hook + if len(self.fignums) > 0: + figure = plt.figure(self.fignums.pop(0)) + + # Pickle the current figure + if figure is not None and not self._is_figure_empty(figure): + pickled = codecs.encode(pickle.dumps(figure), "base64").decode() + + if was_interactive: + plt.ion() + + return pickled + + def _resize_pickled_figure( + self, + pickled: str, + new_width_px: int = 614, + new_height_px: int = 460, + pixel_ratio: float = 1.0, + formats: list = ["image/png"], + ) -> Tuple[dict, dict]: + # Delay importing matplotlib until the kernel and shell has been + # initialized otherwise the graphics backend will be reset to the gui + import matplotlib.pyplot as plt + + # Turn off interactive mode before, including before unpickling a + # figures (otherwise it will cause and endless loop of plot changes) + was_interactive = plt.isinteractive() + plt.ioff() + + figure = pickle.loads(codecs.decode(pickled.encode(), "base64")) + + # Adjust the DPI based on pixel_ratio to accommodate high + # resolution displays... + dpi = BASE_DPI * pixel_ratio + figure.set_dpi(dpi) + + # ... but use base DPI to convert to inch based dimensions. 
+ width_in, height_in = figure.get_size_inches() + new_width_in = new_width_px / BASE_DPI + new_height_in = new_height_px / BASE_DPI + + # Try to determine if the figure had an explicit width or height set. + if width_in == DEFAULT_WIDTH_IN and height_in == DEFAULT_HEIGHT_IN: + # If default values are still set, apply new size, even if this + # resets the aspect ratio + width_in = new_width_in + height_in = new_height_in + else: + # Preserve the existing aspect ratio, constraining the scale + # based on the shorter dimension + if width_in < height_in: + height_in = height_in * (new_width_in / width_in) + width_in = new_width_in + else: + width_in = width_in * (new_height_in / height_in) + height_in = new_height_in + + figure.set_size_inches(width_in, height_in) + + format_dict, md_dict = format_display_data(figure, include=formats, exclude=[]) # type: ignore + + plt.close(figure) + + if was_interactive: + plt.ion() + + return (format_dict, md_dict) + + def _is_figure_empty(self, figure): + children = figure.get_children() + if len(children) < 1: + return True + + for child in children: + if child.get_visible(): + return False + + return True diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_comm.py new file mode 100644 index 00000000000..8cedb73df5f --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_comm.py @@ -0,0 +1,148 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from __future__ import annotations + +import enum +import logging +from typing import Callable, Generic, Optional, Type, TypeVar + +import comm + +from . 
import connections_comm, data_explorer_comm, help_comm, plot_comm, ui_comm, variables_comm +from ._vendor.pydantic import ValidationError +from ._vendor.pydantic.generics import GenericModel +from .utils import JsonData, JsonRecord + +logger = logging.getLogger(__name__) + + +## Create an enum of JSON-RPC error codes +@enum.unique +class JsonRpcErrorCode(enum.IntEnum): + PARSE_ERROR = -32700 + INVALID_REQUEST = -32600 + METHOD_NOT_FOUND = -32601 + INVALID_PARAMS = -32602 + INTERNAL_ERROR = -32603 + + +T_content = TypeVar( + "T_content", + data_explorer_comm.DataExplorerBackendMessageContent, + help_comm.HelpBackendMessageContent, + plot_comm.PlotBackendMessageContent, + variables_comm.VariablesBackendMessageContent, + ui_comm.UiBackendMessageContent, + connections_comm.ConnectionsBackendMessageContent, +) + + +class CommMessage(GenericModel, Generic[T_content]): + content: T_content + + +class PositronComm: + """A wrapper around a base IPython comm that provides a JSON-RPC interface""" + + def __init__(self, comm: comm.base_comm.BaseComm) -> None: + self.comm = comm + + def on_msg( + self, + callback: Callable[[CommMessage[T_content], JsonRecord], None], + content_cls: Type[T_content], + ) -> None: + """ + Register a callback for an RPC request from the frontend. + + Will be called with both the parsed `msg: CommMessage` and the original `raw_msg`. + + If the `raw_msg` could not be parsed, a JSON-RPC error will be sent to the frontend. + """ + + def handle_msg( + raw_msg: JsonRecord, + ) -> None: + try: + comm_msg = CommMessage[content_cls].parse_obj(raw_msg) + except ValidationError as exception: + # Check if the error is due to an unknown method + for error in exception.errors(): + # Since Pydantic doesn't support discriminated unions with a single type, + # we use a constant `method` in those cases, and have to check them separately. 
+ if ( + # Comms with multiple backend request methods will have a discriminated_union error + error["loc"] == ("content", "data") + and error["type"] == "value_error.discriminated_union.invalid_discriminator" + and error["ctx"]["discriminator_key"] == "method" + ): + method = error["ctx"]["discriminator_value"] + self.send_error( + JsonRpcErrorCode.METHOD_NOT_FOUND, + f"Unknown method '{method}'", + ) + return + + elif ( + # Comms with a single backend request method will have a const error + error["loc"] == ("content", "data", "method") + and error["type"] == "value_error.const" + ): + method = error["ctx"]["given"] + self.send_error( + JsonRpcErrorCode.METHOD_NOT_FOUND, + f"Unknown method '{method}'", + ) + return + + self.send_error( + JsonRpcErrorCode.INVALID_REQUEST, + f"Invalid request: {exception}", + ) + return + + callback(comm_msg, raw_msg) + + self.comm.on_msg(handle_msg) + + def send_result(self, data: JsonData = None, metadata: Optional[JsonRecord] = None) -> None: + """Send a JSON-RPC result to the frontend-side version of this comm""" + result = dict( + jsonrpc="2.0", + result=data, + ) + self.comm.send( + data=result, + metadata=metadata, + buffers=None, + ) + + def send_event(self, name: str, payload: JsonRecord) -> None: + """Send a JSON-RPC notification (event) to the frontend-side version of this comm""" + event = dict( + jsonrpc="2.0", + method=name, + params=payload, + ) + self.comm.send(data=event) + + def send_error(self, code: JsonRpcErrorCode, message: Optional[str] = None) -> None: + """Send a JSON-RPC result to the frontend-side version of this comm""" + error = dict( + jsonrpc="2.0", + error=dict( + code=code.value, + message=message, + ), + ) + self.comm.send( + data=error, + metadata=None, + buffers=None, + ) + + def close(self) -> None: + """Close the underlying comm.""" + self.comm.close() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_ipkernel.py 
b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_ipkernel.py new file mode 100644 index 00000000000..8bc9fd8c293 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_ipkernel.py @@ -0,0 +1,418 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +""" Positron extensions to the iPython Kernel.""" +from __future__ import annotations + +import enum +import logging +import re +import warnings +from pathlib import Path +from typing import Any, Callable, Container, Dict, List, Optional, Type + +import traitlets +from ipykernel.comm.manager import CommManager +from ipykernel.ipkernel import IPythonKernel +from ipykernel.kernelapp import IPKernelApp +from ipykernel.zmqshell import ZMQDisplayPublisher, ZMQInteractiveShell +from IPython.core import oinspect, page +from IPython.core.interactiveshell import ExecutionInfo, InteractiveShell +from IPython.core.magic import Magics, MagicsManager, line_magic, magics_class, needs_local_scope +from IPython.utils import PyColorize + +from .connections import ConnectionsService +from .data_explorer import DataExplorerService +from .help import HelpService, help +from .lsp import LSPService +from .plots import PositronDisplayPublisherHook +from .ui import UiService +from .utils import JsonRecord +from .variables import VariablesService +from .widget import PositronWidgetHook + + +class _CommTarget(str, enum.Enum): + DataExplorer = "positron.dataExplorer" + Ui = "positron.ui" + Help = "positron.help" + Lsp = "positron.lsp" + Plot = "positron.plot" + Variables = "positron.variables" + Widget = "jupyter.widget" + Connections = "positron.connection" + + +logger = logging.getLogger(__name__) + + +class PositronIPythonInspector(oinspect.Inspector): + parent: PositronShell + + def pinfo( + self, + obj: Any, + oname: str = "", + formatter: Optional[Callable[[str], Dict[str, str]]] = None, + info: Optional[oinspect.OInfo] = None, + detail_level: 
int = 0, + enable_html_pager: bool = True, + omit_sections: Container[str] = (), + ) -> None: + kernel = self.parent.kernel + + # Intercept `%pinfo obj` / `obj?` calls, and instead use Positron's help service + if detail_level == 0: + kernel.help_service.show_help(obj) + return + + # For `%pinfo2 obj` / `obj??` calls, try to open an editor via Positron's UI service + fname = oinspect.find_file(obj) + + if fname is None: + # If we couldn't get a filename, fall back to the default implementation. + return super().pinfo( + obj, + oname, + formatter, + info, + detail_level, + enable_html_pager, + omit_sections, + ) + + # If we got a filename, try to get the line number and open an editor. + lineno = oinspect.find_source_lines(obj) or 0 + kernel.ui_service.open_editor(fname, lineno, 0) + + pinfo.__doc__ = oinspect.Inspector.pinfo.__doc__ + + +@magics_class +class PositronMagics(Magics): + shell: PositronShell + + @line_magic + def clear(self, line: str) -> None: # type: ignore reportIncompatibleMethodOverride + """Clear the console.""" + # Send a message to the frontend to clear the console. + self.shell.kernel.ui_service.clear_console() + + @needs_local_scope + @line_magic + def view(self, line: str, local_ns: Dict[str, Any]): + """View an object in the Positron Data Explorer.""" + try: + obj = local_ns[line] + except KeyError: # not in namespace + obj = eval(line, local_ns, local_ns) + + # Register a dataset with the dataviewer service. 
+ self.shell.kernel.data_explorer_service.register_table(obj, line) + + @needs_local_scope + @line_magic + def connection_show(self, line: str, local_ns: Dict[str, Any]): + """Show a connection object in the Positron Connections Pane.""" + try: + obj = local_ns[line] + except KeyError: # not in namespace + obj = eval(line, local_ns, local_ns) + + self.shell.kernel.connections_service.register_connection(obj) + + +_traceback_file_link_re = re.compile(r"^(File \x1b\[\d+;\d+m)(.+):(\d+)") + + +class PositronShell(ZMQInteractiveShell): + kernel: PositronIPyKernel + object_info_string_level: int + magics_manager: MagicsManager + display_pub: ZMQDisplayPublisher + + inspector_class: Type[PositronIPythonInspector] = traitlets.Type( + PositronIPythonInspector, # type: ignore + help="Class to use to instantiate the shell inspector", + ).tag(config=True) + + def init_events(self) -> None: + super().init_events() + + # Register event handlers to poll the user's environment before and after each execution. + # Use pre/post_run_cell instead of pre/post_execute to only trigger on "interactive" + # executions i.e. by the user and not by the kernel. + # See: https://ipython.readthedocs.io/en/stable/config/callbacks.html. + self.events.register("pre_run_cell", self._handle_pre_run_cell) + self.events.register("post_run_cell", self._handle_post_run_cell) + + @traitlets.observe("colors") + def init_inspector(self, changes: Optional[traitlets.Bunch] = None) -> None: + # Override to pass `parent=self` to the inspector so that the inspector can send messages + # over the kernel's comms. + self.inspector = self.inspector_class( + oinspect.InspectColors, + PyColorize.ANSICodeColors, + self.colors, + self.object_info_string_level, + parent=self, + ) + + def init_hooks(self): + super().init_hooks() + + # For paged output, send display_data messages instead of using the legacy "payload" + # functionality of execute_reply messages. 
The priority of 90 is chosen arbitrarily, as long + # as its lower than other hooks registered by IPython and ipykernel. + self.set_hook("show_in_pager", page.as_hook(page.display_page), 90) + + def init_magics(self): + super().init_magics() + + # Register Positron's custom magics. + self.register_magics(PositronMagics) + + def init_user_ns(self): + super().init_user_ns() + + # Use Positron's help service + self.user_ns_hidden["help"] = help + self.user_ns["help"] = help + + # These variables are added to user_ns but not user_ns_hidden by ipython/ipykernel, fix that + self.user_ns_hidden.update( + { + "_exit_code": {}, + "__pydevd_ret_val_dict": {}, + "__warningregistry__": {}, + "__nonzero__": {}, + } + ) + + def _handle_pre_run_cell(self, info: ExecutionInfo) -> None: + """ + Prior to execution, reset the user environment watch state. + """ + try: + self.kernel.variables_service.snapshot_user_ns() + except Exception: + logger.warning("Failed to snapshot user namespace", exc_info=True) + + def _handle_post_run_cell(self, info: ExecutionInfo) -> None: + """ + After execution, sends an update message to the client to summarize + the changes observed to variables in the user's environment. + """ + # Check for changes to the working directory + try: + self.kernel.ui_service.poll_working_directory() + except Exception: + logger.exception("Error polling working directory") + + try: + self.kernel.variables_service.poll_variables() + except Exception: + logger.exception("Error polling variables") + + async def _stop(self): + # Initiate the kernel shutdown sequence. + await self.kernel.do_shutdown(restart=False) + + # Stop the main event loop. 
+ self.kernel.io_loop.stop() + + def show_usage(self): + """Show a usage message""" + self.kernel.help_service.show_help("positron_ipykernel.utils.positron_ipykernel_usage") + + @traitlets.observe("exit_now") + def _update_exit_now(self, change): + """stop eventloop when exit_now fires""" + if change["new"]: + if hasattr(self.kernel, "io_loop"): + loop = self.kernel.io_loop + # --- Start Positron --- + # This is reached when a user types `quit` or `exit` into the Positron Console. + # Perform a full kernel shutdown sequence before stopping the loop. + # TODO: We'll need to update this once Positron has a way for kernels to kick off + # Positron's shutdown sequencing. Currently, this is seen as a kernel crash. + # See: https://github.com/posit-dev/positron/issues/628. + loop.call_later(0.1, self._stop) + # --- End Positron --- + if self.kernel.eventloop: + exit_hook = getattr(self.kernel.eventloop, "exit_hook", None) + if exit_hook: + exit_hook(self.kernel) + + def _showtraceback(self, etype, evalue: Exception, stb: List[str]): # type: ignore IPython type annotation is wrong + """ + Enhance tracebacks for the Positron frontend. + """ + # Remove the first two lines of the traceback, which are the "---" header and the repeated + # exception name and "Traceback (most recent call last)". + # Also remove the last line of the traceback, which repeats f"{etype}: {evalue}". + frames = stb[2:-1] + + # Replace file links in each frame's header with an OSC8 link to the file and line number. + new_frames = [] + for frame in frames: + lines = frame.split("\n") + # Add an OSC8 hyperlink to the frame header. + lines[0] = _traceback_file_link_re.sub(_add_osc8_link, lines[0]) + new_frames.append("\n".join(lines)) + + # Pop the first stack trace into evalue, so that it shows above the "Show Traceback" button + # in the Positron Console. 
+ first_frame = new_frames.pop(0) if new_frames else "" + evalue_str = f"{evalue}\n{first_frame}" + + # The parent implementation actually expects evalue to be an Exception instance, but + # eventually calls str() on it. We're short-circuiting that by passing a string directly. + # It works for now but might not in future. + return super()._showtraceback(etype, evalue_str, new_frames) # type: ignore IPython type annotation is wrong + + +def _add_osc8_link(match: re.Match) -> str: + """ + Convert a link matched by `_traceback_file_link_re` to an OSC8 link. + """ + pre, path, line = match.groups() + abs_path = Path(path).expanduser() + try: + uri = abs_path.as_uri() + except ValueError: + # The path might be like '' which raises a ValueError on as_uri(). + return match.group(0) + return pre + _link(uri, f"{path}:{line}", {"line": line}) + + +class PositronIPyKernel(IPythonKernel): + """ + Positron extension of IPythonKernel. + + Adds additional comms to introspect the user's environment. + """ + + execution_count: int # type: ignore reportIncompatibleMethodOverride + shell: PositronShell + comm_manager: CommManager + + # Use the PositronShell class. 
+ shell_class: PositronShell = traitlets.Type( + PositronShell, # type: ignore + klass=InteractiveShell, + ) + + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + # Create Positron services + self.data_explorer_service = DataExplorerService(_CommTarget.DataExplorer) + self.display_pub_hook = PositronDisplayPublisherHook(_CommTarget.Plot) + self.ui_service = UiService() + self.help_service = HelpService() + self.lsp_service = LSPService(self) + self.variables_service = VariablesService(self) + self.widget_hook = PositronWidgetHook(_CommTarget.Widget, self.comm_manager) + self.connections_service = ConnectionsService(self, _CommTarget.Connections) + + # Register comm targets + self.comm_manager.register_target(_CommTarget.Lsp, self.lsp_service.on_comm_open) + self.comm_manager.register_target(_CommTarget.Ui, self.ui_service.on_comm_open) + self.comm_manager.register_target(_CommTarget.Help, self.help_service.on_comm_open) + self.comm_manager.register_target( + _CommTarget.Variables, self.variables_service.on_comm_open + ) + # Register display publisher hooks + self.shell.display_pub.register_hook(self.display_pub_hook) + self.shell.display_pub.register_hook(self.widget_hook) + + # Ignore warnings that the user can't do anything about + warnings.filterwarnings( + "ignore", + category=UserWarning, + message="Matplotlib is currently using module://matplotlib_inline.backend_inline", + ) + # Trying to import a module that's "auto-imported" by Jedi shows a warning in the Positron + # Console. 
+ warnings.filterwarnings( + "ignore", + category=UserWarning, + message=r"Module [^\s]+ not importable in path", + module="jedi", + ) + + def publish_execute_input( + self, + code: str, + parent: JsonRecord, + ) -> None: + self._publish_execute_input(code, parent, self.execution_count - 1) + + def start(self) -> None: + super().start() + + # Start Positron services + self.help_service.start() + + async def do_shutdown(self, restart: bool) -> JsonRecord: # type: ignore ReportIncompatibleMethodOverride + """ + Handle kernel shutdown. + """ + logger.info("Shutting down the kernel") + + # Shutdown Positron services + self.data_explorer_service.shutdown() + self.display_pub_hook.shutdown() + self.ui_service.shutdown() + self.help_service.shutdown() + self.lsp_service.shutdown() + self.widget_hook.shutdown() + await self.variables_service.shutdown() + self.connections_service.shutdown() + + # We don't call super().do_shutdown since it sets shell.exit_now = True which tries to + # stop the event loop at the same time as self.shutdown_request (since self.shell_stream.io_loop + # points to the same underlying asyncio loop). + return dict(status="ok", restart=restart) + + +class PositronIPKernelApp(IPKernelApp): + # Use the PositronIPyKernel class. + kernel_class: Type[PositronIPyKernel] = traitlets.Type(PositronIPyKernel) # type: ignore + + +# +# OSC8 functionality +# +# See https://iterm2.com/3.2/documentation-escape-codes.html for a description. +# + +# Define a few OSC8 excape codes for convenience. +_ESC = "\x1b" +_OSC = _ESC + "]" +_OSC8 = _OSC + "8" +_ST = _ESC + "\\" + + +def _start_hyperlink(uri: str = "", params: Dict[str, str] = {}) -> str: + """ + Start sequence for a hyperlink. + """ + params_str = ":".join(f"{key}={value}" for key, value in params.items()) + return ";".join([_OSC8, params_str, uri]) + _ST + + +def _end_hyperlink() -> str: + """ + End sequence for a hyperlink. 
+ """ + return _start_hyperlink() + + +def _link(uri: str, label: str, params: Dict[str, str] = {}) -> str: + """ + Create a hyperlink with the given label, URI, and params. + """ + return _start_hyperlink(uri, params) + label + _end_hyperlink() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_jedilsp.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_jedilsp.py new file mode 100644 index 00000000000..410ca9277c9 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/positron_jedilsp.py @@ -0,0 +1,704 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import asyncio +import enum +import logging +import re +import threading +from functools import lru_cache +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union, cast + +from comm.base_comm import BaseComm + +from ._vendor import attrs +from ._vendor.jedi.api import Interpreter, Project +from ._vendor.jedi_language_server import jedi_utils, pygls_utils +from ._vendor.jedi_language_server.server import ( + JediLanguageServer, + JediLanguageServerProtocol, + _choose_markup, + completion_item_resolve, + definition, + did_change_configuration, + did_close_diagnostics, + document_symbol, + highlight, + hover, + references, + rename, + signature_help, + type_definition, + workspace_symbol, +) +from ._vendor.lsprotocol.types import ( + COMPLETION_ITEM_RESOLVE, + TEXT_DOCUMENT_CODE_ACTION, + TEXT_DOCUMENT_COMPLETION, + TEXT_DOCUMENT_DEFINITION, + TEXT_DOCUMENT_DID_CHANGE, + TEXT_DOCUMENT_DID_CLOSE, + TEXT_DOCUMENT_DID_OPEN, + TEXT_DOCUMENT_DID_SAVE, + TEXT_DOCUMENT_DOCUMENT_HIGHLIGHT, + TEXT_DOCUMENT_DOCUMENT_SYMBOL, + TEXT_DOCUMENT_HOVER, + TEXT_DOCUMENT_REFERENCES, + TEXT_DOCUMENT_RENAME, + TEXT_DOCUMENT_SIGNATURE_HELP, + TEXT_DOCUMENT_TYPE_DEFINITION, + WORKSPACE_DID_CHANGE_CONFIGURATION, + WORKSPACE_SYMBOL, + CodeAction, + CodeActionKind, + CodeActionOptions, + 
CodeActionParams, + CompletionItem, + CompletionItemKind, + CompletionList, + CompletionOptions, + CompletionParams, + DidChangeConfigurationParams, + DidChangeTextDocumentParams, + DidCloseTextDocumentParams, + DidOpenTextDocumentParams, + DidSaveTextDocumentParams, + DocumentHighlight, + DocumentSymbol, + DocumentSymbolParams, + Hover, + InsertTextFormat, + Location, + MarkupContent, + MarkupKind, + Position, + RenameParams, + SignatureHelp, + SignatureHelpOptions, + SymbolInformation, + TextDocumentIdentifier, + TextDocumentPositionParams, + WorkspaceEdit, + WorkspaceSymbolParams, +) +from ._vendor.pygls.capabilities import get_capability +from ._vendor.pygls.feature_manager import has_ls_param_or_annotation +from ._vendor.pygls.workspace.text_document import TextDocument +from .help_comm import ShowHelpTopicParams +from .inspectors import BaseColumnInspector, BaseTableInspector, get_inspector +from .jedi import PositronInterpreter, get_python_object + +if TYPE_CHECKING: + from .positron_ipkernel import PositronShell + + +logger = logging.getLogger(__name__) + +_LINE_MAGIC_PREFIX = "%" +_CELL_MAGIC_PREFIX = "%%" +_HELP_TOPIC = "positron/textDocument/helpTopic" + + +@enum.unique +class _MagicType(str, enum.Enum): + cell = "cell" + line = "line" + + +@attrs.define +class HelpTopicParams: + text_document: TextDocumentIdentifier = attrs.field() + position: "Position" = attrs.field() + + +@attrs.define +class HelpTopicRequest: + id: Union[int, str] = attrs.field() + params: HelpTopicParams = attrs.field() + method: str = _HELP_TOPIC + jsonrpc: str = attrs.field(default="2.0") + + +class PositronJediLanguageServerProtocol(JediLanguageServerProtocol): + @lru_cache() + def get_message_type(self, method: str) -> Optional[Type]: + # Overriden to include custom Positron LSP messages. + # Doing so ensures that the corresponding feature function receives `params` of the correct type. 
+ if method == _HELP_TOPIC: + return HelpTopicRequest + return super().get_message_type(method) + + +class PositronJediLanguageServer(JediLanguageServer): + """Positron extension to the Jedi language server.""" + + loop: asyncio.AbstractEventLoop + lsp: PositronJediLanguageServerProtocol # type: ignore reportIncompatibleVariableOverride + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + # LSP comm used to notify the frontend when the server is ready + self._comm: Optional[BaseComm] = None + + # Reference to the user's namespace set on server start + self.shell: Optional["PositronShell"] = None + + # The LSP server is started in a separate thread + self._server_thread: Optional[threading.Thread] = None + + # Enable asyncio debug mode in the event loop + self._debug = False + + def feature(self, feature_name: str, options: Optional[Any] = None) -> Callable: + def decorator(f): + # Unfortunately Jedi doesn't handle subclassing of the LSP, so we + # need to detect and reject features we did not register. + if not has_ls_param_or_annotation(f, type(self)): + return None + + """(Re-)register a feature with the LSP.""" + lsp = self.lsp + + if feature_name in lsp.fm.features: + del lsp.fm.features[feature_name] + if feature_name in lsp.fm.feature_options: + del lsp.fm.feature_options[feature_name] + + return lsp.fm.feature(feature_name, options)(f) + + return decorator + + def start_tcp(self, host: str, port: int) -> None: + """Starts TCP server.""" + logger.info("Starting TCP server on %s:%s", host, port) + + # Set the event loop's debug mode. + self.loop.set_debug(self._debug) + + # Use our event loop as the thread's main event loop. 
+ asyncio.set_event_loop(self.loop) + + self._stop_event = threading.Event() + self._server = self.loop.run_until_complete(self.loop.create_server(self.lsp, host, port)) + + # Notify the frontend that the LSP server is ready + if self._comm is None: + logger.warning("LSP comm was not set, could not send server_started message") + else: + logger.info("LSP server is ready, sending server_started message") + self._comm.send({"msg_type": "server_started", "content": {}}) + + # Run the event loop until the stop event is set. + try: + while not self._stop_event.is_set(): + self.loop.run_until_complete(asyncio.sleep(1)) + except (KeyboardInterrupt, SystemExit): + pass + finally: + self.shutdown() + + def start(self, lsp_host: str, lsp_port: int, shell: "PositronShell", comm: BaseComm) -> None: + """ + Start the LSP with a reference to Positron's IPyKernel to enhance + completions with awareness of live variables from user's namespace. + """ + # Give the LSP server access to the LSP comm to notify the frontend when the server is ready + self._comm = comm + + # Give the LSP server access to the kernel to enhance completions with live variables + self.shell = shell + + if self._server_thread is not None: + logger.warning("LSP server thread was not properly shutdown") + return + + # Start Jedi LSP as an asyncio TCP server in a separate thread. + logger.info("Starting LSP server thread") + self._server_thread = threading.Thread( + target=self.start_tcp, args=(lsp_host, lsp_port), name="LSPServerThread" + ) + self._server_thread.start() + + def shutdown(self) -> None: + logger.info("Shutting down LSP server thread") + + # Below is taken as-is from pygls.server.Server.shutdown to remove awaiting + # server.wait_closed since it is a no-op if called after server.close in <=3.11 and blocks + # forever in >=3.12. See: https://github.com/python/cpython/issues/79033 for more. 
+ if self._stop_event is not None: + self._stop_event.set() + + if self._thread_pool: + self._thread_pool.terminate() + self._thread_pool.join() + + if self._thread_pool_executor: + self._thread_pool_executor.shutdown() + + if self._server: + self._server.close() + # This is where we should wait for the server to close but don't due to the issue + # described above. + + # Reset the loop and thread reference to allow starting a new server in the same process, + # e.g. when a browser-based Positron is refreshed. + if not self.loop.is_closed(): + self.loop.close() + + self.loop = asyncio.new_event_loop() + self._server_thread = None + + def stop(self) -> None: + """Notify the LSP server thread to stop from another thread.""" + if self._stop_event is None: + logger.warning("Cannot stop the LSP server thread, it was not started") + return + + self._stop_event.set() + + def set_debug(self, debug: bool) -> None: + self._debug = debug + + +POSITRON = PositronJediLanguageServer( + name="jedi-language-server", + version="0.18.2", + protocol_cls=PositronJediLanguageServerProtocol, + # Provide an event loop, else the pygls Server base class sets its own event loop as the main + # event loop, which we use to run the kernel. + loop=asyncio.new_event_loop(), +) + +_MAGIC_COMPLETIONS: Dict[str, Any] = {} + + +# Server Features +# Unfortunately we need to re-register these as Pygls Feature Management does +# not support subclassing of the LSP, and Jedi did not use the expected "ls" +# name for the LSP server parameter in the feature registration methods. + + +@POSITRON.feature( + TEXT_DOCUMENT_COMPLETION, + CompletionOptions(trigger_characters=[".", "'", '"', "%"], resolve_provider=True), +) +def positron_completion( + server: PositronJediLanguageServer, params: CompletionParams +) -> Optional[CompletionList]: + """ + Completion feature. 
+ """ + # pylint: disable=too-many-locals + snippet_disable = server.initialization_options.completion.disable_snippets + resolve_eagerly = server.initialization_options.completion.resolve_eagerly + ignore_patterns = server.initialization_options.completion.ignore_patterns + document = server.workspace.get_document(params.text_document.uri) + + # --- Start Positron --- + # Don't complete comments or shell commands + line = document.lines[params.position.line] if document.lines else "" + trimmed_line = line.lstrip() + if trimmed_line.startswith(("#", "!")): + return None + + # Use Interpreter instead of Script to include the shell's namespaces in completions + jedi_script = interpreter(server.project, document, server.shell) + + # --- End Positron --- + + try: + jedi_lines = jedi_utils.line_column(params.position) + completions_jedi_raw = jedi_script.complete(*jedi_lines) + if not ignore_patterns: + # A performance optimization. ignore_patterns should usually be empty; + # this special case avoid repeated filter checks for the usual case. 
+ completions_jedi = (comp for comp in completions_jedi_raw) + else: + completions_jedi = ( + comp + for comp in completions_jedi_raw + if not any(i.match(comp.name) for i in ignore_patterns) + ) + snippet_support = get_capability( + server.client_capabilities, + "text_document.completion.completion_item.snippet_support", + False, + ) + markup_kind = _choose_markup(server) + is_import_context = jedi_utils.is_import( + script_=jedi_script, + line=jedi_lines[0], + column=jedi_lines[1], + ) + enable_snippets = snippet_support and not snippet_disable and not is_import_context + char_before_cursor = pygls_utils.char_before_cursor( + document=server.workspace.get_document(params.text_document.uri), + position=params.position, + ) + jedi_utils.clear_completions_cache() + + # --- Start Positron --- + _MAGIC_COMPLETIONS.clear() + + completion_items = [] + + # Don't add jedi completions if completing an explicit magic command + if not trimmed_line.startswith(_LINE_MAGIC_PREFIX): + jedi_completion_items = [ + jedi_utils.lsp_completion_item( + completion=completion, + char_before_cursor=char_before_cursor, + enable_snippets=enable_snippets, + resolve_eagerly=resolve_eagerly, + markup_kind=markup_kind, + sort_append_text=completion.name, + ) + for completion in completions_jedi + ] + completion_items.extend(jedi_completion_items) + + # Don't add magic completions if: + # - completing an object's attributes e.g `numpy.` + is_completing_attribute = "." in trimmed_line + # - or if the trimmed line has additional whitespace characters e.g `if ` + has_whitespace = " " in trimmed_line + # - of if the trimmed line has a string, typically for dict completion e.g. 
`x['` + has_string = '"' in trimmed_line or "'" in trimmed_line + exclude_magics = is_completing_attribute or has_whitespace or has_string + if server.shell is not None and not exclude_magics: + magic_commands = cast( + Dict[str, Dict[str, Callable]], server.shell.magics_manager.lsmagic() + ) + + chars_before_cursor = trimmed_line[: params.position.character] + + # TODO: In future we may want to support enable_snippets and ignore_pattern options + # for magic completions. + + # Add cell magic completion items + cell_magic_completion_items = [ + _magic_completion_item( + name=name, + magic_type=_MagicType.cell, + chars_before_cursor=chars_before_cursor, + func=func, + ) + for name, func in magic_commands[_MagicType.cell].items() + ] + completion_items.extend(cell_magic_completion_items) + + # Add line magic completion only if not completing an explicit cell magic + if not trimmed_line.startswith(_CELL_MAGIC_PREFIX): + line_magic_completion_items = [ + _magic_completion_item( + name=name, + magic_type=_MagicType.line, + chars_before_cursor=chars_before_cursor, + func=func, + ) + for name, func in magic_commands[_MagicType.line].items() + ] + completion_items.extend(line_magic_completion_items) + + # --- End Positron --- + except ValueError: + # Ignore LSP errors for completions from invalid line/column ranges. + logger.info("LSP completion error", exc_info=True) + completion_items = [] + + return CompletionList(is_incomplete=False, items=completion_items) if completion_items else None + + +def _magic_completion_item( + name: str, + magic_type: _MagicType, + chars_before_cursor: str, + func: Callable, +) -> CompletionItem: + """ + Create a completion item for a magic command. + + See `jedi_utils.lsp_completion_item` for reference. 
+ """ + # Get the appropriate prefix for the magic type + if magic_type == _MagicType.line: + prefix = _LINE_MAGIC_PREFIX + elif magic_type == _MagicType.cell: + prefix = _CELL_MAGIC_PREFIX + else: + raise AssertionError(f"Invalid magic type: {magic_type}") + + # Determine insert_text. This is slightly tricky since we may have to strip leading '%'s + + # 1. Find the last group of non-whitespace characters before the cursor + m1 = re.search(r"\s*([^\s]*)$", chars_before_cursor) + assert m1, f"Regex should always match. chars_before_cursor: {chars_before_cursor}" + text = m1.group(1) + + # 2. Get the leading '%'s + m2 = re.match("^(%*)", text) + assert m2, f"Regex should always match. text: {text}" + + # 3. Pad the name with '%'s to match the expected prefix so that e.g. both `bash` and + # `%bash` complete to `%%bash` + count = len(m2.group(1)) + pad_count = max(0, len(prefix) - count) + insert_text = prefix[0] * pad_count + name + + label = prefix + name + + _MAGIC_COMPLETIONS[label] = (f"{magic_type.value} magic {name}", func.__doc__) + + return CompletionItem( + label=label, + filter_text=name, + kind=CompletionItemKind.Function, + # Prefix sort_text with 'v', which ensures that it is ordered as an ordinary item + # See jedi_language_server.jedi_utils.complete_sort_name for reference + sort_text=f"v{name}", + insert_text=insert_text, + insert_text_format=InsertTextFormat.PlainText, + ) + + +@POSITRON.feature(COMPLETION_ITEM_RESOLVE) +def positron_completion_item_resolve( + server: PositronJediLanguageServer, params: CompletionItem +) -> CompletionItem: + # --- Start Positron --- + magic_completion = _MAGIC_COMPLETIONS.get(params.label) + if magic_completion is not None: + params.detail, params.documentation = magic_completion + return params + + # Try to include extra information for objects in the user's namespace e.g. dataframes and columns. 
+ completion = jedi_utils._MOST_RECENT_COMPLETIONS[params.label] + obj, is_found = get_python_object(completion) + if is_found: + inspector = get_inspector(obj) + if isinstance(inspector, (BaseColumnInspector, BaseTableInspector)): + params.detail = inspector.get_display_type() + + markup_kind = _choose_markup(server) + # TODO: We may want to use get_display_value when we update inspectors to return + # multiline display values once Positron supports it. + doc = str(obj) + if markup_kind == MarkupKind.Markdown: + doc = f"```text\n{doc}\n```" + params.documentation = MarkupContent(kind=markup_kind, value=doc) + return params + # --- End Positron --- + return completion_item_resolve(server, params) + + +@POSITRON.feature( + TEXT_DOCUMENT_SIGNATURE_HELP, + SignatureHelpOptions(trigger_characters=["(", ","]), +) +def positron_signature_help( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[SignatureHelp]: + return signature_help(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DEFINITION) +def positron_definition( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[List[Location]]: + return definition(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_TYPE_DEFINITION) +def positron_type_definition( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[List[Location]]: + return type_definition(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DOCUMENT_HIGHLIGHT) +def positron_highlight( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[List[DocumentHighlight]]: + return highlight(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_HOVER) +def positron_hover( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[Hover]: + try: + return hover(server, params) + except ValueError: + # Ignore LSP errors for hover over invalid line/column ranges. 
+ logger.info("LSP hover error", exc_info=True) + + return None + + +@POSITRON.feature(TEXT_DOCUMENT_REFERENCES) +def positron_references( + server: PositronJediLanguageServer, params: TextDocumentPositionParams +) -> Optional[List[Location]]: + return references(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DOCUMENT_SYMBOL) +def positron_document_symbol( + server: PositronJediLanguageServer, params: DocumentSymbolParams +) -> Optional[Union[List[DocumentSymbol], List[SymbolInformation]]]: + return document_symbol(server, params) + + +@POSITRON.feature(WORKSPACE_SYMBOL) +def positron_workspace_symbol( + server: PositronJediLanguageServer, params: WorkspaceSymbolParams +) -> Optional[List[SymbolInformation]]: + return workspace_symbol(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_RENAME) +def positron_rename( + server: PositronJediLanguageServer, params: RenameParams +) -> Optional[WorkspaceEdit]: + return rename(server, params) + + +@POSITRON.feature(_HELP_TOPIC) +def positron_help_topic_request( + server: PositronJediLanguageServer, params: HelpTopicParams +) -> Optional[ShowHelpTopicParams]: + """Return topic to display in Help pane""" + document = server.workspace.get_document(params.text_document.uri) + jedi_script = interpreter(server.project, document, server.shell) + jedi_lines = jedi_utils.line_column(params.position) + names = jedi_script.infer(*jedi_lines) + + try: + # if something is found, infer will pass back a list of Name objects + # but the len is always 1 + topic = names[0].full_name + except IndexError: + logger.warning(f"Could not find help topic for request: {params}") + return None + else: + logger.info(f"Help topic found: {topic}") + return ShowHelpTopicParams(topic=topic) + + +@POSITRON.feature( + TEXT_DOCUMENT_CODE_ACTION, + CodeActionOptions( + code_action_kinds=[ + CodeActionKind.RefactorInline, + CodeActionKind.RefactorExtract, + ], + ), +) +def positron_code_action( + server: PositronJediLanguageServer, params: 
CodeActionParams +) -> Optional[List[CodeAction]]: + # Code Actions are currently causing the kernel process to hang in certain cases, for example, + # when the document contains `from fastai.vision.all import *`. Temporarily disable these + # until we figure out the underlying issue. + + # try: + # return code_action(server, params) + # except ValueError: + # # Ignore LSP errors for actions with invalid line/column ranges. + # logger.info("LSP codeAction error", exc_info=True) + + return None + + +@POSITRON.feature(WORKSPACE_DID_CHANGE_CONFIGURATION) +def positron_did_change_configuration( + server: PositronJediLanguageServer, # pylint: disable=unused-argument + params: DidChangeConfigurationParams, # pylint: disable=unused-argument +) -> None: + return did_change_configuration(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DID_SAVE) +def positron_did_save_diagnostics( + server: PositronJediLanguageServer, params: DidSaveTextDocumentParams +) -> None: + return did_save_diagnostics(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DID_CHANGE) +def positron_did_change_diagnostics( + server: PositronJediLanguageServer, params: DidChangeTextDocumentParams +) -> None: + return did_change_diagnostics(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DID_OPEN) +def positron_did_open_diagnostics( + server: PositronJediLanguageServer, params: DidOpenTextDocumentParams +) -> None: + return did_open_diagnostics(server, params) + + +@POSITRON.feature(TEXT_DOCUMENT_DID_CLOSE) +def positron_did_close_diagnostics( + server: PositronJediLanguageServer, params: DidCloseTextDocumentParams +) -> None: + return did_close_diagnostics(server, params) + + +# Copied from jedi_language_server/server.py to handle exceptions. Exceptions should be handled by +# pygls, but the debounce decorator causes the function to run in a separate thread thus a separate +# stack from pygls' exception handler. 
+@jedi_utils.debounce(1, keyed_by="uri") # type: ignore - pyright bug +def _publish_diagnostics(server: JediLanguageServer, uri: str) -> None: + """Helper function to publish diagnostics for a file.""" + # The debounce decorator delays the execution by 1 second + # canceling notifications that happen in that interval. + # Since this function is executed after a delay, we need to check + # whether the document still exists + if uri not in server.workspace.documents: + return + + doc = server.workspace.get_document(uri) + + # --- Start Positron --- + try: + diagnostic = jedi_utils.lsp_python_diagnostic(uri, doc.source) + except Exception: + logger.exception(f"Failed to publish diagnostics for uri {uri}", exc_info=True) + diagnostic = None + # --- End Positron --- + + diagnostics = [diagnostic] if diagnostic else [] + + server.publish_diagnostics(uri, diagnostics) + + +def did_save_diagnostics(server: JediLanguageServer, params: DidSaveTextDocumentParams) -> None: + """Actions run on textDocument/didSave: diagnostics.""" + _publish_diagnostics(server, params.text_document.uri) # type: ignore - pyright bug + + +def did_change_diagnostics(server: JediLanguageServer, params: DidChangeTextDocumentParams) -> None: + """Actions run on textDocument/didChange: diagnostics.""" + _publish_diagnostics(server, params.text_document.uri) # type: ignore - pyright bug + + +def did_open_diagnostics(server: JediLanguageServer, params: DidOpenTextDocumentParams) -> None: + """Actions run on textDocument/didOpen: diagnostics.""" + _publish_diagnostics(server, params.text_document.uri) # type: ignore - pyright bug + + +def interpreter( + project: Optional[Project], document: TextDocument, shell: Optional["PositronShell"] +) -> Interpreter: + """ + Return a `jedi.Interpreter` with a reference to the shell's user namespace. 
+ """ + namespaces: List[Dict[str, Any]] = [] + if shell is not None: + namespaces.append(shell.user_ns) + + return PositronInterpreter(document.source, namespaces, path=document.path, project=project) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/pydoc.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/pydoc.py new file mode 100644 index 00000000000..d947a14c830 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/pydoc.py @@ -0,0 +1,1007 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from __future__ import annotations + +import importlib.metadata +import inspect +import io +import logging +import os +import pkgutil +import pydoc +import re +import sys +import warnings +from dataclasses import dataclass +from functools import partial +from pydoc import _is_bound_method # type: ignore +from pydoc import ModuleScanner, describe, isdata, locate, visiblename +from traceback import format_exception_only +from types import ModuleType +from typing import Any, Dict, List, Optional, Type, cast + +from ._vendor.markdown_it import MarkdownIt +from ._vendor.pygments import highlight +from ._vendor.pygments.formatters.html import HtmlFormatter +from ._vendor.pygments.lexers import get_lexer_by_name +from ._vendor.pygments.util import ClassNotFound +from .docstrings import convert_docstring +from .utils import get_module_name, is_numpy_ufunc + +logger = logging.getLogger(__name__) + + +def _compact_signature(obj: Any, name="", max_chars=45) -> Optional[str]: + """ + Produce a compact signature for a callable object. + + This was written to match signatures in class attribute lists in the pandas documentation, + for example: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame + + Example + ------- + + >>> def foo(a, b, c=1, *args, **kwargs): + ... 
pass + >>> compact_signature(foo) + '(a, b, c=1, *args, **kwargs)' + + Returns `None` for uncallable objects. + + >>> compact_signature(1) + None + """ + try: + signature = inspect.signature(obj) + except (TypeError, ValueError): + # TODO: Try falling back to getting the signature from the docstring e.g. `numpy.array` + return None + + seen_optionals = False + seen_keyword_only = False + + def _stringify(args): + # Convert a list of arg strings to a single signature, adding brackets as needed + nonlocal seen_optionals + result = ", ".join(args) + if seen_optionals: + result += "]" + return f"({result})" + + args = [] + for name, param in signature.parameters.items(): + if name == "self": + continue + + # Is it the first keyword-only argument? + elif not seen_keyword_only and param.kind is param.KEYWORD_ONLY: + seen_keyword_only = True + args.append("*") + + # Is it variadic? + elif param.kind is param.VAR_POSITIONAL: + seen_keyword_only = True + elif param.kind is param.VAR_KEYWORD: + seen_keyword_only = True + + arg = str(param.replace(annotation=param.empty, default=param.empty)) + + # Is it the first optional argument? + if not seen_optionals and param.default is not param.empty: + seen_optionals = True + if args: + args[-1] += "[" + else: + arg = f"[{arg}" + + args.append(arg) + + # Check if we should truncate the remaining args + result = _stringify(args) + if len(name + result) > max_chars: + # Replace the last arg with an ellipsis + args.pop() + args.append("...") + break + + result = _stringify(args) + return result + + +def _untyped_signature(obj: Any) -> Optional[str]: + """ + Produce a signature for a callable object, with all annotations removed. + + This was written to match signatures in the header of pages for callables in the pandas + documentation, for example: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame + + Example + ------- + + >>> def foo(a: int, b: str, c=1, *args, **kwargs) -> None: + ... 
pass + >>> untyped_signature(foo) + '(a, b, c=1, *args, **kwargs) -> None' + """ + try: + signature = inspect.signature(obj) + except (ValueError, TypeError): + # TODO: Try falling back to getting the signature from the docstring e.g. `numpy.array` + return None + + untyped_params = [ + param.replace(annotation=param.empty) + for name, param in signature.parameters.items() + if name != "self" + ] + signature = signature.replace(parameters=untyped_params, return_annotation=signature.empty) + result = str(signature) + return result + + +def _get_summary(object: Any) -> Optional[str]: + """ + Get the one-line summary from the docstring of an object. + """ + doc = _pydoc_getdoc(object) + return doc.split("\n\n", 1)[0] + + +def _tabulate_attrs(attrs: List[_Attr], cls_name: Optional[str] = None) -> List[str]: + """ + Create an HTML table of attribute signatures and summaries. + """ + result = [] + # "autosummary" refers to the Sphinx extension that this is based on + result.append('') + result.append("") + for attr in attrs: + _cls_name = cls_name or attr.cls.__name__ + full_name = f"{_cls_name}.{attr.name}" + argspec = _compact_signature(attr.value, attr.name) or "" + link = f'{attr.name}{argspec}' + summary = _get_summary(attr.value) or "" + row_lines = [ + "", + "", + "", + "", + ] + result.extend(row_lines) + result.append("") + result.append("
", + link, + "", + summary, + "
") + return result + + +# as-is from pydoc 3.11 +# --- Start Positron --- +class PositronHelper(pydoc.Helper): + # --- End Positron --- + def _gettopic(self, topic, more_xrefs=""): + """Return unbuffered tuple of (topic, xrefs). + + If an error occurs here, the exception is caught and displayed by + the url handler. + + This function duplicates the showtopic method but returns its + result directly so it can be formatted for display in an html page. + """ + try: + import pydoc_data.topics + except ImportError: + return ( + """ +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +""", + "", + ) + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + # --- Start Positron --- + raise ValueError(f"No help found for topic: {topic}.") + # --- End Positron --- + if isinstance(target, str): + return self._gettopic(target, more_xrefs) + label, xrefs = target + doc = pydoc_data.topics.topics[label] + if more_xrefs: + xrefs = (xrefs or "") + " " + more_xrefs + return doc, xrefs + + +@dataclass +class _Attr: + name: str + cls: Any + value: Any + + +class _PositronHTMLDoc(pydoc.HTMLDoc): + def document(self, object: Any, *args: Any): + # Handle numpy ufuncs, which don't return True for `inspect.isroutine` but which we still + # want to document as routines. 
+ if is_numpy_ufunc(object): + return self.docroutine(object, *args) + + return super().document(object, *args) + + def page(self, title, contents): + """Format an HTML page.""" + # --- Start Positron --- + # moved from _HTMLDoc class in pydoc._url_handler + # update path for positron file system + css_path = "_pydoc.css" + + css_link = '' % css_path + + # removed html_navbar() for aesthetics + return """\ + + + + +Pydoc: %s +%s%s +""" % ( + title, + css_link, + contents, + ) + # --- End Positron --- + + def heading(self, title: str, extras="") -> str: # type: ignore ReportIncompatibleMethodOverride + """Format a page heading.""" + # Simplified version of pydoc.HTMLDoc.heading that doesn't use tables + lines = [f"

{title}

"] + if extras: + lines.append(extras) + result = "\n".join(lines) + return result + + def section( # type: ignore ReportIncompatibleMethodOverride + self, + title: str, + cls: str, + contents: str, + width=None, + prelude="", + marginalia=None, + gap=None, + ) -> str: + """Format a section with a heading.""" + # Simplified version of pydoc.HTMLDoc.section that doesn't use tables + if width is not None: + logger.debug(f"Ignoring width: {width}") + + if marginalia: + logger.debug(f"Ignoring marginalia: {marginalia}") + + if gap: + logger.debug(f"Ignoring gap: {gap}") + + lines = [ + f'
', + f"

{title}

", + ] + if prelude: + lines.append(prelude) + lines.append(contents) + lines.append("
") + result = "\n".join(lines) + return result + + def bigsection(self, *args): + # This no longer does anything on top of `section`, we keep it for compatibility with pydoc + return self.section(*args) + + # Heavily customized version of pydoc.HTMLDoc.docmodule + def docmodule(self, object: ModuleType, *_): # type: ignore reportIncompatibleMethodOverride + obj_name = object.__name__ + + # Create the heading, with links to each parent module + parts = obj_name.split(".") + links = [] + for i in range(len(parts) - 1): + url = ".".join(parts[: i + 1]) + ".html" + links.append(f'{parts[i]}') + linkedname = ".".join(links + parts[-1:]) + head = linkedname + + pkg_version = "" + if hasattr(object, "__version__"): + pkg_version = self._version_text(str(object.__version__)) + + # TODO: Re-enable once file links actually work in the Positron Help pane + # Add a link to the module file + # try: + # path = inspect.getabsfile(object) + # except TypeError: + # filelink = "(built-in)" + # else: + # url = urllib.parse.quote(path) + # filelink = f'[source]' + + result = pkg_version + self.heading(title=head) + + # Separate the module's members into modules, classes, functions, and data. + # Respect the module's __all__ attribute if it exists. 
+ all = getattr(object, "__all__", None) + modules = [] + classes = [] + funcs = [] + data = [] + for name, value in inspect.getmembers(object): + if not visiblename(name, all, object): + continue + + attr = _Attr(name=name, cls=object, value=value) + + if inspect.ismodule(value): + modules.append(attr) + elif inspect.isclass(value): + classes.append(attr) + elif inspect.isroutine(value): + funcs.append(attr) + elif isdata(value): + data.append(attr) + + # Add the module's parsed docstring to the page + doc = _getdoc(object) + result += doc + + # Add the module's members to the page + if modules: + contents = _tabulate_attrs(modules, obj_name) + result += self.bigsection("Modules", "modules", "\n".join(contents)) + + if classes: + contents = _tabulate_attrs(classes, obj_name) + result += self.bigsection("Classes", "classes", "\n".join(contents)) + + if funcs: + contents = _tabulate_attrs(funcs, obj_name) + result += self.bigsection("Functions", "functions", "\n".join(contents)) + + if data: + contents = _tabulate_attrs(data, obj_name) + result += self.bigsection("Data", "data", "\n".join(contents)) + + return result + + # Heavily customized version of pydoc.HTMLDoc.docclass + def docclass(self, obj: Type, name=None, *_): # type: ignore reportIncompatibleMethodOverride + obj_name = name or obj.__name__ + + # Separate the class's members into attributes and methods + attributes = [] + methods = [] + for name, value in inspect.getmembers(obj): + if name.startswith("_"): + continue + + attr = _Attr(name=name, cls=obj, value=value) + + if callable(value): + methods.append(attr) + else: + attributes.append(attr) + + match = re.search(r"^([^.]*)\.", obj_name) + pkg_version = "" + + if match: + try: + pkg_version = importlib.metadata.version(match.group(1)) # type: ignore + except importlib.metadata.PackageNotFoundError: # type: ignore + pass + + version_text = self._version_text(pkg_version) + + result = version_text + self.heading(title=obj_name) + + # Add the object's 
signature to the page + signature = _untyped_signature(obj) or "" + signature = self.escape(signature) + signature = f"class {obj_name}{signature}" + result += signature + + # Add the object's parsed docstring to the page + doc = _getdoc(obj) + result += doc + + # Add the object's members to the page + if attributes: + contents = _tabulate_attrs(attributes, obj_name) + result += self.bigsection("Attributes", "attributes", "\n".join(contents)) + + if methods: + contents = _tabulate_attrs(methods, obj_name) + result += self.bigsection("Methods", "functions", "\n".join(contents)) + + return result + + # as is from pydoc.HTMLDoc to port Python 3.11 breaking CSS changes + def docroutine( + self, + object: Any, + name=None, + mod=None, + funcs={}, + classes={}, + methods={}, + cl=None, + ): + """Produce HTML documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + anchor = (cl and cl.__name__ or "") + "-" + name + note = "" + skipdocs = 0 + if _is_bound_method(object): + imclass = object.__self__.__class__ + if cl: + if imclass is not cl: + note = " from " + self.classlink(imclass, mod) # type: ignore + else: + if object.__self__ is not None: + note = " method of %s instance" % self.classlink(object.__self__.__class__, mod) # type: ignore + else: + note = " unbound %s method" % self.classlink(imclass, mod) # type: ignore + + if inspect.iscoroutinefunction(object) or inspect.isasyncgenfunction(object): + asyncqualifier = "async " + else: + asyncqualifier = "" + + if name == realname: + title = '%s' % (anchor, realname) + else: + if cl and inspect.getattr_static(cl, realname, []) is object: + reallink = '%s' % (cl.__name__ + "-" + realname, realname) + skipdocs = 1 + else: + reallink = realname + title = '%s = %s' % (anchor, name, reallink) + argspec = None + if inspect.isroutine(object): + try: + signature = inspect.signature(object) + except (ValueError, TypeError): + signature = None + if signature: + argspec = 
str(signature) + if realname == "": + title = "%s lambda " % name + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. + argspec = argspec[1:-1] # remove parentheses + if not argspec: + argspec = "(...)" + + decl = ( + asyncqualifier + + title + + self.escape(argspec) + + (note and self.grey('%s' % note)) + ) + + if skipdocs: + return "
%s
\n" % decl + else: + doc = self.markup(_getdoc(object), self.preformat, funcs, classes, methods) + # --- Start Positron --- + # Remove + # doc = doc and '
%s
' % doc + # --- End Positron --- + return "
%s
%s
\n" % (decl, doc) + + # as is from pydoc.HTMLDoc to port Python 3.11 breaking CSS changes + def docdata(self, object, name=None, mod=None, cl=None): + """Produce html documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push("
%s
\n" % name) + doc = self.markup(_getdoc(object), self.preformat) + if doc: + # --- Start Positron --- + # Remove + # push('
%s
\n' % doc) + push("
%s
\n" % doc) + # --- End Positron --- + push("
\n") + + return "".join(results) + + docproperty = docdata + + # as is from pydoc.HTMLDoc to port Python 3.11 breaking CSS changes + def docother(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a data object.""" + lhs = name and "%s = " % name or "" + return lhs + self.repr(object) + + def markup(self, text, escape=None, funcs={}, classes={}, methods={}): + # Don't do any marking up, let the rst parser handle it. + return text + + # as is from pydoc.HTMLDoc to port Python 3.11 breaking CSS changes + def index(self, dir, shadowed: Optional[Dict[str, int]] = None): # type: ignore reportIncompatibleMethodOverride + """Generate an HTML index for a directory of modules.""" + modpkgs = [] + if shadowed is None: + shadowed = {} + for importer, name, ispkg in pkgutil.iter_modules([dir]): + if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name): + # ignore a module if its name contains a surrogate character + continue + modpkgs.append((name, "", ispkg, name in shadowed)) + shadowed[name] = 1 + + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + return self.bigsection(dir, "index", contents) + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_index(self): + """Module Index page.""" + + def bltinlink(name): + return '%s' % (name, name) + + heading = self.heading('Index of Modules') + names = [name for name in sys.builtin_module_names if name != "__main__"] + contents = self.multicolumn(names, bltinlink) + contents = [heading, "

" + self.bigsection("Built-in Modules", "index", contents)] + + seen = {} + for dir in sys.path: + contents.append(self.index(dir, seen)) + + contents.append( + '

pydoc by Ka-Ping Yee' + "<ping@lfw.org>

" + ) + return "Index of Modules", "".join(contents) + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_search(self, key): + """Search results page.""" + # scan for modules + search_result = [] + + def callback(path, modname, desc): + if modname[-9:] == ".__init__": + modname = modname[:-9] + " (package)" + search_result.append((modname, desc and "- " + desc)) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") # ignore problems during import + + def onerror(modname): + pass + + ModuleScanner().run(callback, key, onerror=onerror) + + # format page + def bltinlink(name): + return '%s' % (name, name) + + results = [] + heading = self.heading( + 'Search Results', + ) + for name, desc in search_result: + results.append(bltinlink(name) + desc) + contents = heading + self.bigsection("key = %s" % key, "index", "
".join(results)) + return "Search Results", contents + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_getobj(self, url): + # --- Start Positron --- + # Skip forced reloads for all modules. It is unlikely to affect the UX provided that these + # modules don't change within the lifetime of the help service + obj = locate(url, forceload=False) + # --- End Positron --- + if obj is None and url != "None": + raise ValueError("could not find object") + title = describe(obj) + content = self.document(obj, url) + return title, content + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_topics(self): + """Index of topic texts available.""" + + def bltinlink(name): + return '%s' % (name, name) + + heading = self.heading( + 'INDEX', + ) + # --- Start Positron --- + names = sorted(PositronHelper.topics.keys()) + # --- End Positron --- + + contents = self.multicolumn(names, bltinlink) + contents = heading + self.bigsection("Topics", "index", contents) + return "Topics", contents + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_keywords(self): + """Index of keywords.""" + heading = self.heading( + 'INDEX', + ) + # --- Start Positron --- + names = sorted(PositronHelper.keywords.keys()) + # --- End Positron --- + + def bltinlink(name): + return '%s' % (name, name) + + contents = self.multicolumn(names, bltinlink) + contents = heading + self.bigsection("Keywords", "index", contents) + return "Keywords", contents + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_topicpage(self, topic): + """Topic or keyword help page.""" + buf = io.StringIO() + # --- Start Positron --- + htmlhelp = PositronHelper(buf, buf) + # --- End Positron --- + contents, xrefs = htmlhelp._gettopic(topic) # type: ignore + if topic in htmlhelp.keywords: + title = "KEYWORD" + else: + title = "TOPIC" + heading = self.heading( + '%s' % title, + ) + contents = "
%s
" % self.markup(contents) + contents = self.bigsection(topic, "index", contents) + if xrefs: + xrefs = sorted(xrefs.split()) + + def bltinlink(name): + return '%s' % (name, name) + + xrefs = self.multicolumn(xrefs, bltinlink) + xrefs = self.section("Related help topics: ", "index", xrefs) + return ("%s %s" % (title, topic), "".join((heading, contents, xrefs))) + + # as is from pydoc._url_handler to port Python 3.11 breaking CSS changes + def html_error(self, url, exc): + heading = self.heading( + 'Not found', + ) + contents = "
".join(self.escape(line) for line in format_exception_only(type(exc), exc)) + # --- Start Positron --- + contents = heading + self.bigsection("", "error", contents) + return "Error", contents + # --- End Positron --- + + # moved from pydoc._url_handler to method + def get_html_page(self, url): + """Generate an HTML page for url.""" + complete_url = url + if url.endswith(".html"): + url = url[:-5] + + # --- Start Positron --- + # for typechecking + title, content = None, None + # --- End Positron --- + + try: + if url in ("", "index"): + title, content = self.html_index() + elif url == "topics": + title, content = self.html_topics() + elif url == "keywords": + title, content = self.html_keywords() + elif "=" in url: + op, _, url = url.partition("=") + if op == "search?key": + title, content = self.html_search(url) + elif op == "topic?key": + # try topics first, then objects. + try: + title, content = self.html_topicpage(url) + except ValueError: + title, content = self.html_getobj(url) + elif op == "get?key": + # try objects first, then topics. + if url in ("", "index"): + title, content = self.html_index() + else: + try: + title, content = self.html_getobj(url) + except ValueError: + title, content = self.html_topicpage(url) + else: + raise ValueError("bad pydoc url") + else: + title, content = self.html_getobj(url) + except Exception as exc: + # Catch any errors and display them in an error page. + title, content = self.html_error(complete_url, exc) + + # --- Start Positron --- + # for typechecking + assert title is not None + assert content is not None + # --- End Positron --- + + return self.page(title, content) + + def _version_text(self, version: str) -> str: + # Add the module's __version__ to the heading + if len(version) > 0: + pkg_version = self.escape(version) + text = f'
{"v"+pkg_version}
' + return text + else: + return "" + + +# as is from < Python 3.9, since 3.9 introduces a breaking change to pydoc.getdoc +def _pydoc_getdoc(object: Any) -> str: + """Get the doc string or comments for an object.""" + result = inspect.getdoc(object) or inspect.getcomments(object) + return result and re.sub("^ *\n", "", result.rstrip()) or "" + + +def _getdoc(object: Any) -> str: + """Override `pydoc.getdoc` to parse reStructuredText docstrings.""" + try: + docstring = _pydoc_getdoc(object) or "No documentation found." + html = _rst_to_html(docstring, object) + except Exception as exception: + # This is caught somewhere above us in pydoc. Log the exception so we see it in Positron + # logs. + logger.exception(f"Failed to parse docstring for {object}: {exception}") + raise exception + return html + + +def _resolve(target: str, from_obj: Any) -> Optional[str]: + """ + Resolve a possibly partially specified `target` to a full import path. + """ + # Special cases that are commonly false positives, never link these: + if target == "data": + return None + + # Is `target` a module? + try: + importlib.import_module(target) + except Exception: + pass + else: + return target + + # Is `target` a fully qualified name to a class, function, or instance? + if "." in target: + module_path, object_path = target.rsplit(".", 1) + try: + module = importlib.import_module(module_path) + except Exception: + pass + else: + # Ignore all warnings that happen upon `hasattr(module, object_path)` e.g. + # `hasattr(numpy, 'object')` + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + + if hasattr(module, object_path): + return target + + # Is `target` a fully qualified path to a class attribute or method? + if "." 
in module_path: + # Example: + # name: pandas.DataFrame.to_csv + # module_path: pandas.DataFrame -> pandas + # object_path: to_csv -> DataFrame + # attr_path: to_csv + attr_path = object_path + module_path, object_path = module_path.rsplit(".", 1) + try: + module = importlib.import_module(module_path) + except Exception: + pass + else: + obj = getattr(module, object_path, None) + if obj is not None: + if hasattr(obj, attr_path): + return f"{module_path}.{object_path}.{attr_path}" + + # Is `target` a fully qualified name, but implicitly relative to `from_obj`'s package? + from_module_name = get_module_name(from_obj) + if from_module_name is not None: + from_package_name = from_module_name.split(".")[0] + if not target.startswith(from_package_name): # Avoid infinite recursion + target = f"{from_package_name}.{target}" + return _resolve(target, from_obj) + + # Could not resolve. + return None + + +_SECTION_RE = re.compile(r"\n#### ([\w\s]+)\n\n") + + +def _is_argument_name(match: re.Match) -> bool: + """ + Does a match correspond to an argument name? + """ + # Get the line that the match is on. + start, end = match.span() + pre = match.string[:start] + post = match.string[end:] + start_line = pre.rfind("\n") + 1 + end_line = end + post.find("\n") + line = match.string[start_line:end_line] + + # Does the line start with a list item (an argument)? + if line.startswith("- "): + # Are we in a `Parameters` section? + sections = _SECTION_RE.findall(pre) + if sections and sections[-1] == "Parameters": + return True + return False + + +def _linkify_match(match: re.Match, object: Any) -> str: + logger.debug(f"Linkifying: {match.group(0)}") + + # Don't link arguments, a common case of false positives, otherwise, e.g. + # a `copy` argument would link to the standard library `copy` module. + if _is_argument_name(match): + return match.group(0) + + # gather all groups + start, name, end = match.groups() + + # Try to resolve `target` and replace it with a link. 
+ key = _resolve(name, object) + if key is None: + logger.debug("Could not resolve") + return match.group(0) + result = f"[{start}{name}{end}](get?key={key})" + logger.debug(f"Resolved: {key}") + return result + + +def _link_url(match: re.Match, object: Any) -> str: + logger.debug(f"Creating link: {match.group(0)}") + + start, url, end = match.groups() + + return '%s%s%s' % (start, url, url, end) + + +def _linkify(markdown: str, object: Any) -> str: + """ + Replace all instances like '``' or '`[](~name)`' with a + relative pydoc link to a resolved object. + """ + pattern_sphinx = r"(?P`+)(?P[^\d\W`][\w\.]*)(?P`+)" + replacement = partial(_linkify_match, object=object) + result = re.sub(pattern_sphinx, replacement, markdown) + + pattern_md = r"`?\[\]\((?P`?)~(?P[^)^`]+)(?P`?)\)`?" + replacement = partial(_linkify_match, object=object) + result = re.sub(pattern_md, replacement, result) + + pattern_url = re.compile(r"(?P\s)(?Phttps?://\S+)(?P\s)") + replacement = partial(_link_url, object=object) + result = re.sub(pattern_url, replacement, result) + + return result + + +def _highlight(code: str, name: str, attrs: str) -> str: + """ + Highlight a code block. + + This is called via MarkdownIt. For example, given the following markdown code block: + + ```python {.attr1 .attr2} + print("Hello, world!") + ``` + + ... it would call `_highlight('print("Hello, world!"'), "python", ["attr1", "attr2"])`. + """ + try: + lexer = get_lexer_by_name(name) + except ClassNotFound: + # Default to the `TextLexer` which doesn't highlight anything. + lexer = get_lexer_by_name("text") + + formatter = HtmlFormatter() + result = highlight(code, lexer, formatter) + return cast(str, result) + + +def _rst_to_html(docstring: str, object: Any) -> str: + """ + Parse a reStructuredText docstring to HTML. 
+ """ + logger.debug(f"Parsing rST to html for object: {object}") + + markdown = convert_docstring(docstring) + + markdown = _linkify(markdown, object) + + md = MarkdownIt("commonmark", {"html": True, "highlight": _highlight}).enable(["table"]) + + html = md.render(markdown) + + return html + + +# adapted from pydoc._url_handler +def _url_handler(url, content_type="text/html"): + """The pydoc url handler for use with the pydoc server. + + If the content_type is 'text/css', the _pydoc.css style + sheet is read and returned if it exits. + + If the content_type is 'text/html', then the result of + get_html_page(url) is returned. + """ + # --- Start Positron --- + # moved subclass _HTMLDoc and functions to _PositronHTMLDoc + + html = _PositronHTMLDoc() + + # --- End Positron --- + + if url.startswith("/"): + url = url[1:] + if content_type == "text/css": + path_here = os.path.dirname(os.path.realpath(__file__)) + css_path = os.path.join(path_here, url) + with open(css_path) as fp: + return "".join(fp.readlines()) + elif content_type == "text/html": + return html.get_html_page(url) + # Errors outside the url handler are caught by the server. + raise TypeError("unknown content type %r for url %s" % (content_type, url)) + + +def start_server(port: int = 0): + """Adapted from pydoc.browser.""" + + # Setting port to 0 will use an arbitrary port + thread = pydoc._start_server(_url_handler, hostname="localhost", port=port) # type: ignore + + if thread.error: + logger.error(f"Could not start the pydoc help server. Error: {thread.error}") + return + elif thread.serving: + logger.info(f"Pydoc server ready at: {thread.url}") + + return thread + + +if __name__ == "__main__": + # Run Positron's pydoc server on a custom port, useful for development. 
+ # + # Example: + # + # python -m positron.pydoc + + logging.basicConfig(level=logging.DEBUG) + start_server(port=65216) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/__init__.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/__init__.py new file mode 100644 index 00000000000..3fe49d0379f --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/__init__.py @@ -0,0 +1,3 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/conftest.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/conftest.py new file mode 100644 index 00000000000..8bac479aaec --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/conftest.py @@ -0,0 +1,148 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from typing import Iterable +from unittest.mock import Mock + +import comm +import pytest +from positron_ipykernel.connections import ConnectionsService +from positron_ipykernel.data_explorer import DataExplorerService +from positron_ipykernel.positron_ipkernel import PositronIPyKernel, PositronShell +from positron_ipykernel.variables import VariablesService + + +class DummyComm(comm.base_comm.BaseComm): + """ + A comm that records published messages for testing purposes. + """ + + def __init__(self, *args, **kwargs): + self.messages = [] + super().__init__(*args, **kwargs) + + def publish_msg(self, msg_type, **msg): # type: ignore ReportIncompatibleMethodOverride + msg["msg_type"] = msg_type + self.messages.append(msg) + + +# Enable autouse so that all comms are created as DummyComms. +@pytest.fixture(autouse=True) +def patch_create_comm(monkeypatch: pytest.MonkeyPatch) -> None: + """ + Patch the `comm.create_comm` function to use our dummy comm. 
+ """ + monkeypatch.setattr(comm, "create_comm", DummyComm) + + +# Enable autouse to ensure that the kernel is instantiated with the correct shell_class before +# anyone else tries to instantiate it. +@pytest.fixture(autouse=True) +def kernel() -> PositronIPyKernel: + """ + The Positron kernel, configured for testing purposes. + """ + # Create a Positron kernel. The kernel calls shell_class.instance() to get the globally + # registered shell instance, and IPython registers a TerminalInteractiveShell instead of a + # PositronShell. This causes a traitlets validation error unless we pass the shell_class explicitly. + kernel = PositronIPyKernel.instance(shell_class=PositronShell) + + return kernel + + +# Enable autouse to ensure a clean namespace and correct user_ns_hidden in every test, +# even if it doesn't explicitly use the `shell` fixture. +@pytest.fixture(autouse=True) +def shell() -> Iterable[PositronShell]: + shell = PositronShell.instance() + + # TODO: For some reason these vars are in user_ns but not user_ns_hidden during tests. For now, + # manually add them to user_ns_hidden to replicate running in Positron. + shell.user_ns_hidden.update( + { + k: None + for k in [ + "__name__", + "__doc__", + "__package__", + "__loader__", + "__spec__", + "_", + "__", + "___", + ] + } + ) + + yield shell + + # Reset the namespace so we don't interface with other tests (e.g. environment updates). 
+ shell.reset() + + +@pytest.fixture +def mock_dataexplorer_service(shell: PositronShell, monkeypatch: pytest.MonkeyPatch) -> Mock: + mock = Mock() + monkeypatch.setattr(shell.kernel, "data_explorer_service", mock) + return mock + + +@pytest.fixture +def mock_ui_service(shell: PositronShell, monkeypatch: pytest.MonkeyPatch) -> Mock: + mock = Mock() + monkeypatch.setattr(shell.kernel, "ui_service", mock) + return mock + + +@pytest.fixture +def mock_help_service(shell: PositronShell, monkeypatch: pytest.MonkeyPatch) -> Mock: + mock = Mock() + monkeypatch.setattr(shell.kernel, "help_service", mock) + return mock + + +@pytest.fixture +def mock_displayhook(shell: PositronShell, monkeypatch: pytest.MonkeyPatch) -> Mock: + mock = Mock() + monkeypatch.setattr(shell, "displayhook", mock) + return mock + + +@pytest.fixture +def variables_service(kernel: PositronIPyKernel) -> VariablesService: + """ + The Positron variables service. + """ + return kernel.variables_service + + +@pytest.fixture +def variables_comm(variables_service: VariablesService) -> DummyComm: + """ + Convenience fixture for accessing the variables comm. + """ + # Open a comm + variables_comm = DummyComm("dummy_variables_comm") + variables_service.on_comm_open(variables_comm, {}) + + # Clear messages due to the comm_open + variables_comm.messages.clear() + + return variables_comm + + +@pytest.fixture() +def de_service(kernel: PositronIPyKernel) -> DataExplorerService: + """ + The Positron dataviewer service. + """ + return kernel.data_explorer_service + + +@pytest.fixture() +def connections_service(kernel: PositronIPyKernel) -> ConnectionsService: + """ + The Positron connections service. 
+ """ + return kernel.connections_service diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/data.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/data.py new file mode 100644 index 00000000000..70fbc0083ff --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/data.py @@ -0,0 +1,101 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +import datetime +import math +import sys + +import numpy as np +import pandas as pd + +BOOL_CASES = [True, False] + + +STRING_CASES = [ + "", # Empty String + "Hello, world!", # Basic String + " ", # Whitespace String + "First\nSecond\nThird", # Multiline String + "This has a Windows linebreak\r\n", # Windows Linebreak + " Space Before\tTab Between\tSpace After ", # Trailing Whitespace + "É una bella città", # Accented String + "こんにちは", # Japanese String + "עֶמֶק", # RTL String + "ʇxǝʇ", # Upsidedown String + "😅😁", # Emoji String +] + + +# Python 3 ints are unbounded, but we include a few large numbers +# for basic test cases +INT_CASES = [ + -sys.maxsize * 100, + -sys.maxsize, + -1, + 0, + 1, + sys.maxsize, + sys.maxsize * 100, +] + + +NUMPY_SCALAR_CASES = [ + np.int8(1), + np.int16(1), + np.int32(1), + np.int64(1), + np.float16(1.0), + np.float32(1.0), + np.float64(1.0), +] + + +FLOAT_CASES = [ + float("-inf"), + -sys.float_info.max, + -1.0, + -sys.float_info.min, + float("nan"), + 0.0, + sys.float_info.min, + 1.0, + math.pi, + sys.float_info.max, + float("inf"), +] + + +COMPLEX_CASES = [ + complex(-1.0, 100.1), + complex(-1.0, 0.0), + complex(0, 0), + complex(1.0, 0.0), + complex(1.0, 100.1), +] + + +CLASSES_CASES = [pd.DataFrame, np.ndarray, datetime.tzinfo, bytes, str] + + +BYTES_CASES = [b"", b"\x00", b"caff\\xe8"] + + +RANGE_CASES = [ + range(0), # Empty Range + range(1), # Range with positive start, 1 element + range(-1, 0), # Range with negative start, 1 element + range(-2, 3), # Range with negative 
start, positive stop + range(10, 21, 2), # Range with positive start, positive stop, and positive step + range(20, 9, -2), # Range with positive start, positive stop, and negative step + range(2, -10, -2), # Range with positive start, negative stop, and negative step + range(-20, -9, 2), # Range with negative start, negative stop, and positive step + range(-10, 3, 2), # Range with negative start, positive stop, and positive step + range(1, 5000), # Large Range (compact display, does not show elements) +] + + +TIMESTAMP_CASES = [ + pd.Timestamp("2021-01-01 01:23:45"), + datetime.datetime(2021, 1, 1, 1, 23, 45), +] diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_access_keys.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_access_keys.py new file mode 100644 index 00000000000..9c847290f70 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_access_keys.py @@ -0,0 +1,116 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +import json +import math +from typing import Any + +import numpy as np +import pandas as pd +import polars as pl +import pytest +from fastcore.foundation import L +from positron_ipykernel.access_keys import decode_access_key, encode_access_key + +from .data import ( + BOOL_CASES, + BYTES_CASES, + COMPLEX_CASES, + FLOAT_CASES, + INT_CASES, + NUMPY_SCALAR_CASES, + RANGE_CASES, + STRING_CASES, + TIMESTAMP_CASES, +) + +try: + import torch # type: ignore [reportMissingImports] for 3.12 +except ImportError: + torch = None + + +@pytest.mark.parametrize( + "case", + BOOL_CASES + + STRING_CASES + + INT_CASES + + NUMPY_SCALAR_CASES + + FLOAT_CASES + + COMPLEX_CASES + + BYTES_CASES + + RANGE_CASES + + TIMESTAMP_CASES, +) +def test_encode_decode_access_key(case: Any) -> None: + """ + Test that we can encode and decode to recovery supported data types. 
+ """ + access_key = encode_access_key(case) + result = decode_access_key(access_key) + # Handle the float('nan') case since nan != nan + if isinstance(case, float) and math.isnan(case): + assert math.isnan(result) + else: + assert result == case + + +@pytest.mark.parametrize( + "case", + [ + bytearray(), + [], + set(), + L(), + pd.DataFrame(), + pd.Series(), + pl.DataFrame(), + pl.Series(), + np.array([]), + ], +) +def test_encode_access_key_not_hashable_error(case: Any) -> None: + """ + Encoding an access key of an unhashable type raises an error. + """ + with pytest.raises(TypeError): + encode_access_key(case) + + +@pytest.mark.parametrize( + "case", + [ + torch.tensor([]) if torch else None, + lambda x: x, + ], +) +def test_encode_access_key_not_implemented_error(case: Any) -> None: + """ + Encoding an access key of an unsupported type raises an error. + """ + access_key = None + + with pytest.raises(NotImplementedError): + access_key = encode_access_key(case) + + if access_key is not None: + with pytest.raises(NotImplementedError): + decode_access_key(access_key) + + +@pytest.mark.parametrize( + "type_name", + [ + # for Python 3.12 + "torch.Tensor" if torch else "None", + "function", + ], +) +def test_decode_access_key_not_implemented_error(type_name: str) -> None: + """ + Decoding an access key of an unsupported type raises an error. + """ + access_key = json.dumps({"type": type_name, "data": None}) + with pytest.raises(NotImplementedError): + decode_access_key(access_key) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_connections.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_connections.py new file mode 100644 index 00000000000..03754925765 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_connections.py @@ -0,0 +1,146 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. 
+# + +import sqlite3 +from typing import Tuple + +import pytest +import sqlalchemy +from positron_ipykernel.connections import ConnectionsService + +from .conftest import DummyComm +from .utils import json_rpc_request + +TARGET_NAME = "positron.connections" + + +def add_default_data(execute): + execute("CREATE TABLE movie(title TEXT, year INTEGER, score NUMERIC)") + execute("INSERT INTO movie VALUES('The Shawshank Redemption', 1994, 9.3)") + execute("INSERT INTO movie VALUES('The Godfather', 1972, 9.2)") + execute("INSERT INTO movie VALUES('The Dark Knight', 2008, 9.0)") + + +def get_sqlalchemy_sqlite_connection(): + con = sqlalchemy.create_engine("sqlite://") + add_default_data(lambda sql: con.connect().execute(sqlalchemy.text(sql))) + return con + + +def get_sqlite3_sqlite_connection(): + con = sqlite3.connect(":memory:") + add_default_data(lambda sql: con.cursor().execute(sql)) + return con + + +def get_sqlite_connections(): + return [get_sqlalchemy_sqlite_connection(), get_sqlite3_sqlite_connection()] + + +@pytest.fixture(scope="function") +def connections_comm( + connections_service: ConnectionsService, con +) -> Tuple[ConnectionsService, DummyComm]: + comm_id = connections_service.register_connection(con) + + dummy_comm = DummyComm(TARGET_NAME, comm_id=comm_id) + connections_service.on_comm_open(dummy_comm) + dummy_comm.messages.clear() + + return connections_service, dummy_comm + + +@pytest.mark.parametrize("con", get_sqlite_connections()) +class TestSQLiteConnectionsService: + def test_register_connection(self, connections_service: ConnectionsService, con): + comm_id = connections_service.register_connection(con) + assert comm_id in connections_service.comms + + @pytest.mark.parametrize( + "path,expected", + [ + ([], False), + ([{"kind": "schema", "name": "main"}], True), + ], + ) + def test_contains_data( + self, connections_comm: Tuple[ConnectionsService, DummyComm], path, expected + ): + _, comm = connections_comm + + msg = self._make_msg(params={"path": 
path}, method="contains_data", comm_id=comm.comm_id) + comm.handle_msg(msg) + + result = comm.messages[0]["data"]["result"] + assert result is False + + @pytest.mark.parametrize( + "path,expected", + [ + ([], ""), + ([{"kind": "schema", "name": "main"}], ""), + ], + ) + def test_get_icon(self, connections_comm: Tuple[ConnectionsService, DummyComm], path, expected): + _, comm = connections_comm + + msg = self._make_msg(params={"path": path}, method="get_icon", comm_id=comm.comm_id) + comm.handle_msg(msg) + result = comm.messages[0]["data"]["result"] + assert result == expected + + @pytest.mark.parametrize( + "path,expected", + [ + ([], [{"kind": "schema", "name": "main"}]), + ([{"kind": "schema", "name": "main"}], [{"kind": "table", "name": "movie"}]), + ], + ) + def test_list_objects( + self, connections_comm: Tuple[ConnectionsService, DummyComm], path, expected + ): + _, comm = connections_comm + + msg = self._make_msg(params={"path": path}, method="list_objects", comm_id=comm.comm_id) + + comm.handle_msg(msg) + result = comm.messages[0]["data"]["result"] + assert len(result) == 1 + assert result == expected + + def test_list_fields(self, connections_comm: Tuple[ConnectionsService, DummyComm]): + _, comm = connections_comm + + msg = self._make_msg( + params={ + "path": [{"kind": "schema", "name": "main"}, {"kind": "table", "name": "movie"}] + }, + method="list_fields", + comm_id=comm.comm_id, + ) + comm.handle_msg(msg) + result = comm.messages[0]["data"]["result"] + assert len(result) == 3 + assert result[0] == {"name": "title", "dtype": "TEXT"} + assert result[1] == {"name": "year", "dtype": "INTEGER"} + assert result[2] == {"name": "score", "dtype": "NUMERIC"} + + def test_preview_object(self, connections_comm: Tuple[ConnectionsService, DummyComm]): + service, comm = connections_comm + + msg = self._make_msg( + params={ + "path": [{"kind": "schema", "name": "main"}, {"kind": "table", "name": "movie"}] + }, + method="preview_object", + comm_id=comm.comm_id, + ) + 
comm.handle_msg(msg) + # cleanup the data_explorer state, so we don't break its own tests + service._kernel.data_explorer_service.shutdown() + result = comm.messages[0]["data"]["result"] + assert result is None + + def _make_msg(self, method, params, comm_id): + return json_rpc_request(method=method, params=params, comm_id=comm_id) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_data_explorer.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_data_explorer.py new file mode 100644 index 00000000000..2e47a077e58 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_data_explorer.py @@ -0,0 +1,712 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import uuid +from typing import Any, Dict, List, Optional, Type, cast + +import numpy as np +import pandas as pd +import pytest + +from .._vendor.pydantic import BaseModel +from ..access_keys import encode_access_key +from ..data_explorer import COMPARE_OPS, DataExplorerService +from ..data_explorer_comm import ColumnFilter, ColumnSchema, ColumnSortKey, FilterResult +from .conftest import DummyComm, PositronShell +from .test_variables import BIG_ARRAY_LENGTH +from .utils import json_rpc_notification, json_rpc_request, json_rpc_response + +TARGET_NAME = "positron.dataExplorer" + +# ---------------------------------------------------------------------- +# pytest fixtures + + +def guid(): + return str(uuid.uuid4()) + + +def get_new_comm( + de_service: DataExplorerService, + table: Any, + title: str, + comm_id: Optional[str] = None, +) -> DummyComm: + """ + + A comm corresponding to a test dataset belonging to the Positron + dataviewer service. 
+ """ + if comm_id is None: + comm_id = guid() + de_service.register_table(table, title, comm_id=comm_id) + + # Clear any existing messages + new_comm = cast(DummyComm, de_service.comms[comm_id]) + new_comm.messages.clear() + return new_comm + + +def get_last_message(de_service: DataExplorerService, comm_id: str): + comm = cast(DummyComm, de_service.comms[comm_id].comm) + return comm.messages[-1] + + +# ---------------------------------------------------------------------- +# Test basic service functionality + + +class MyData: + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return repr(self.value) + + +SIMPLE_PANDAS_DF = pd.DataFrame( + { + "a": [1, 2, 3, 4, 5], + "b": [True, False, True, None, True], + "c": ["foo", "bar", None, "bar", "qux"], + "d": [0, 1.2, -4.5, 6, np.nan], + "e": pd.to_datetime( + [ + "2024-01-01 00:00:00", + "2024-01-02 12:34:45", + None, + "2024-01-04 00:00:00", + "2024-01-05 00:00:00", + ] + ), + "f": [None, MyData(5), MyData(-1), None, None], + } +) + + +def test_service_properties(de_service: DataExplorerService): + assert de_service.comm_target == TARGET_NAME + + +def _dummy_rpc_request(*args): + return json_rpc_request(*args, comm_id="dummy_comm_id") + + +def _open_viewer(variables_comm, path): + path = [encode_access_key(p) for p in path] + msg = _dummy_rpc_request("view", {"path": path}) + variables_comm.handle_msg(msg) + assert variables_comm.messages == [json_rpc_response({})] + variables_comm.messages.clear() + return tuple(path) + + +def test_explorer_open_close_delete( + shell: PositronShell, + de_service: DataExplorerService, + variables_comm: DummyComm, +): + shell.user_ns.update( + { + "x": SIMPLE_PANDAS_DF, + "y": {"key1": SIMPLE_PANDAS_DF, "key2": SIMPLE_PANDAS_DF}, + } + ) + + path = _open_viewer(variables_comm, ["x"]) + + paths = de_service.get_paths_for_variable("x") + assert len(paths) == 1 + assert paths[0] == path + + comm_ids = 
list(de_service.path_to_comm_ids[path]) + assert len(comm_ids) == 1 + comm = de_service.comms[comm_ids[0]] + + # Simulate comm_close initiated from the front end + comm.comm.handle_close({}) + + # Check that cleanup callback worked correctly + assert len(de_service.path_to_comm_ids[path]) == 0 + assert len(de_service.get_paths_for_variable("x")) == 0 + assert len(de_service.comms) == 0 + assert len(de_service.table_views) == 0 + + +def test_explorer_delete_variable( + shell: PositronShell, + de_service: DataExplorerService, + variables_comm: DummyComm, +): + shell.user_ns.update( + { + "x": SIMPLE_PANDAS_DF, + "y": {"key1": SIMPLE_PANDAS_DF, "key2": SIMPLE_PANDAS_DF}, + } + ) + + # Open multiple data viewers + _open_viewer(variables_comm, ["x"]) + _open_viewer(variables_comm, ["x"]) + _open_viewer(variables_comm, ["y", "key1"]) + _open_viewer(variables_comm, ["y", "key2"]) + _open_viewer(variables_comm, ["y", "key2"]) + + assert len(de_service.comms) == 5 + assert len(de_service.table_views) == 5 + assert len(de_service.get_paths_for_variable("x")) == 1 + assert len(de_service.get_paths_for_variable("y")) == 2 + + # Delete x, check cleaned up and + def _check_delete_variable(name): + msg = _dummy_rpc_request("delete", {"names": [name]}) + + paths = de_service.get_paths_for_variable(name) + assert len(paths) > 0 + + comms = [ + de_service.comms[comm_id] for p in paths for comm_id in de_service.path_to_comm_ids[p] + ] + variables_comm.handle_msg(msg) + + # Check that comms were all closed + for comm in comms: + last_message = cast(DummyComm, comm.comm).messages[-1] + assert last_message["msg_type"] == "comm_close" + + for path in paths: + assert len(de_service.path_to_comm_ids[path]) == 0 + + _check_delete_variable("x") + _check_delete_variable("y") + + +def test_explorer_variable_updates( + shell: PositronShell, + de_service: DataExplorerService, + variables_comm: DummyComm, +): + x = pd.DataFrame({"a": [1, 0, 3, 4]}) + big_x = pd.DataFrame({"a": 
np.arange(BIG_ARRAY_LENGTH)}) + shell.user_ns.update( + { + "x": x, + "big_x": big_x, + "y": {"key1": SIMPLE_PANDAS_DF, "key2": SIMPLE_PANDAS_DF}, + } + ) + + # Check updates + def _check_update_variable(name, update_type="schema", discard_state=True): + paths = de_service.get_paths_for_variable(name) + assert len(paths) > 0 + + comms = [ + de_service.comms[comm_id] for p in paths for comm_id in de_service.path_to_comm_ids[p] + ] + + if update_type == "schema": + expected_msg = json_rpc_notification("schema_update", {"discard_state": discard_state}) + else: + expected_msg = json_rpc_notification("data_update", {}) + + # Check that comms were all closed + for comm in comms: + last_message = cast(DummyComm, comm.comm).messages[-1] + assert last_message == expected_msg + + path_x = _open_viewer(variables_comm, ["x"]) + _open_viewer(variables_comm, ["big_x"]) + _open_viewer(variables_comm, ["y", "key1"]) + _open_viewer(variables_comm, ["y", "key2"]) + _open_viewer(variables_comm, ["y", "key2"]) + + # Do a simple update and make sure that sort keys are preserved + x_comm_id = list(de_service.path_to_comm_ids[path_x])[0] + x_sort_keys = [{"column_index": 0, "ascending": True}] + msg = json_rpc_request( + "set_sort_columns", + params={"sort_keys": [{"column_index": 0, "ascending": True}]}, + comm_id=x_comm_id, + ) + de_service.comms[x_comm_id].comm.handle_msg(msg) + shell.run_cell("import pandas as pd") + shell.run_cell("x = pd.DataFrame({'a': [1, 0, 3, 4, 5]})") + _check_update_variable("x", update_type="data") + + tv = de_service.table_views[x_comm_id] + assert tv.sort_keys == [ColumnSortKey(**k) for k in x_sort_keys] + assert tv._need_recompute + + pf = PandasFixture(de_service) + new_state = pf.get_state("x") + assert new_state["table_shape"]["num_rows"] == 5 + assert new_state["table_shape"]["num_columns"] == 1 + assert new_state["sort_keys"] == [ColumnSortKey(**k) for k in x_sort_keys] + + # Execute code that triggers an update event for big_x because it's large + 
shell.run_cell("print('hello world')") + _check_update_variable("big_x", update_type="data") + + # Update nested values in y and check for schema updates + shell.run_cell( + """y = {'key1': y['key1'].iloc[:1]], + 'key2': y['key2'].copy()} + """ + ) + _check_update_variable("y", update_type="update", discard_state=False) + + shell.run_cell( + """y = {'key1': y['key1'].iloc[:-1, :-1], + 'key2': y['key2'].copy().iloc[:, 1:]} + """ + ) + _check_update_variable("y", update_type="schema", discard_state=True) + + +def test_register_table(de_service: DataExplorerService): + df = pd.DataFrame({"a": [1, 2, 3, 4, 5]}) + comm_id = guid() + + de_service.register_table(df, "test_table", comm_id=comm_id) + + assert comm_id in de_service.comms + table_view = de_service.table_views[comm_id] + assert table_view.table is df + + +def test_shutdown(de_service: DataExplorerService): + df = pd.DataFrame({"a": [1, 2, 3, 4, 5]}) + de_service.register_table(df, "t1", comm_id=guid()) + de_service.register_table(df, "t2", comm_id=guid()) + de_service.register_table(df, "t3", comm_id=guid()) + + de_service.shutdown() + assert len(de_service.comms) == 0 + assert len(de_service.table_views) == 0 + + +# ---------------------------------------------------------------------- +# Test query support for pandas DataFrame + +JsonRecords = List[Dict[str, Any]] + + +class PandasFixture: + def __init__(self, de_service: DataExplorerService): + self.de_service = de_service + + self.register_table("simple", SIMPLE_PANDAS_DF) + + def register_table(self, table_name: str, table): + comm_id = guid() + + paths = self.de_service.get_paths_for_variable(table_name) + for path in paths: + for old_comm_id in list(self.de_service.path_to_comm_ids[path]): + self.de_service._close_explorer(old_comm_id) + + self.de_service.register_table( + table, + table_name, + comm_id=comm_id, + variable_path=[encode_access_key(table_name)], + ) + + def do_json_rpc(self, table_name, method, **params): + paths = 
self.de_service.get_paths_for_variable(table_name) + assert len(paths) == 1 + + comm_id = list(self.de_service.path_to_comm_ids[paths[0]])[0] + + request = json_rpc_request( + method, + params=params, + comm_id=comm_id, + ) + self.de_service.comms[comm_id].comm.handle_msg(request) + response = get_last_message(self.de_service, comm_id) + data = response["data"] + if "error" in data: + raise Exception(data["error"]["message"]) + else: + return data["result"] + + def get_schema(self, table_name, start_index, num_columns): + return self.do_json_rpc( + table_name, + "get_schema", + start_index=start_index, + num_columns=num_columns, + ) + + def get_state(self, table_name): + return self.do_json_rpc(table_name, "get_state") + + def get_data_values(self, table_name, **params): + return self.do_json_rpc(table_name, "get_data_values", **params) + + def set_column_filters(self, table_name, filters=None): + return self.do_json_rpc(table_name, "set_column_filters", filters=filters) + + def set_sort_columns(self, table_name, sort_keys=None): + return self.do_json_rpc(table_name, "set_sort_columns", sort_keys=sort_keys) + + def check_filter_case(self, table, filter_set, expected_table): + table_id = guid() + ex_id = guid() + self.register_table(table_id, table) + self.register_table(ex_id, expected_table) + + response = self.set_column_filters(table_id, filters=filter_set) + assert response == FilterResult(selected_num_rows=len(expected_table)) + self.compare_tables(table_id, ex_id, table.shape) + + def check_sort_case(self, table, sort_keys, expected_table, filters=None): + table_id = guid() + ex_id = guid() + self.register_table(table_id, table) + self.register_table(ex_id, expected_table) + + if filters is not None: + self.set_column_filters(table_id, filters) + + response = self.set_sort_columns(table_id, sort_keys=sort_keys) + assert response is None + self.compare_tables(table_id, ex_id, table.shape) + + def compare_tables(self, table_id: str, expected_id: str, 
table_shape: tuple): + # Query the data and check it yields the same result as the + # manually constructed data frame without the filter + response = self.get_data_values( + table_id, + row_start_index=0, + num_rows=table_shape[0], + column_indices=list(range(table_shape[1])), + ) + ex_response = self.get_data_values( + expected_id, + row_start_index=0, + num_rows=table_shape[0], + column_indices=list(range(table_shape[1])), + ) + assert response == ex_response + + +@pytest.fixture() +def pandas_fixture(de_service: DataExplorerService): + return PandasFixture(de_service) + + +def _wrap_json(model: Type[BaseModel], data: JsonRecords): + return [model(**d).dict() for d in data] + + +def test_pandas_get_state(pandas_fixture: PandasFixture): + result = pandas_fixture.get_state("simple") + assert result["table_shape"]["num_rows"] == 5 + assert result["table_shape"]["num_columns"] == 6 + + sort_keys = [ + {"column_index": 0, "ascending": True}, + {"column_index": 1, "ascending": False}, + ] + filters = [_compare_filter(0, ">", 0), _compare_filter(0, "<", 5)] + pandas_fixture.set_sort_columns("simple", sort_keys=sort_keys) + pandas_fixture.set_column_filters("simple", filters=filters) + + result = pandas_fixture.get_state("simple") + assert result["sort_keys"] == sort_keys + assert result["filters"] == [ColumnFilter(**f) for f in filters] + + +def test_pandas_get_schema(pandas_fixture: PandasFixture): + result = pandas_fixture.get_schema("simple", 0, 100) + + full_schema = [ + { + "column_name": "a", + "type_name": "int64", + "type_display": "number", + }, + { + "column_name": "b", + "type_name": "boolean", + "type_display": "boolean", + }, + { + "column_name": "c", + "type_name": "string", + "type_display": "string", + }, + { + "column_name": "d", + "type_name": "float64", + "type_display": "number", + }, + { + "column_name": "e", + "type_name": "datetime64[ns]", + "type_display": "datetime", + }, + {"column_name": "f", "type_name": "mixed", "type_display": "unknown"}, 
+ ] + + assert result["columns"] == _wrap_json(ColumnSchema, full_schema) + + result = pandas_fixture.get_schema("simple", 2, 100) + assert result["columns"] == _wrap_json(ColumnSchema, full_schema[2:]) + + result = pandas_fixture.get_schema("simple", 6, 100) + assert result["columns"] == [] + + # Make a really big schema + bigger_df = pd.concat([SIMPLE_PANDAS_DF] * 100, axis="columns") + bigger_name = guid() + bigger_schema = full_schema * 100 + pandas_fixture.register_table(bigger_name, bigger_df) + + result = pandas_fixture.get_schema(bigger_name, 0, 100) + assert result["columns"] == _wrap_json(ColumnSchema, bigger_schema[:100]) + + result = pandas_fixture.get_schema(bigger_name, 10, 10) + assert result["columns"] == _wrap_json(ColumnSchema, bigger_schema[10:20]) + + +def test_pandas_wide_schemas(pandas_fixture: PandasFixture): + arr = np.arange(10).astype(object) + + ncols = 10000 + df = pd.DataFrame({f"col_{i}": arr for i in range(ncols)}) + + pandas_fixture.register_table("wide_df", df) + + chunk_size = 100 + for chunk_index in range(ncols // chunk_size): + start_index = chunk_index * chunk_size + pandas_fixture.register_table( + f"wide_df_{chunk_index}", + df.iloc[:, start_index : (chunk_index + 1) * chunk_size], + ) + + schema_slice = pandas_fixture.get_schema("wide_df", start_index, chunk_size) + expected = pandas_fixture.get_schema(f"wide_df_{chunk_index}", 0, chunk_size) + assert schema_slice["columns"] == expected["columns"] + + +def _trim_whitespace(columns): + return [[x.strip() for x in column] for column in columns] + + +def test_pandas_get_data_values(pandas_fixture: PandasFixture): + result = pandas_fixture.get_data_values( + "simple", + row_start_index=0, + num_rows=20, + column_indices=list(range(6)), + ) + + # TODO: pandas pads all values to fixed width, do we want to do + # something different? 
+ expected_columns = [ + ["1", "2", "3", "4", "5"], + ["True", "False", "True", "None", "True"], + ["foo", "bar", "None", "bar", "qux"], + ["0.0", "1.2", "-4.5", "6.0", "NaN"], + [ + "2024-01-01 00:00:00", + "2024-01-02 12:34:45", + "NaT", + "2024-01-04 00:00:00", + "2024-01-05 00:00:00", + ], + ["None", "5", "-1", "None", "None"], + ] + + assert _trim_whitespace(result["columns"]) == expected_columns + + assert result["row_labels"] == [["0", "1", "2", "3", "4"]] + + # Edge cases: request beyond end of table + response = pandas_fixture.get_data_values( + "simple", row_start_index=5, num_rows=10, column_indices=[0] + ) + assert response["columns"] == [[]] + + # Issue #2149 -- return empty result when requesting non-existent + # column indices + response = pandas_fixture.get_data_values( + "simple", row_start_index=0, num_rows=5, column_indices=[2, 3, 4, 5] + ) + assert _trim_whitespace(response["columns"]) == expected_columns[2:] + + # Edge case: request invalid column index + # Per issue #2149, until we can align on whether the UI is allowed + # to request non-existent column indices, disable this test + + # with pytest.raises(IndexError): + # pandas_fixture.get_data_values( + # "simple", row_start_index=0, num_rows=10, column_indices=[4] + # ) + + +def _filter(filter_type, column_index, **kwargs): + kwargs.update( + { + "filter_id": guid(), + "filter_type": filter_type, + "column_index": column_index, + } + ) + return kwargs + + +def _compare_filter(column_index, compare_op, compare_value): + return _filter( + "compare", + column_index, + compare_op=compare_op, + compare_value=compare_value, + ) + + +def _set_member_filter(column_index, values, inclusive=True): + return _filter( + "set_membership", + column_index, + set_member_inclusive=inclusive, + set_member_values=values, + ) + + +def test_pandas_filter_compare(pandas_fixture: PandasFixture): + # Just use the 'a' column to smoke test comparison filters on + # integers + table_name = "simple" + df = 
SIMPLE_PANDAS_DF + compare_value = 3 + column = "a" + column_index = df.columns.get_loc(column) + + for op, op_func in COMPARE_OPS.items(): + filt = _compare_filter(column_index, op, str(compare_value)) + expected_df = df[op_func(df[column], compare_value)] + pandas_fixture.check_filter_case(df, [filt], expected_df) + + # Test that passing empty filter set resets to unfiltered state + filt = _compare_filter(column_index, "<", str(compare_value)) + _ = pandas_fixture.set_column_filters(table_name, filters=[filt]) + response = pandas_fixture.set_column_filters(table_name, filters=[]) + assert response == FilterResult(selected_num_rows=len(df)) + + # register the whole table to make sure the filters are really cleared + ex_id = guid() + pandas_fixture.register_table(ex_id, df) + pandas_fixture.compare_tables(table_name, ex_id, df.shape) + + +def test_pandas_filter_isnull_notnull(pandas_fixture: PandasFixture): + df = SIMPLE_PANDAS_DF + b_isnull = _filter("isnull", 1) + b_notnull = _filter("notnull", 1) + c_notnull = _filter("notnull", 2) + + cases = [ + [[b_isnull], df[df["b"].isnull()]], + [[b_notnull], df[df["b"].notnull()]], + [[b_notnull, c_notnull], df[df["b"].notnull() & df["c"].notnull()]], + ] + + for filter_set, expected_df in cases: + pandas_fixture.check_filter_case(df, filter_set, expected_df) + + +def test_pandas_filter_set_membership(pandas_fixture: PandasFixture): + df = SIMPLE_PANDAS_DF + + cases = [ + [[_set_member_filter(0, [2, 4])], df[df["a"].isin([2, 4])]], + [ + [_set_member_filter(2, ["bar", "foo"])], + df[df["c"].isin(["bar", "foo"])], + ], + [[_set_member_filter(2, [])], df[df["c"].isin([])]], + [[_set_member_filter(2, ["bar"], False)], df[~df["c"].isin(["bar"])]], + [[_set_member_filter(2, [], False)], df], + ] + + for filter_set, expected_df in cases: + pandas_fixture.check_filter_case(df, filter_set, expected_df) + + +def test_pandas_set_sort_columns(pandas_fixture: PandasFixture): + tables = { + "df1": SIMPLE_PANDAS_DF, + # Just some 
random data to test multiple keys, different sort + # orders, etc. + "df2": pd.DataFrame( + { + "a": np.random.standard_normal(10000), + "b": np.tile(np.arange(2), 5000), + "c": np.tile(np.arange(10), 1000), + } + ), + } + + cases = [ + ("df1", [(2, True)], {"by": "c"}), + ("df1", [(2, False)], {"by": "c", "ascending": False}), + # Tests stable sorting + ("df2", [(1, True)], {"by": "b"}), + ("df2", [(2, True)], {"by": "c"}), + ("df2", [(0, True), (1, True)], {"by": ["a", "b"]}), + ( + "df2", + [(0, True), (1, False)], + {"by": ["a", "b"], "ascending": [True, False]}, + ), + ( + "df2", + [(2, False), (1, True), (0, False)], + {"by": ["c", "b", "a"], "ascending": [False, True, False]}, + ), + ] + + # Test sort AND filter + filter_cases = {"df2": [(lambda x: x[x["a"] > 0], [_compare_filter(0, ">", 0)])]} + + for df_name, keys, expected_params in cases: + wrapped_keys = [ + {"column_index": index, "ascending": ascending} for index, ascending in keys + ] + df = tables[df_name] + + expected_params["kind"] = "mergesort" + + expected_df = df.sort_values(**expected_params) + + pandas_fixture.check_sort_case(df, wrapped_keys, expected_df) + + for filter_f, filters in filter_cases.get(df_name, []): + expected_filtered = filter_f(df).sort_values(**expected_params) + pandas_fixture.check_sort_case(df, wrapped_keys, expected_filtered, filters=filters) + + +# def test_pandas_get_column_profile(pandas_fixture: PandasFixture): +# pass + + +# def test_pandas_get_state(pandas_fixture: PandasFixture): +# pass + + +# ---------------------------------------------------------------------- +# Test RPCs for polars DataFrame + + +# ---------------------------------------------------------------------- +# Test RPCs for pyarrow Table diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_docstrings_epytext.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_docstrings_epytext.py new file mode 100644 index 
00000000000..45ddabfff87 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_docstrings_epytext.py @@ -0,0 +1,129 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import pytest +from positron_ipykernel.docstrings import epytext_to_markdown, looks_like_epytext + +BASIC_EXAMPLE = """Example of epytext docstring. + +This is a paragraph. + +@param a: this is the first param +@type a: str +@param b: this is a second param +@type b: int + +@return: this function returns something for sure +@rtype: str +""" + +BASIC_EXAMPLE_MD = """Example of epytext docstring. + +This is a paragraph. + +#### Param: + +- `a` (str): this is the first param +- `b` (int): this is a second param + +#### Return: + +(str) this function returns something for sure +""" + +ESCAPE_MAGIC_METHOD = """Example. + +@param a: see __init__.py +""" + +ESCAPE_MAGIC_METHOD_MD = """Example. + +#### Param: + +- `a`: see \\_\\_init\\_\\_.py +""" + +PLAIN_SECTION = """Example. + +@param a: some arg + +@note: do not use this. Notes can + include multiple lines. + + There can even be multiple paragraphs. +""" + +PLAIN_SECTION_MD = """Example. + +#### Param: + +- `a`: some arg + +#### Note: + +do not use this. Notes can include multiple lines. + +There can even be multiple paragraphs. +""" + +MULTILINE_ARG_DESCRIPTION = """Example of epytext docstring. + +@param a: This is a description of + the parameter including + several lines. +@type a: str +@param b: this is a second param + it has two lines +@type b: int +""" + +MULTILINE_ARG_DESCRIPTION_MD = """Example of epytext docstring. + +#### Param: + +- `a` (str): This is a description of the parameter including several lines. 
+- `b` (int): this is a second param it has two lines +""" + +EPYTEXT_CASES = { + "basic example": { + "epytext": BASIC_EXAMPLE, + "md": BASIC_EXAMPLE_MD, + }, + "escape magic method": { + "epytext": ESCAPE_MAGIC_METHOD, + "md": ESCAPE_MAGIC_METHOD_MD, + }, + "plain section": { + "epytext": PLAIN_SECTION, + "md": PLAIN_SECTION_MD, + }, + "multiline arg description": { + "epytext": MULTILINE_ARG_DESCRIPTION, + "md": MULTILINE_ARG_DESCRIPTION_MD, + }, +} + + +@pytest.mark.parametrize( + "epytext", + [case["epytext"] for case in EPYTEXT_CASES.values()], + ids=EPYTEXT_CASES.keys(), +) +def test_looks_like_epytext_recognises_epytext(epytext): + assert looks_like_epytext(epytext) + + +def test_looks_like_epytext_ignores_plain_text(): + assert not looks_like_epytext("This is plain text") + assert not looks_like_epytext("See Also\n--------\n") + + +@pytest.mark.parametrize( + "epytext,markdown", + [[case["epytext"], case["md"]] for case in EPYTEXT_CASES.values()], + ids=EPYTEXT_CASES.keys(), +) +def test_epytext_to_markdown(epytext, markdown): + assert epytext_to_markdown(epytext) == markdown diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_help.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_help.py new file mode 100644 index 00000000000..c3b319d620f --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_help.py @@ -0,0 +1,156 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. 
+# + +from typing import Any +from unittest.mock import Mock +from urllib.request import urlopen + +import numpy as np +import pandas as pd +import pytest +from positron_ipykernel.help import HelpService, help +from positron_ipykernel.help_comm import HelpBackendRequest, HelpFrontendEvent + +from .conftest import DummyComm +from .utils import json_rpc_request + +TARGET_NAME = "target_name" + + +@pytest.fixture +def help_service() -> HelpService: + """ + A Positron help service. + """ + return HelpService() + + +@pytest.fixture +def running_help_service(help_service: HelpService): + help_service.start() + yield help_service + help_service.shutdown() + + +@pytest.fixture +def help_comm(help_service: HelpService) -> DummyComm: + """ + Open a dummy comm for the help service. + """ + # Open a comm + help_comm = DummyComm(TARGET_NAME) + help_service.on_comm_open(help_comm, {}) + + # Clear messages due to the comm_open + help_comm.messages.clear() + + return help_comm + + +def test_pydoc_server_starts_and_shuts_down(running_help_service: HelpService): + help_service = running_help_service + + assert help_service._pydoc_thread is not None + assert help_service._pydoc_thread.serving + + help_service.shutdown() + + assert not help_service._pydoc_thread.serving + + +def test_pydoc_server_styling(running_help_service: HelpService): + """ + We should pydoc should apply css styling + """ + help_service = running_help_service + + assert help_service._pydoc_thread is not None + + key = "pandas.read_csv" + url = f"{help_service._pydoc_thread.url}get?key={key}" + with urlopen(url) as f: + html = f.read().decode("utf-8") + + # Html should include stylesheet if added correctly + assert ' None: + # Mock the show_help method + mock_show_help = Mock() + monkeypatch.setattr(help_service, "show_help", mock_show_help) + + msg = json_rpc_request( + HelpBackendRequest.ShowHelpTopic, {"topic": "logging"}, comm_id="dummy_comm_id" + ) + help_comm.handle_msg(msg) + + assert help_comm.messages == 
[ + { + "data": {"jsonrpc": "2.0", "result": True}, + "metadata": None, + "buffers": None, + "msg_type": "comm_msg", + } + ] + + mock_show_help.assert_called_once_with("logging") diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_inspectors.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_inspectors.py new file mode 100644 index 00000000000..aa9548d15e1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_inspectors.py @@ -0,0 +1,791 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import datetime +import inspect +import pprint +import random +import string +import types +from typing import Any, Callable, Tuple, Type + +import numpy as np +import pandas as pd +import polars as pl +import pytest +from fastcore.foundation import L +from positron_ipykernel.inspectors import PRINT_WIDTH, TRUNCATE_AT, get_inspector +from positron_ipykernel.utils import get_qualname +from positron_ipykernel.variables_comm import VariableKind + +from .data import ( + BOOL_CASES, + BYTES_CASES, + CLASSES_CASES, + COMPLEX_CASES, + FLOAT_CASES, + INT_CASES, + NUMPY_SCALAR_CASES, + RANGE_CASES, + STRING_CASES, + TIMESTAMP_CASES, +) +from .utils import get_type_as_str + +try: + import torch # type: ignore [reportMissingImports] for 3.12 +except ImportError: + torch = None + + +def verify_inspector( + value: Any, + length: int, + display_value: str, + is_truncated: bool, + kind: str, + display_type: str, + type_info: str, + has_children: bool = False, + has_viewer: bool = False, +) -> None: + # NOTE: Skip `get_size` for now, since it depends on platform, Python version, and package version. 
+ + inspector = get_inspector(value) + + assert inspector.get_length() == length + assert inspector.has_children() == has_children + assert inspector.has_viewer() == has_viewer + assert inspector.get_display_value() == (display_value, is_truncated) + assert inspector.get_kind() == kind + assert inspector.get_display_type() == display_type + assert inspector.get_type_info() == type_info + + +class HelperClass: + """ + A helper class for testing method functions. + """ + + def __init__(self): + self._x = 1 + + def fn_no_args(self): + return "No args" + + def fn_one_arg(self, x: str) -> str: + return f"One arg {x}" + + def fn_two_args(self, x: int, y: int) -> Tuple[int, int]: + return (x, y) + + @property + def prop(self): + return self._x + + +# +# Test Booleans +# + + +@pytest.mark.parametrize("value", BOOL_CASES) +def test_inspect_boolean(value: bool) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind="boolean", + display_type="bool", + type_info="bool", + ) + + +# +# Test Strings +# + + +@pytest.mark.parametrize("value", STRING_CASES) +def test_inspect_string(value: str) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=repr(value), + kind=VariableKind.String, + display_type="str", + type_info="str", + length=length, + ) + + +def test_inspect_string_truncated() -> None: + value = "".join(random.choices(string.ascii_letters, k=(TRUNCATE_AT + 10))) + length = len(value) + verify_inspector( + value=value, + display_value=f"'{value[:TRUNCATE_AT]}'", + kind=VariableKind.String, + display_type="str", + type_info="str", + length=length, + is_truncated=True, + ) + + +# +# Test Numbers +# + + +@pytest.mark.parametrize("value", INT_CASES) +def test_inspect_integer(value: int) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Number, + display_type="int", + type_info="int", + ) + + 
+@pytest.mark.parametrize("value", NUMPY_SCALAR_CASES) +def test_inspect_numpy_scalars(value: np.integer) -> None: + dtype = str(value.dtype) + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Number, + display_type=str(dtype), + type_info=f"numpy.{dtype}", + ) + + +@pytest.mark.parametrize("value", FLOAT_CASES) +def test_inspect_float(value: float) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Number, + display_type="float", + type_info="float", + ) + + +@pytest.mark.parametrize("value", COMPLEX_CASES) +def test_inspect_complex(value: complex) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Number, + display_type="complex", + type_info="complex", + ) + + +# +# Test Classes +# + + +@pytest.mark.parametrize("value", CLASSES_CASES) +def test_inspect_classes(value: type) -> None: + verify_inspector( + value=value, + length=len([p for p in dir(value) if not (p.startswith("_"))]), + has_children=True, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Class, + display_type="type", + type_info="type", + ) + + +# +# Test Bytes +# + + +@pytest.mark.parametrize("value", BYTES_CASES) +def test_inspect_bytes(value: bytes) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Bytes, + display_type=f"bytes [{length}]", + type_info="bytes", + length=length, + ) + + +BYTEARRAY_CASES = [ + bytearray(), + bytearray(0), + bytearray(1), + bytearray(b"\x41\x42\x43"), +] + + +@pytest.mark.parametrize("value", BYTEARRAY_CASES) +def test_inspect_bytearray(value: bytearray) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Bytes, + display_type=f"bytearray [{length}]", + 
type_info="bytearray", + length=length, + ) + + +def test_inspect_bytearray_truncated() -> None: + value = bytearray(TRUNCATE_AT * 2) + length = len(value) + verify_inspector( + value=value, + display_value=str(value)[:TRUNCATE_AT], + kind=VariableKind.Bytes, + display_type=f"bytearray [{length}]", + type_info="bytearray", + length=length, + is_truncated=True, + ) + + +def test_inspect_memoryview() -> None: + byte_array = bytearray("東京", "utf-8") + value = memoryview(byte_array) + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Bytes, + display_type=f"memoryview [{length}]", + type_info="memoryview", + length=length, + ) + + +# +# Test Timestamps +# + + +@pytest.mark.parametrize("value", TIMESTAMP_CASES) +def test_inspect_timestamp(value: datetime.datetime) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=repr(value), + kind=VariableKind.Other, + display_type=type(value).__name__, + type_info=get_qualname(value), + ) + + +# +# Test Empty +# + +NONE_CASES = [None] + + +@pytest.mark.parametrize("value", NONE_CASES) +def test_inspect_none(value: None) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value="None", + kind=VariableKind.Empty, + display_type="NoneType", + type_info="NoneType", + ) + + +# +# Test Collections +# + +SET_CASES = [ + set(), + set([None]), + set(BOOL_CASES), + set(INT_CASES), + set(FLOAT_CASES), + set(COMPLEX_CASES), + set(BYTES_CASES), + set(STRING_CASES), +] + + +@pytest.mark.parametrize("value", SET_CASES) +def test_inspect_set(value: set) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True), + kind=VariableKind.Collection, + display_type=f"set {{{length}}}", + type_info="set", + length=length, + ) + + +def test_inspect_set_truncated() -> None: + value = 
set(list(range(TRUNCATE_AT * 2))) + length = len(value) + verify_inspector( + value=value, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True)[:TRUNCATE_AT], + kind=VariableKind.Collection, + display_type=f"set {{{length}}}", + type_info="set", + length=length, + is_truncated=True, + ) + + +LIST_CASES = [ + [], + NONE_CASES, + BOOL_CASES, + INT_CASES, + FLOAT_CASES, + COMPLEX_CASES, + BYTES_CASES, + BYTEARRAY_CASES, + STRING_CASES, +] + + +@pytest.mark.parametrize("value", LIST_CASES) +def test_inspect_list(value: list) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True), + kind=VariableKind.Collection, + display_type=f"list [{length}]", + type_info="list", + length=length, + has_children=length > 0, + ) + + +def test_inspect_list_truncated() -> None: + value = list(range(TRUNCATE_AT * 2)) + length = len(value) + verify_inspector( + value=value, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True)[:TRUNCATE_AT], + kind=VariableKind.Collection, + display_type=f"list [{length}]", + type_info="list", + length=length, + has_children=True, + is_truncated=True, + ) + + +def test_inspect_list_cycle() -> None: + value = list([1, 2]) + value.append(value) # type: ignore + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True)[:TRUNCATE_AT], + kind=VariableKind.Collection, + display_type=f"list [{length}]", + type_info="list", + length=length, + has_children=True, + ) + + +@pytest.mark.parametrize("value", RANGE_CASES) +def test_inspect_range(value: range) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True), + kind=VariableKind.Collection, + display_type=f"range [{length}]", + type_info="range", + length=length, + ) + + 
+FASTCORE_LIST_CASES = [ + L(), + L(NONE_CASES), + L(BOOL_CASES), + L(INT_CASES), + L(FLOAT_CASES), + L(COMPLEX_CASES), + L(BYTES_CASES), + L(BYTEARRAY_CASES), + L(STRING_CASES), +] + + +@pytest.mark.parametrize("value", FASTCORE_LIST_CASES) +def test_inspect_fastcore_list(value: L) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True), + kind=VariableKind.Collection, + display_type=f"L [{length}]", + type_info="fastcore.foundation.L", + length=length, + has_children=length > 0, + ) + + +# +# Test Maps +# + + +MAP_CASES = [ + {}, # empty dict + {"": None}, # empty key + {10: "Ten"}, # int key + {"A": True}, # bool value + {"B": 1}, # int value + {"C": -1.01}, # float value + {"D": complex(1, 2)}, # complex value + {"E": "Echo"}, # str value + {"F": b"Foxtrot"}, # bytes value + {"G": bytearray(b"\x41\x42\x43")}, # byterray value + {"H": (1, 2, 3)}, # tuple value + {"I": [1, 2, 3]}, # list value + {"J": {1, 2, 3}}, # set value + {"K": range(3)}, # range value + {"L": {"L1": 1, "L2": 2, "L3": 3}}, # nested dict value +] + + +@pytest.mark.parametrize("value", MAP_CASES) +def test_inspect_map(value: dict) -> None: + length = len(value) + verify_inspector( + value=value, + is_truncated=False, + display_value=pprint.pformat(value, width=PRINT_WIDTH, compact=True), + kind=VariableKind.Map, + display_type=f"dict [{length}]", + type_info="dict", + length=length, + has_children=length > 0, + ) + + +# +# Test Functions +# +helper = HelperClass() + + +FUNCTION_CASES = [ + lambda: None, # No argument lambda function + lambda x: x, # Single argument lambda function + lambda x, y: x + y, # Multiple argument lambda function + helper.fn_no_args, # No argument method + helper.fn_one_arg, # Single argument method with single return type + helper.fn_two_args, # Multiple argument method with tuple return type +] + + +@pytest.mark.parametrize("value", FUNCTION_CASES) +def 
test_inspect_function(value: Callable) -> None: + expected_type = "method" if isinstance(value, types.MethodType) else "function" + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=f"{value.__qualname__}{inspect.signature(value)}", + kind=VariableKind.Function, + display_type=expected_type, + type_info=expected_type, + ) + + +# +# Test objects +# + +OBJECTS_CASES = [helper] + + +@pytest.mark.parametrize("value", OBJECTS_CASES) +def test_inspect_object(value: Any) -> None: + verify_inspector( + value=value, + length=4, + has_children=True, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Other, + display_type="HelperClass", + type_info="positron_ipykernel.tests.test_inspectors.HelperClass", + ) + + +# +# Test property +# +PROPERTY_CASES = [HelperClass.prop] + + +@pytest.mark.parametrize("value", PROPERTY_CASES) +def test_inspect_property(value: property) -> None: + verify_inspector( + value=value, + length=0, + is_truncated=False, + display_value=str(value), + kind=VariableKind.Other, + display_type="property", + type_info="property", + ) + + +# +# Test arrays +# + + +@pytest.mark.parametrize( + "value", + [ + np.array([1, 2, 3], dtype=np.int64), # 1D + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64), # 2D + ], +) +def test_inspect_numpy_array(value: np.ndarray) -> None: + shape = value.shape + display_shape = f"({shape[0]})" if len(shape) == 1 else str(tuple(shape)) + verify_inspector( + value=value, + display_value=np.array2string(value, separator=","), + kind=VariableKind.Collection, + display_type=f"numpy.int64 {display_shape}", + type_info="numpy.ndarray", + has_children=True, + is_truncated=True, + length=shape[0], + ) + + +@pytest.mark.parametrize( + "value", + [ + np.array(1, dtype=np.int64), + ], +) +def test_inspect_numpy_array_0d(value: np.ndarray) -> None: + verify_inspector( + value=value, + display_value=np.array2string(value, separator=","), + kind=VariableKind.Number, + 
display_type="numpy.int64", + type_info="numpy.ndarray", + is_truncated=True, + length=0, + ) + + +# +# Test tables +# + + +def test_inspect_pandas_dataframe() -> None: + value = pd.DataFrame({"a": [1, 2], "b": ["3", "4"]}) + rows, cols = value.shape + verify_inspector( + value=value, + display_value=f"[{rows} rows x {cols} columns] {get_type_as_str(value)}", + kind=VariableKind.Table, + display_type=f"DataFrame [{rows}x{cols}]", + type_info=get_type_as_str(value), + has_children=True, + has_viewer=True, + is_truncated=True, + length=rows, + ) + + +@pytest.mark.parametrize( + "value", + [ + pd.RangeIndex(0, 2), + pd.Index([0, 1]), + pd.date_range("2021-01-01 00:00:00", "2021-01-01 02:00:00", freq="h"), + pd.MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")]), + ], +) +def test_inspect_pandas_index(value: pd.Index) -> None: + (rows,) = value.shape + not_range_index = not isinstance(value, pd.RangeIndex) + verify_inspector( + value=value, + display_value=str(value.to_list() if not_range_index else value), + kind=VariableKind.Map, + display_type=f"{value.dtype} [{rows}]", + type_info=get_qualname(value), + has_children=not_range_index, + is_truncated=not_range_index, + length=rows, + ) + + +def test_inspect_pandas_series() -> None: + value = pd.Series({"a": 0, "b": 1}) + (rows,) = value.shape + verify_inspector( + value=value, + display_value="[0, 1]", + kind=VariableKind.Map, + display_type=f"int64 [{rows}]", + type_info=get_type_as_str(value), + has_children=True, + is_truncated=True, + length=rows, + ) + + +def test_inspect_polars_dataframe() -> None: + value = pl.DataFrame({"a": [1, 2], "b": [3, 4]}) + rows, cols = value.shape + verify_inspector( + value=value, + display_value=f"[{rows} rows x {cols} columns] {get_type_as_str(value)}", + kind=VariableKind.Table, + display_type=f"DataFrame [{rows}x{cols}]", + type_info=get_type_as_str(value), + has_children=True, + has_viewer=True, + is_truncated=True, + length=rows, + ) + + +def test_inspect_polars_series() -> 
None: + value = pl.Series([0, 1]) + (rows,) = value.shape + verify_inspector( + value=value, + display_value="[0, 1]", + kind=VariableKind.Map, + display_type=f"Int64 [{rows}]", + type_info=get_type_as_str(value), + has_children=True, + is_truncated=True, + length=rows, + ) + + +@pytest.mark.parametrize( + ("cls", "data"), + [ + (pd.Series, {"a": 0, "b": 1}), + (pl.Series, [0, 1]), + (pd.DataFrame, {"a": [1, 2], "b": ["3", "4"]}), + (pl.DataFrame, {"a": [1, 2], "b": ["3", "4"]}), + (pd.Index, [0, 1]), + ( + pd.Index, + [datetime.datetime(2021, 1, 1), datetime.datetime(2021, 1, 2)], + ), + (np.array, [0, 1]), # 1D + (np.array, [[0, 1], [2, 3]]), # 2D + ], +) +def test_get_items(cls: Type, data: Any) -> None: + parent = cls(data) + inspector = get_inspector(parent) + + items = list(inspector.get_items()) + + expected_keys = data.keys() if isinstance(data, dict) else range(len(data)) + assert len(items) == len(expected_keys) + for (key, value), expected_key in zip(items, expected_keys): + expected_value = parent[expected_key] + assert key == expected_key + assert get_inspector(value).equals(expected_value) + + +@pytest.mark.parametrize( + ("value", "key", "expected"), + [ + (helper, "fn_no_args", helper.fn_no_args), + (pd.Series({"a": 0, "b": 1}), "a", 0), + (pl.Series([0, 1]), 0, 0), + ( + pd.DataFrame({"a": [1, 2], "b": ["3", "4"]}), + "a", + pd.Series([1, 2], name="a"), + ), + ( + pl.DataFrame({"a": [1, 2], "b": ["3", "4"]}), + "a", + pl.Series(values=[1, 2], name="a"), + ), + (pd.Index([0, 1]), 0, 0), + ( + pd.Index([datetime.datetime(2021, 1, 1), datetime.datetime(2021, 1, 2)]), + 0, + datetime.datetime(2021, 1, 1), + ), + (np.array([0, 1]), 0, 0), # 1D + (np.array([[0, 1], [2, 3]]), 0, [0, 1]), # 2D + ], +) +def test_get_child(value: Any, key: Any, expected: Any) -> None: + child = get_inspector(value).get_child(key) + assert get_inspector(child).equals(expected) + + +@pytest.mark.parametrize( + ("value", "expected"), + [ + (np.array([[1, 2, 3], [4, 5, 6]], 
dtype="int64"), 48), + (torch.Tensor([[1, 2, 3], [4, 5, 6]]) if torch else None, 24), + (pd.Series([1, 2, 3, 4]), 32), + (pl.Series([1, 2, 3, 4]), 32), + (pd.DataFrame({"a": [1, 2], "b": ["3", "4"]}), 32), + (pl.DataFrame({"a": [1, 2], "b": ["3", "4"]}), 32), + (pd.Index([0, 1]), 16), + ], +) +def test_arrays_maps_get_size(value: Any, expected: int) -> None: + if value is None: + return + inspector = get_inspector(value) + assert inspector.get_size() == expected diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_plots.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_plots.py new file mode 100644 index 00000000000..3902e3960fe --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_plots.py @@ -0,0 +1,237 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import codecs +import pickle +from pathlib import Path +from typing import Iterable, cast + +import matplotlib +import matplotlib.pyplot as plt +import pytest +from IPython.core.formatters import DisplayFormatter, format_display_data +from matplotlib.axes import Axes +from matplotlib.figure import Figure +from matplotlib.testing.compare import compare_images +from matplotlib_inline.backend_inline import configure_inline_support +from positron_ipykernel.plots import BASE_DPI, PositronDisplayPublisherHook +from positron_ipykernel.positron_comm import JsonRpcErrorCode + +from .conftest import DummyComm, PositronShell +from .utils import comm_request, json_rpc_error, json_rpc_request + +PLOT_DATA = [1, 2] + + +@pytest.fixture(autouse=True) +def setup_matplotlib(shell: PositronShell) -> Iterable[None]: + # Use IPython's `matplotlib_inline` backend + backend = "module://matplotlib_inline.backend_inline" + matplotlib.use(backend) + + # Enable all IPython mimetype formatters + display_formatter = cast(DisplayFormatter, shell.display_formatter) + active_types = 
display_formatter.active_types + display_formatter.active_types = display_formatter.format_types + + # Enable matplotlib IPython formatters + configure_inline_support(shell, backend) + + yield + + # Restore the original active formatters + display_formatter.active_types = active_types + + +@pytest.fixture(scope="session") +def images_path() -> Path: + images_path = Path(__file__).parent / "images" + images_path.mkdir(exist_ok=True) + return images_path + + +@pytest.fixture +def hook() -> PositronDisplayPublisherHook: + return PositronDisplayPublisherHook("positron.plot") + + +@pytest.fixture +def figure_comm(hook: PositronDisplayPublisherHook) -> DummyComm: + """ + A comm corresponding to a test figure belonging to the Positron display publisher hook. + """ + # Initialize the hook by calling it on a figure created with the test plot data + plt.plot(PLOT_DATA) + msg = comm_request({"image/png": None}, msg_type="display_data") + hook(msg) + plt.close() + + # Return the comm corresponding to the first figure + id = next(iter(hook.comms)) + figure_comm = cast(DummyComm, hook.comms[id].comm) + + # Clear messages due to the comm_open + figure_comm.messages.clear() + + return figure_comm + + +def test_hook_call_noop_on_non_display_data(hook: PositronDisplayPublisherHook) -> None: + msg = comm_request({"image/png": None}, msg_type="not_display_data") + assert hook(msg) == msg + assert hook.figures == {} + assert hook.comms == {} + + +def test_hook_call_noop_on_no_image_png(hook: PositronDisplayPublisherHook) -> None: + msg = comm_request({}, msg_type="display_data") + assert hook(msg) == msg + assert hook.figures == {} + assert hook.comms == {} + + +def test_hook_call(hook: PositronDisplayPublisherHook, images_path: Path) -> None: + # It returns `None` to indicate that it's consumed the message + plt.plot(PLOT_DATA) + msg = comm_request({"image/png": None}, msg_type="display_data") + assert hook(msg) is None + + # It creates a new figure and comm + assert len(hook.figures) 
== 1 + id = next(iter(hook.figures)) + assert id in hook.comms + + # Check the comm's properties + comm = hook.comms[id].comm + assert comm.target_name == hook.target_name + assert comm.comm_id == id + + # Check that the figure is a pickled base64-encoded string by decoding it and comparing it + # with a reference figure. + # First, save the hook's figure + fig_encoded = hook.figures[id] + fig: Figure = pickle.loads(codecs.decode(fig_encoded.encode(), "base64")) + actual = images_path / "test-hook-call-actual.png" + fig.savefig(str(actual)) + + # Create the reference figure + fig_ref = cast(Figure, plt.figure()) + fig_axes = cast(Axes, fig_ref.subplots()) + fig_axes.plot(PLOT_DATA) + expected = images_path / "test-hook-call-expected.png" + fig_ref.savefig(str(expected)) + + # Compare actual versus expected figures + err = compare_images(str(actual), str(expected), tol=0) + assert not err + + +def test_hook_handle_msg_noop_on_unknown_method(figure_comm: DummyComm) -> None: + # Handle a message with an invalid msg_type + msg = json_rpc_request("not_render", {}) + figure_comm.handle_msg(msg) + + assert figure_comm.messages == [ + json_rpc_error(JsonRpcErrorCode.METHOD_NOT_FOUND, "Unknown method 'not_render'") + ] + + +def render_request(comm_id: str, width_px: int = 500, height_px: int = 500, pixel_ratio: int = 1): + return json_rpc_request( + "render", + {"width": width_px, "height": height_px, "pixel_ratio": pixel_ratio}, + comm_id=comm_id, + ) + + +def test_hook_render_noop_on_unknown_comm(figure_comm: DummyComm) -> None: + # Handle a valid message but invalid comm_id + msg = render_request("unknown_comm_id") + figure_comm.handle_msg(msg) + + # No messages sent + assert figure_comm.messages == [] + + +def test_hook_render_error_on_unknown_figure( + hook: PositronDisplayPublisherHook, figure_comm: DummyComm +) -> None: + # Clear the hook's figures to simulate a missing figure + hook.figures.clear() + + # Handle a message with a valid msg_type and valid comm_id, but 
the hook now has a missing figure + msg = render_request(figure_comm.comm_id) + figure_comm.handle_msg(msg) + + # Check that we receive an error reply + assert figure_comm.messages == [ + json_rpc_error(JsonRpcErrorCode.INVALID_PARAMS, f"Figure {figure_comm.comm_id} not found") + ] + + +def _save_base64_image(encoded: str, filename: Path) -> None: + image = codecs.decode(encoded.encode(), "base64") + with open(filename, "wb") as f: + f.write(image) + + +def test_hook_render(figure_comm: DummyComm, images_path: Path) -> None: + # Send a valid render message with a custom width and height + width_px = height_px = 100 + pixel_ratio = 1 + msg = render_request(figure_comm.comm_id, width_px, height_px, pixel_ratio) + figure_comm.handle_msg(msg) + + # Check that the reply is a comm_msg + reply = figure_comm.messages[0] + assert reply["msg_type"] == "comm_msg" + assert reply["buffers"] is None + assert reply["metadata"] == {} + + # Check that the reply data is an `image` message + image_msg = reply["data"] + assert image_msg["result"]["mime_type"] == "image/png" + + # Check that the reply data includes the expected base64-encoded resized image + + # Save the reply's image + actual = images_path / "test-hook-render-actual.png" + _save_base64_image(image_msg["result"]["data"], actual) + + # Create the reference figure + dpi = BASE_DPI * pixel_ratio + width_in = width_px / BASE_DPI + height_in = height_px / BASE_DPI + + fig_ref = cast(Figure, plt.figure()) + fig_axes = cast(Axes, fig_ref.subplots()) + fig_axes.plot([1, 2]) + fig_ref.set_dpi(dpi) + fig_ref.set_size_inches(width_in, height_in) + + # Serialize the reference figure as a base64-encoded image + data_ref, _ = format_display_data(fig_ref, include=["image/png"], exclude=[]) # type: ignore + expected = images_path / "test-hook-render-expected.png" + _save_base64_image(data_ref["image/png"], expected) + + # Compare the actual vs expected figures + err = compare_images(str(actual), str(expected), tol=0) + assert not err 
+ + +# It's important that we depend on the figure_comm fixture too, so that the hook is initialized +def test_shutdown(hook: PositronDisplayPublisherHook, figure_comm: DummyComm) -> None: + # Double-check that it still has figures and comms + assert len(hook.figures) == 1 + assert len(hook.comms) == 1 + + # Double-check that the comm is not yet closed + assert not figure_comm._closed + + hook.shutdown() + + # Figures and comms are closed and cleared + assert not hook.figures + assert not hook.comms + assert figure_comm._closed diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_ipkernel.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_ipkernel.py new file mode 100644 index 00000000000..966027c8599 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_ipkernel.py @@ -0,0 +1,230 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import os +from pathlib import Path +from typing import Any, cast +from unittest.mock import Mock + +import pandas as pd +import pytest +from IPython.utils.syspathcontext import prepended_to_syspath +from positron_ipykernel.help import help +from positron_ipykernel.utils import alias_home + +from .conftest import PositronShell +from .utils import assert_dataset_registered + +# The idea for these tests is to mock out communications with Positron +# via our various comms, and only test IPython interactions. For +# example, in testing the %view magic, we assert that running a cell +# with `%view` calls the data_explorer service's `register_table` +# method with the expected arguments. The actual messages sent over +# the comm are tested in the respective service tests. + + +def test_override_help(shell: PositronShell) -> None: + """ + Check that we override the shell's `help` function with our own. 
+ """ + assert shell.user_ns["help"] == help + assert shell.user_ns_hidden["help"] == help + + +def test_view_pandas_df_expression(shell: PositronShell, mock_dataexplorer_service: Mock) -> None: + expr = "pd.DataFrame({'x': [1,2,3]})" + + shell.run_cell( + f"""import pandas as pd +%view {expr}""" + ) + + obj = pd.DataFrame({"x": [1, 2, 3]}) + assert_dataset_registered(mock_dataexplorer_service, obj, expr) + + +def test_view_pandas_df_var(shell: PositronShell, mock_dataexplorer_service: Mock) -> None: + name = "a" + shell.run_cell( + f"""import pandas as pd +{name} = pd.DataFrame({{'x': [1,2,3]}}) +%view {name}""" + ) + + obj = shell.user_ns[name] + assert_dataset_registered(mock_dataexplorer_service, obj, name) + + +def test_view_polars_df_var(shell: PositronShell, mock_dataexplorer_service: Mock) -> None: + name = "a" + shell.run_cell( + f"""import polars as pl +{name} = pl.DataFrame({{'x': [1,2,3]}}) +%view {name}""" + ) + + obj = shell.user_ns[name] + assert_dataset_registered(mock_dataexplorer_service, obj, name) + + +def test_view_unsupported_type(shell: PositronShell) -> None: + with pytest.raises(TypeError): + shell.run_line_magic("view", "12") + + +code = """def f(): + raise Exception("This is an error!") + +def g(): + f() +""" + + +def test_traceback(shell: PositronShell, tmp_path: Path, mock_displayhook: Mock) -> None: + # We follow the approach of IPython's test_ultratb.py, which is to create a temporary module, + # prepend its parent directory to sys.path, import it, then run a cell that calls a function + # from it. + + # Create a temporary module. + file = tmp_path / "test_traceback.py" + file.write_text(code) + + # Temporarily add the module to sys.path and call a function from it, which should error. 
+ with prepended_to_syspath(str(tmp_path)): + shell.run_cell("import test_traceback; test_traceback.g()") + + # NOTE(seem): This is not elegant, but I'm not sure how else to test this than other than to + # compare the beginning of each frame of the traceback. The escape codes make it particularly + # challenging. + + path = str(alias_home(file)) + uri = file.expanduser().as_uri() + + # Define a few OSC8 escape codes for convenience. + esc = "\x1b" + osc8 = esc + "]8" + st = esc + "\\" + + # Convenient reference to colors from the active scheme. + colors = cast(Any, shell.InteractiveTB.Colors) + + # This template matches the beginning of each traceback frame. We don't check each entire frame + # because syntax highlighted code is full of escape codes. For example, after removing + # escape codes a formatted version of below might look like: + # + # File /private/var/folders/.../test_traceback.py:11, in func() + # + traceback_frame_header = "".join( + [ + "File ", + colors.filenameEm, + # File paths are replaced with OSC8 links. + osc8, + ";line={line};", + uri, + st, + path, + ":{line}", + osc8, + ";;", + st, + colors.Normal, + ", in ", + colors.vName, + "{func}", + colors.valEm, + "()", + colors.Normal, + ] + ) + + # Check that a single message was sent to the frontend. + call_args_list = mock_displayhook.session.send.call_args_list + assert len(call_args_list) == 1 + + call_args = call_args_list[0] + + # Check that the message was sent over the "error" stream. + assert call_args.args[1] == "error" + + exc_content = call_args.args[2] + + # Check that two frames were included (the top frame is included in the exception value below). + traceback = exc_content["traceback"] + assert len(traceback) == 2 + + # Check the beginning of each frame. + assert_ansi_string_startswith(traceback[0], traceback_frame_header.format(line=5, func="g")) + assert_ansi_string_startswith(traceback[1], traceback_frame_header.format(line=2, func="f")) + + # Check the exception name. 
+ assert exc_content["ename"] == "Exception" + + # The exception value should include the top of the stack trace. + assert_ansi_string_startswith( + exc_content["evalue"], "This is an error!\nCell " + colors.filenameEm + ) + + +def assert_ansi_string_startswith(actual: str, expected: str) -> None: + """ + Assert that an ansi-formatted string starts with an expected string, in a way that gets pytest + to print a helpful diff. + """ + # We manually trim each string instead of using str.startswith else pytest doesn't highlight + # where strings differ. We compare reprs so that pytest displays escape codes instead of + # interpreting them - it's easier to debug. + length = min(len(actual), len(expected)) + actual = repr(actual[:length]) + expected = repr(expected[:length]) + assert actual == expected + + +def test_pinfo(shell: PositronShell, mock_help_service: Mock) -> None: + """ + Redirect `object?` to the Positron help service's `show_help` method. + """ + shell.run_cell("object?") + + mock_help_service.show_help.assert_called_once_with(object) + + +def test_pinfo_2(shell: PositronShell, tmp_path: Path, mock_ui_service: Mock) -> None: + """ + Redirect `object??` to the Positron UI service's `open_editor` method. + """ + # Create a temporary module using a predefined code snippet, so that we know the expected + # file and line number where the object is defined. + file = tmp_path / "test_pinfo_2.py" + file.write_text(code) + + # Temporarily add the module to sys.path and run the `??` magic. + with prepended_to_syspath(str(tmp_path)): + shell.run_cell("import test_pinfo_2") + shell.run_cell("test_pinfo_2.g??") + + # IPython normalizes the case of the file path. + expected_file = os.path.normcase(file) + mock_ui_service.open_editor.assert_called_once_with(expected_file, 4, 0) + + +def test_clear(shell: PositronShell, mock_ui_service: Mock) -> None: + """ + Redirect `%clear` to the Positron UI service's `clear_console` method. 
+ """ + shell.run_cell("%clear") + + mock_ui_service.clear_console.assert_called_once_with() + + +def test_question_mark_help(shell: PositronShell, mock_help_service: Mock) -> None: + """ + Redirect `?` to the Positron Help service. + """ + + shell.run_cell("?") + + mock_help_service.show_help.assert_called_once_with( + "positron_ipykernel.utils.positron_ipykernel_usage" + ) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_jedilsp.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_jedilsp.py new file mode 100644 index 00000000000..afb6cf22b95 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_positron_jedilsp.py @@ -0,0 +1,252 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from typing import Any, Dict, List, Optional, cast +from unittest.mock import Mock + +import pandas as pd +import polars as pl +import pytest +from positron_ipykernel._vendor.jedi import Project +from positron_ipykernel._vendor.jedi_language_server import jedi_utils +from positron_ipykernel._vendor.lsprotocol.types import ( + CompletionItem, + CompletionParams, + MarkupContent, + MarkupKind, + Position, + TextDocumentIdentifier, +) +from positron_ipykernel._vendor.pygls.workspace.text_document import TextDocument +from positron_ipykernel.help_comm import ShowHelpTopicParams +from positron_ipykernel.jedi import PositronInterpreter +from positron_ipykernel.positron_jedilsp import ( + HelpTopicParams, + positron_completion, + positron_completion_item_resolve, + positron_help_topic_request, +) + + +def mock_server(uri: str, source: str, namespace: Dict[str, Any]) -> Mock: + """ + Minimum interface for a pylgs server to support LSP unit tests. 
+ """ + server = Mock() + server.client_capabilities.text_document.completion.completion_item.documentation_format = list( + MarkupKind + ) + server.initialization_options.completion.disable_snippets = False + server.initialization_options.completion.resolve_eagerly = False + server.initialization_options.completion.ignore_patterns = [] + server.initialization_options.markup_kind_preferred = MarkupKind.Markdown + server.shell.user_ns = namespace + server.project = Project("") + server.workspace.get_document.return_value = TextDocument(uri, source) + return server + + +@pytest.mark.parametrize( + ("source", "namespace", "expected_topic"), + [ + # An unknown variable should not be resolved. + ("x", {}, None), + # ... but a variable in the user's namespace should resolve. + ("x", {"x": 0}, "builtins.int"), + ], +) +def test_positron_help_topic_request( + source: str, + namespace: Dict[str, Any], + expected_topic: Optional[str], +) -> None: + params = HelpTopicParams(TextDocumentIdentifier("file:///foo.py"), Position(0, 0)) + server = mock_server(params.text_document.uri, source, namespace) + + topic = positron_help_topic_request(server, params) + + if expected_topic is None: + assert topic is None + else: + assert topic == ShowHelpTopicParams(topic=expected_topic) + + +class _ObjectWithProperty: + @property + def prop(self) -> str: + return "prop" + + +_object_with_property = _ObjectWithProperty() + + +def _completions( + source: str, + namespace: Dict[str, Any], +) -> List[CompletionItem]: + lines = source.splitlines() + line = len(lines) - 1 + character = len(lines[line]) + params = CompletionParams(TextDocumentIdentifier("file:///foo.py"), Position(line, character)) + server = mock_server(params.text_document.uri, source, namespace) + + completion_list = positron_completion(server, params) + + assert completion_list is not None, "No completions returned" + + return completion_list.items + + +@pytest.mark.parametrize( + ("source", "namespace", "expected_labels"), + 
[ + # Dict key mapping to a property. + ('x["', {"x": {"a": _object_with_property.prop}}, ["a"]), + # When completions match a variable defined in the source _and_ a variable in the user's namespace, + # prefer the namespace variable. + ('x = {"a": 0}\nx["', {"x": {"b": 0}}, ["b"]), + # Dict key mapping to an int. + ('x["', {"x": {"a": 0}}, ["a"]), + # Dict literal key mapping to an int. + ('{"a": 0}["', {}, ["a"]), + # Pandas dataframe - dict key access. + ('x["', {"x": pd.DataFrame({"a": []})}, ["a"]), # string column name + ('x["', {"x": pd.DataFrame({0: []})}, ["0"]), # integer column name + # Polars dataframe - dict key access. + ('x["', {"x": pl.DataFrame({"a": []})}, ["a"]), + ], +) +def test_positron_completion_exact( + source: str, + namespace: Dict[str, Any], + expected_labels: List[str], +) -> None: + completions = _completions(source, namespace) + completion_labels = [completion.label for completion in completions] + assert completion_labels == expected_labels + + +@pytest.mark.parametrize( + ("source", "namespace", "expected_label"), + [ + # Pandas dataframe - attribute access. + # Note that polars dataframes don't support accessing columns as attributes. + ("x.a", {"x": pd.DataFrame({"a": []})}, "a"), + ], +) +def test_positron_completion_contains( + source: str, + namespace: Dict[str, Any], + expected_label: str, +) -> None: + completions = _completions(source, namespace) + completion_labels = [completion.label for completion in completions] + assert expected_label in completion_labels + + +_pd_df = pd.DataFrame({"a": [0]}) +_pl_df = pl.DataFrame({"a": [0]}) + + +@pytest.mark.parametrize( + ("source", "namespace", "expected_detail", "expected_documentation"), + [ + # Dict key mapping to a property. + ( + 'x["', + {"x": {"a": _object_with_property.prop}}, + "instance str(object='', /) -> str", + jedi_utils.convert_docstring(cast(str, str.__doc__), MarkupKind.Markdown), + ), + # Dict key mapping to an int. 
+ ( + 'x["', + {"x": {"a": 0}}, + "instance int(x=None, /) -> int", + jedi_utils.convert_docstring(cast(str, int.__doc__), MarkupKind.Markdown), + ), + # Integer, to sanity check for a basic value. + ( + "x", + {"x": 0}, + "instance int(x=None, /) -> int", + jedi_utils.convert_docstring(cast(str, int.__doc__), MarkupKind.Markdown), + ), + # Dict literal key mapping to an int. + ( + '{"a": 0}["', + {}, + "instance int(x=None, /) -> int", + jedi_utils.convert_docstring(cast(str, int.__doc__), MarkupKind.Markdown), + ), + # Pandas dataframe. + ( + "x", + {"x": _pd_df}, + f"DataFrame [{_pd_df.shape[0]}x{_pd_df.shape[1]}]", + f"```text\n{_pd_df}\n```", + ), + # Pandas dataframe column - dict key access. + ( + 'x["', + {"x": _pd_df}, + f"int64 [{_pd_df['a'].shape[0]}]", + f"```text\n{_pd_df['a']}\n```", + ), + # Pandas series. + ( + "x", + {"x": _pd_df["a"]}, + f"int64 [{_pd_df['a'].shape[0]}]", + f"```text\n{_pd_df['a']}\n```", + ), + # Polars dataframe. + ( + "x", + {"x": _pl_df}, + f"DataFrame [{_pl_df.shape[0]}x{_pl_df.shape[1]}]", + f"```text\n{_pl_df}\n```", + ), + # Polars dataframe column - dict key access. + ( + 'x["', + {"x": _pl_df}, + f"Int64 [{_pl_df['a'].shape[0]}]", + f"```text\n{_pl_df['a']}\n```", + ), + # Polars series. + ( + "x", + {"x": _pl_df["a"]}, + f"Int64 [{_pl_df['a'].shape[0]}]", + f"```text\n{_pl_df['a']}\n```", + ), + ], +) +def test_positron_completion_item_resolve( + source: str, + namespace: Dict[str, Any], + expected_detail: str, + expected_documentation: str, + monkeypatch, +) -> None: + # Create a jedi Completion and patch jedi language server's most recent completions. + # This is the state that we expect to be in when positron_completion_item_resolve is called. 
+ lines = source.splitlines() + line = len(lines) + character = len(lines[line - 1]) + completions = PositronInterpreter(source, namespaces=[namespace]).complete(line, character) + assert len(completions) == 1, "Test cases must have exactly one completion" + [completion] = completions + monkeypatch.setattr(jedi_utils, "_MOST_RECENT_COMPLETIONS", {"label": completion}) + + server = mock_server("", source, namespace) + params = CompletionItem("label") + + resolved = positron_completion_item_resolve(server, params) + + assert resolved.detail == expected_detail + assert isinstance(resolved.documentation, MarkupContent) + assert resolved.documentation.kind == MarkupKind.Markdown + assert resolved.documentation.value == expected_documentation diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_pydoc.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_pydoc.py new file mode 100644 index 00000000000..268449ca655 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_pydoc.py @@ -0,0 +1,536 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from types import ModuleType +from typing import Any, Callable, List, Tuple + +import numpy as np +import pandas as pd +import pytest +from positron_ipykernel.pydoc import ( + _Attr, + _compact_signature, + _get_summary, + _getdoc, + _PositronHTMLDoc, + _resolve, + _tabulate_attrs, + _untyped_signature, +) + +# Test data + + +def _test_func(a: int, b: str, c=1, *args, **kwargs) -> None: + """ + Function summary. + + Function long description. + """ + pass + + +class _DummyAttribute: + """ + Attribute summary. + + Attribute long description. + """ + + +class _A: + """ + Class summary. + + Class long description. 
+ """ + + attr = _DummyAttribute() + + def __init__(self) -> None: + """This should not be documented.""" + + def method(self, a: int, b: str, c=1, *args, **kwargs) -> None: + """ + Method summary, may contain [links](target). + + Method long description. + """ + pass + + +_module = ModuleType( + "test_module", + """\ +Module summary. + +Module long description.""", +) +setattr(_module, "A", _A) +setattr(_module, "test_func", _test_func) + + +def _test_getdoc_links_arguments_section() -> None: + """ + Summary. + + Parameters + ---------- + copy : bool + Uses `copy.copy`. + second : int + Description 2. + """ + + +def _test_getdoc_md_links_arguments_section() -> None: + """ + Summary. + + Parameters + ---------- + copy : bool + Uses [](`~copy.copy`). + second : int + Description 2. + """ + + +_TEST_GETDOC_LINKS_ARGS_SECTION_OUTPUT = """\ +

Summary.

+

Parameters

+
    +
  • copy: bool +Uses copy.copy.
  • +
  • second: int +Description 2.
  • +
+""" + + +def _test_getdoc_links_see_also_section() -> None: + """ + Summary. + + See Also + -------- + copy.copy : Description 1. + """ + + +def _test_getdoc_md_links_see_also_section() -> None: + """ + Summary. + + See Also + -------- + [](`~copy.copy`) : Description 1. + """ + + +_TEST_GETDOC_LINKS_SEE_ALSO_SECTION_OUTPUT = """\ +

Summary.

+

See Also

+ +""" + + +def _test_getdoc_code_blocks() -> None: + """ + >>> import pandas as pd + >>> pd.DataFrame() + Empty DataFrame + Columns: [] + Index: [] + """ + + +def _test_getdoc_urls() -> None: + """ + Note + ---- + See https://url.com for more info + """ + + +def _test_getdoc_md_urls() -> None: + """ + Note + ---- + See [url](https://url.com) for more info + """ + + +# Tests + + +_html = _PositronHTMLDoc() + + +@pytest.mark.parametrize( + ("func", "args"), + [ + (_html.html_index, ()), + (_html.html_error, ("test-url", Exception())), + # NOTE: For some reason, including html_search causes an existing test to fail: + # tests/unittestadapter/test_discovery.py::test_error_discovery + # (_html.html_search, ("pydoc",)), + (_html.html_getobj, ("pydoc",)), # Module + (_html.html_getobj, ("pydoc.Helper",)), # Class + (_html.html_getobj, ("pydoc.getdoc",)), # Function + (_html.html_getobj, ("pydoc.help",)), # Data + (_html.html_keywords, ()), + (_html.html_topicpage, ("FLOAT",)), + (_html.html_topics, ()), + ], +) +def test_pydoc_py311_breaking_changes(func: Callable, args: Tuple[Any, ...]) -> None: + """ + Python 3.11 introduced a breaking change into pydoc.HTMLDoc methods: heading, section, and + bigsection. Ensure that we've patched these to work in all versions from 3.8+. + + NOTE: We can remove this test once we have better end-to-end tests on the generated HTML for + specific objects. + """ + # These will error unless we patch pydoc.HTMLDoc heading, section, and bigsection. 
+ func(*args) + + +def _no_args(): + pass + + +def _one_arg(values): + pass + + +def _two_args(loc, value): + pass + + +def _required_and_optional_args(other, axis=None, level=None): + pass + + +def _only_optional_args(index=None, name=None): + pass + + +def _keyword_only_required_and_optional_args(*, columns, index=None, values=None): + pass + + +def _keyword_only_optional_args(*, axis=None, inplace=None, limit=None, downcast=None): + pass + + +def _required_and_keyword_only_optional_args(labels, *, axis=None, copy=None): + pass + + +def _variadic_positional_and_keyword_args(func, *args, **kwargs): + pass + + +def _variadic_keyword_args(**kwargs): + pass + + +def _variadic_positional_and_optional_keyword_args(*args, copy=None): + pass + + +def _truncated_1(other, join=None, overwrite=None, filter_func=None, errors=None): + pass + + +def _truncated_2(subset=None, normalize=None, sort=None, ascending=None, dropna=None): + pass + + +def _truncated_3(subset=None, *, keep, inplace, ignore_index): + pass + + +@pytest.mark.parametrize( + ("func", "expected"), + [ + (_no_args, "()"), + (_one_arg, "(values)"), + (_two_args, "(loc, value)"), + (_required_and_optional_args, "(other[, axis, level])"), + (_only_optional_args, "([index, name])"), + (_keyword_only_required_and_optional_args, "(*, columns[, index, values])"), + (_keyword_only_optional_args, "(*[, axis, inplace, limit, downcast])"), + (_required_and_keyword_only_optional_args, "(labels, *[, axis, copy])"), + (_variadic_positional_and_keyword_args, "(func, *args, **kwargs)"), + (_variadic_keyword_args, "(**kwargs)"), + (_variadic_positional_and_optional_keyword_args, "(*args[, copy])"), + (_truncated_1, "(other[, join, overwrite, ...])"), + (_truncated_2, "([subset, normalize, sort, ...])"), + (_truncated_3, "([subset, *, keep, inplace, ...])"), + # Non-callable + (None, None), + ], +) +def test_compact_signature(func: Callable, expected: str) -> None: + result = _compact_signature(func) + assert result == expected 
+ + +@pytest.mark.parametrize( + ("func", "expected"), + [ + (pd.DataFrame, "(data=None, index=None, columns=None, dtype=None, copy=None)"), + (_test_func, "(a, b, c=1, *args, **kwargs)"), + ], +) +def test_untyped_signature(func: Callable, expected: str) -> None: + result = _untyped_signature(func) + assert result == expected + + +@pytest.mark.parametrize( + ("attrs", "expected"), + [ + # Empty + ([], ['', "", "", "
"]), + # One attr + ( + [_Attr(name="attr", cls=_A, value=_DummyAttribute)], + [ + '', + "", + "", + "", + "", + "", + "", + "
", + 'attr()', + "", + "Attribute summary.", + "
", + ], + ), + ], +) +def test_tabulate_attrs(attrs: List[_Attr], expected: List[str]) -> None: + result = _tabulate_attrs(attrs) + assert result == expected + + +@pytest.mark.parametrize( + ("obj", "expected"), + [ + (pd.DataFrame, "Two-dimensional, size-mutable, potentially heterogeneous tabular data."), + ], +) +def test_get_summary(obj: Any, expected: str) -> None: + result = _get_summary(obj) + assert result == expected + + +def _assert_html_equal(result: str, expected: str) -> None: + # Ignore whitespace between lines. + # This is specifically to handle the fact that black removes trailing whitespaces from our + # `expected` HTML above. + _result = [line.strip() for line in result.split("\n")] + _expected = [line.strip() for line in expected.split("\n")] + assert _result == _expected + + +def test_document_module() -> None: + result = _html.document(_module) + expected = """\ +

test_module

Module summary.

+

Module long description.

+
+

Classes

+ + + + + + + +
+A() + +Class summary. +
+
+

Functions

+ + + + + + + +
+test_func(a, b[, c, *args, **kwargs]) + +Function summary. +
+
""" + + _assert_html_equal(result, expected) + + +def test_document_class(): + result = _html.document(_A) + expected = """\ +

_A

class _A()

Class summary.

+

Class long description.

+
+

Attributes

+ + + + + + + +
+attr + +Attribute summary. +
+
+

Methods

+ + + + + + + +
+method(a, b[, c, *args, **kwargs]) + +Method summary, may contain [links](target). +
+
""" + + _assert_html_equal(result, expected) + + +def test_document_no_topic(): + with pytest.raises(ValueError, match=r"No help found for topic: NoneType."): + _html.html_topicpage("NoneType") + + +def test_document_version() -> None: + result = _html.document(pd) + expected = f"""
v{pd.__version__}

pandas

""" + + assert result.startswith(expected) + + +@pytest.mark.parametrize( + ("target", "from_obj", "expected"), + [ + # *From* a module + ("Series", pd, "pandas.Series"), + # A package + ("os", pd.read_csv, "os"), + ("pandas", pd.read_csv, "pandas"), + # A sub-module + ("pandas.io", pd.read_csv, "pandas.io"), + # A sub-module, implicitly relative to `from_obj`'s package + ("api", pd.read_csv, "pandas.api"), + # This is a bit ambiguous, but we have to assume that the user is referring to the stdlib... + # TODO: Maybe we lost some info here when going from rst to markdown... + # So maybe we want to parse links before converting to markdown? + ("io", pd.read_csv, "io"), + # A fully qualified name to a class, function, or instance + ("os.PathLike", pd.read_csv, "os.PathLike"), + ("os.path.split", pd.read_csv, "os.path.split"), + ("os.path.sep", pd.read_csv, "os.path.sep"), + ("pandas.DataFrame", pd.read_csv, "pandas.DataFrame"), + # A fully qualified name to a class attribute or method + ("pandas.DataFrame.to_csv", pd.read_csv, "pandas.DataFrame.to_csv"), + # A fully qualified name, implicitly relative to `from_obj`'s package + ("DataFrame", pd.read_csv, "pandas.DataFrame"), + ("DataFrame.to_csv", pd.read_csv, "pandas.DataFrame.to_csv"), + ("read_fwf", pd.read_csv, "pandas.read_fwf"), + # Unresolvable + ("filepath_or_buffer", pd.read_csv, None), + ("pd.to_datetime", pd.read_csv, None), + # Ensure that we can handle linking from a `property` + ("DataFrame.transpose", pd.read_csv, "pandas.DataFrame.transpose"), + # Linking from a getset_descriptor + ("ndarray.base", np.generic.base, "numpy.ndarray.base"), + ], +) +def test_resolve(target: str, from_obj: Any, expected: Any) -> None: + """ + Unit test for `_resolve` since it is particularly tricky. + """ + assert _resolve(target, from_obj) == expected + + +@pytest.mark.parametrize( + ("object", "expected"), + [ + # Does not link item names/types in Arguments section, but does link descriptions. 
+ ( + _test_getdoc_links_arguments_section, + _TEST_GETDOC_LINKS_ARGS_SECTION_OUTPUT, + ), + # Same as previous but using markdown link format. + ( + _test_getdoc_md_links_arguments_section, + _TEST_GETDOC_LINKS_ARGS_SECTION_OUTPUT, + ), + # Links items in the list under the See Also section. + ( + _test_getdoc_links_see_also_section, + _TEST_GETDOC_LINKS_SEE_ALSO_SECTION_OUTPUT, + ), + # Same as previous but using markdown link format. + ( + _test_getdoc_md_links_see_also_section, + _TEST_GETDOC_LINKS_SEE_ALSO_SECTION_OUTPUT, + ), + # Highlights code blocks. + # Inputs and outputs are split into separate html elements. + ( + _test_getdoc_code_blocks, + """\ +
import pandas as pd
+pd.DataFrame()
+
+
+
Empty DataFrame
+Columns: []
+Index: []
+
+
+""", + ), + # Match and replace bare urls + ( + _test_getdoc_urls, + """\ +

Note

+

See https://url.com for more info

+""", + ), + # Should not match to markdown URLs + ( + _test_getdoc_md_urls, + """\ +

Note

+

See url for more info

+""", + ), + ], +) +def test_getdoc(object: Any, expected: str) -> None: + html = _getdoc(object) + assert html == expected diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_ui.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_ui.py new file mode 100644 index 00000000000..5b851a7f793 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_ui.py @@ -0,0 +1,142 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import os +from pathlib import Path +from typing import Any, Dict + +import numpy as np +import pandas as pd +import polars as pl +import pytest + +try: + import torch # type: ignore [reportMissingImports] for 3.12 +except ImportError: + torch = None + +from positron_ipykernel.positron_ipkernel import PositronIPyKernel, PositronShell +from positron_ipykernel.ui import UiService +from positron_ipykernel.utils import alias_home + +from .conftest import DummyComm +from .utils import ( + comm_open_message, + json_rpc_notification, + json_rpc_request, + json_rpc_response, + preserve_working_directory, +) + +TARGET_NAME = "target_name" + + +@pytest.fixture +def ui_service(kernel: PositronIPyKernel) -> UiService: + """ + The Positron UI service. + """ + return kernel.ui_service + + +@pytest.fixture +def ui_comm(ui_service: UiService) -> DummyComm: + """ + Open a dummy comm for the UI service. + """ + # TODO: Close any existing comms? 
+ + # Open a comm + ui_comm = DummyComm(TARGET_NAME) + ui_service.on_comm_open(ui_comm, {}) + + # Clear messages due to the comm_open + ui_comm.messages.clear() + + return ui_comm + + +def working_directory_event() -> Dict[str, Any]: + return json_rpc_notification("working_directory", {"directory": str(alias_home(Path.cwd()))}) + + +def test_comm_open(ui_service: UiService) -> None: + # Double-check that comm is not yet open + assert ui_service._comm is None + + # Open a comm + ui_comm = DummyComm(TARGET_NAME) + ui_service.on_comm_open(ui_comm, {}) + + # Check that the comm_open and initial working_directory messages are sent + assert ui_comm.messages == [comm_open_message(TARGET_NAME), working_directory_event()] + + +def test_set_console_width(ui_comm: DummyComm) -> None: + """ + Test the `setConsoleWidth` RPC method called from Positron. + """ + width = 118 + msg = json_rpc_request( + "call_method", + { + "method": "setConsoleWidth", + "params": [width], + }, + comm_id="dummy_comm_id", + ) + ui_comm.handle_msg(msg) + + # Check that the response is sent, with a result of None. + assert ui_comm.messages == [json_rpc_response(None)] + + # See the comments in positron.ui._set_console_width for a description of these values. 
+ assert os.environ["COLUMNS"] == str(width) + assert np.get_printoptions()["linewidth"] == width + assert pd.get_option("display.width") is None + assert pl.Config.state()["POLARS_TABLE_WIDTH"] == str(width) + if torch is not None: # temporary workaround for Python 3.12 + assert torch._tensor_str.PRINT_OPTS.linewidth == width + + +def test_open_editor(ui_service: UiService, ui_comm: DummyComm) -> None: + file, line, column = "/Users/foo/bar/baz.py", 12, 34 + ui_service.open_editor(file, line, column) + + assert ui_comm.messages == [ + json_rpc_notification("open_editor", {"file": file, "line": line, "column": column}) + ] + + +def test_clear_console(ui_service: UiService, ui_comm: DummyComm) -> None: + ui_service.clear_console() + + assert ui_comm.messages == [json_rpc_notification("clear_console", {})] + + +def test_poll_working_directory(shell: PositronShell, ui_comm: DummyComm) -> None: + # If a cell execution does not change the working directory, no comm messages should be sent. + shell.run_cell("print()") + + assert ui_comm.messages == [] + + # If the working directory *does* change, a working directory event should be sent. + with preserve_working_directory(): + shell.run_cell( + """import os +os.chdir('..')""" + ) + + assert ui_comm.messages == [working_directory_event()] + + +def test_shutdown(ui_service: UiService, ui_comm: DummyComm) -> None: + # Double-check that the comm is not yet closed + assert ui_service._comm is not None + assert not ui_comm._closed + + ui_service.shutdown() + + # Comm is closed + assert ui_comm._closed diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_variables.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_variables.py new file mode 100644 index 00000000000..44c513c0c53 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_variables.py @@ -0,0 +1,408 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. 
All rights reserved. +# + +from __future__ import annotations + +import asyncio +from typing import Any, List, Type, cast +from unittest.mock import Mock + +import numpy as np +import pandas as pd +import polars as pl +import pytest +from positron_ipykernel.access_keys import encode_access_key +from positron_ipykernel.positron_comm import JsonRpcErrorCode +from positron_ipykernel.positron_ipkernel import PositronIPyKernel +from positron_ipykernel.utils import JsonRecord, not_none +from positron_ipykernel.variables import ( + VariablesService, + _summarize_children, + _summarize_variable, +) + +from .conftest import DummyComm, PositronShell +from .utils import ( + assert_dataset_registered, + comm_open_message, + json_rpc_error, + json_rpc_notification, + json_rpc_request, + json_rpc_response, +) + +BIG_ARRAY_LENGTH = 10_000_001 +TARGET_NAME = "target_name" + + +def test_comm_open(kernel: PositronIPyKernel) -> None: + service = VariablesService(kernel) + + # Double-check that comm is not yet open + assert service._comm is None + + # Open a comm + variables_comm = DummyComm(TARGET_NAME) + service.on_comm_open(variables_comm, {}) + + # Check that the comm_open and empty refresh messages were sent + assert variables_comm.messages == [ + comm_open_message(TARGET_NAME), + json_rpc_notification("refresh", {"variables": [], "length": 0, "version": 0}), + ] + + +@pytest.mark.parametrize( + ("import_code", "value_codes"), + [ + # + # Same types + # + ("import numpy as np", [f"np.array({x})" for x in [3, [3], [[3]]]]), + ("import torch", [f"torch.tensor({x})" for x in [3, [3], [[3]]]]), + pytest.param( + "import pandas as pd", + [f"pd.Series({x})" for x in [[], [3], [3, 3], ["3"]]], + ), + pytest.param( + "import polars as pl", + [f"pl.Series({x})" for x in [[], [3], [3, 3], ["3"]]], + ), + ( + "import pandas as pd", + [ + f"pd.DataFrame({x})" + for x in [ + {"a": []}, + {"a": [3]}, + {"a": ["3"]}, + {"a": [3], "b": [3]}, + ] + ], + ), + ( + "import polars as pl", + [ + 
f"pl.DataFrame({x})" + for x in [ + {"a": []}, + {"a": [3]}, + {"a": ["3"]}, + {"a": [3], "b": [3]}, + ] + ], + ), + # + # Changing types + # + ("", ["3", "'3'"]), + ("import numpy as np", ["3", "np.array(3)"]), + ], +) +def test_change_detection( + import_code: str, + value_codes: List[str], + shell: PositronShell, + variables_comm: DummyComm, +) -> None: + """ + Test change detection when updating the value of the same name. + """ + _import_library(shell, import_code) + for value_code in value_codes: + _assert_assigned(shell, value_code, variables_comm) + + +def _assert_assigned(shell: PositronShell, value_code: str, variables_comm: DummyComm): + # Assign the value to a variable. + shell.run_cell(f"x = {value_code}") + + # Test that the expected `update` message was sent with the + # expected `assigned` value. + assert variables_comm.messages == [ + json_rpc_notification( + "update", + { + "assigned": [not_none(_summarize_variable("x", shell.user_ns["x"])).dict()], + "removed": [], + "version": 0, + }, + ) + ] + + # Clear messages for the next assignment. + variables_comm.messages.clear() + + +def _import_library(shell: PositronShell, import_code: str): + # Import the necessary library. 
+ if import_code: + if import_code.endswith("torch"): # temporary workaround for python 3.12 + pytest.skip() + shell.run_cell(import_code) + + +def test_change_detection_over_limit(shell: PositronShell, variables_comm: DummyComm): + _import_library(shell, "import numpy as np") + + big_array = f"x = np.arange({BIG_ARRAY_LENGTH})" + shell.run_cell(big_array) + variables_comm.messages.clear() + + _assert_assigned(shell, big_array, variables_comm) + _assert_assigned(shell, big_array, variables_comm) + _assert_assigned(shell, big_array, variables_comm) + + +def test_handle_refresh(shell: PositronShell, variables_comm: DummyComm) -> None: + shell.user_ns["x"] = 3 + + msg = json_rpc_request("list", comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # A list message is sent + assert variables_comm.messages == [ + json_rpc_response( + { + "variables": [ + not_none(_summarize_variable("x", shell.user_ns["x"])).dict(), + ], + "length": 1, + "version": 0, + } + ) + ] + + +@pytest.mark.asyncio +async def test_handle_clear( + shell: PositronShell, + variables_service: VariablesService, + variables_comm: DummyComm, +) -> None: + shell.user_ns.update({"x": 3, "y": 5}) + + msg = json_rpc_request("clear", {"include_hidden_objects": False}, comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # Wait until all resulting kernel tasks are processed + await asyncio.gather(*variables_service._pending_tasks) + + # We should get a result + assert variables_comm.messages == [ + json_rpc_response({}), + json_rpc_notification( + "update", + { + "assigned": [], + "removed": [encode_access_key("x"), encode_access_key("y")], + "version": 0, + }, + ), + json_rpc_notification("refresh", {"length": 0, "variables": [], "version": 0}), + ] + + # All user variables are removed + assert "x" not in shell.user_ns + assert "y" not in shell.user_ns + + +def test_handle_delete(shell: PositronShell, variables_comm: DummyComm) -> None: + shell.user_ns.update({"x": 3, "y": 5}) + + msg = 
json_rpc_request("delete", {"names": ["x"]}, comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # Only the `x` variable is removed + assert "x" not in shell.user_ns + assert "y" in shell.user_ns + + assert variables_comm.messages == [ + json_rpc_response([encode_access_key("x")]), + ] + + +def test_handle_delete_error(variables_comm: DummyComm) -> None: + msg = json_rpc_request("delete", {"names": ["x"]}, comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # No variables are removed, since there are no variables named `x` + assert variables_comm.messages == [json_rpc_response([])] + + +def _assert_inspect(value: Any, path: List[Any], variables_comm: DummyComm) -> None: + encoded_path = [encode_access_key(key) for key in path] + msg = json_rpc_request( + "inspect", + # TODO(pyright): We shouldn't need to cast; may be a pyright bug + cast(JsonRecord, {"path": encoded_path}), + comm_id="dummy_comm_id", + ) + variables_comm.handle_msg(msg) + + assert len(variables_comm.messages) == 1 + + children = _summarize_children(value) + assert variables_comm.messages == [ + json_rpc_response( + { + "children": [child.dict() for child in children], + "length": len(children), + } + ) + ] + + variables_comm.messages.clear() + + +@pytest.mark.parametrize( + ("value_fn"), + [ + lambda: {0: [0], "0": [1]}, + lambda: pd.DataFrame({0: [0], "0": [1]}), + lambda: pl.DataFrame({"a": [1, 2], "b": ["3", "4"]}), + lambda: np.array([[0, 1], [2, 3]]), + # Inspecting large objects should not trigger update messages: https://github.com/posit-dev/positron/issues/2308. + lambda: np.arange(BIG_ARRAY_LENGTH), + ], +) +def test_handle_inspect(value_fn, shell: PositronShell, variables_comm: DummyComm) -> None: + """ + Test that we can inspect root-level objects. 
+ """ + value = value_fn() + shell.user_ns["x"] = value + + _assert_inspect(value, ["x"], variables_comm) + + +@pytest.mark.parametrize( + ("cls", "data"), + [ + # We should be able to inspect the children of a map/table with keys that have the same string representation. + (dict, {0: [0], "0": [1]}), + (pd.DataFrame, {0: [0], "0": [1]}), + # DataFrames + (pd.DataFrame, {"a": [1, 2], "b": ["3", "4"]}), + (pl.DataFrame, {"a": [1, 2], "b": ["3", "4"]}), + # Arrays + (np.array, [[0, 1], [2, 3]]), # 2D + ], +) +def test_handle_inspect_2d( + cls: Type, data: Any, shell: PositronShell, variables_comm: DummyComm +) -> None: + """ + Test that we can inspect children of "two-dimensional" objects. + """ + value = cls(data) + shell.user_ns["x"] = value + + keys = data.keys() if isinstance(data, dict) else range(len(data)) + for key in keys: + _assert_inspect(value[key], ["x", key], variables_comm) + + +def test_handle_inspect_error(variables_comm: DummyComm) -> None: + path = [encode_access_key("x")] + # TODO(pyright): We shouldn't need to cast; may be a pyright bug + msg = json_rpc_request("inspect", cast(JsonRecord, {"path": path}), comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # An error message is sent + assert variables_comm.messages == [ + json_rpc_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to inspect", + ) + ] + + +def test_handle_clipboard_format(shell: PositronShell, variables_comm: DummyComm) -> None: + shell.user_ns.update({"x": 3, "y": 5}) + + msg = json_rpc_request( + "clipboard_format", + { + "path": [encode_access_key("x")], + "format": "text/plain", + }, + comm_id="dummy_comm_id", + ) + variables_comm.handle_msg(msg) + + assert variables_comm.messages == [json_rpc_response({"content": "3"})] + + +def test_handle_clipboard_format_error(variables_comm: DummyComm) -> None: + path = [encode_access_key("x")] + # TODO(pyright): We shouldn't need to cast; may be a pyright bug + msg = json_rpc_request( + 
"clipboard_format", + cast(JsonRecord, {"path": path, "format": "text/plain"}), + comm_id="dummy_comm_id", + ) + variables_comm.handle_msg(msg) + + # An error message is sent + assert variables_comm.messages == [ + json_rpc_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to format", + ) + ] + + +def test_handle_view( + shell: PositronShell, + variables_comm: DummyComm, + mock_dataexplorer_service: Mock, +) -> None: + shell.user_ns["x"] = pd.DataFrame({"a": [0]}) + + msg = json_rpc_request("view", {"path": [encode_access_key("x")]}, comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # An acknowledgment message is sent + assert variables_comm.messages == [json_rpc_response({})] + + assert_dataset_registered(mock_dataexplorer_service, shell.user_ns["x"], "x") + + +def test_handle_view_error(variables_comm: DummyComm) -> None: + path = [encode_access_key("x")] + # TODO(pyright): We shouldn't need to cast; may be a pyright bug + msg = json_rpc_request("view", cast(JsonRecord, {"path": path}), comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + # An error message is sent + assert variables_comm.messages == [ + json_rpc_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to view", + ) + ] + + +def test_handle_unknown_method(variables_comm: DummyComm) -> None: + msg = json_rpc_request("unknown_method", comm_id="dummy_comm_id") + variables_comm.handle_msg(msg) + + assert variables_comm.messages == [ + json_rpc_error( + JsonRpcErrorCode.METHOD_NOT_FOUND, + "Unknown method 'unknown_method'", + ) + ] + + +@pytest.mark.asyncio +async def test_shutdown(variables_service: VariablesService, variables_comm: DummyComm) -> None: + # Double-check that the comm is not yet closed + assert not variables_comm._closed + + await variables_service.shutdown() + + # Comm is closed + assert variables_comm._closed diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_widget.py 
b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_widget.py new file mode 100644 index 00000000000..9e5adb6fb52 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/test_widget.py @@ -0,0 +1,66 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +from typing import Iterable, cast + +import pytest +from IPython.core.formatters import DisplayFormatter +from positron_ipykernel.widget import PositronWidgetHook + +from .conftest import DummyComm, PositronShell + + +@pytest.fixture(autouse=True) +def setup_shell(shell: PositronShell) -> Iterable[None]: + # Enable all IPython mimetype formatters + display_formatter = cast(DisplayFormatter, shell.display_formatter) + active_types = display_formatter.active_types + display_formatter.active_types = display_formatter.format_types + + yield + + # Restore the original active formatters + display_formatter.active_types = active_types + + +@pytest.fixture +def hook() -> PositronWidgetHook: + return PositronWidgetHook("jupyter.widget", comm_manager=None) + + +@pytest.fixture +def widget_comm(hook: PositronWidgetHook) -> DummyComm: + """ + A comm corresponding to a test widget belonging to the Positron display publisher hook. 
+ """ + # Initialize the hook by calling it on a widget + msg = { + "content": {"data": {"application/vnd.jupyter.widget-view+json": {"model_id": 1234}}}, + "msg_type": "display_data", + } + hook(msg) + + # Return the comm corresponding to the first figure + id = next(iter(hook.comms)) + widget_comm = cast(DummyComm, hook.comms[id]) + + # Clear messages due to the comm_open + widget_comm.messages.clear() + + return widget_comm + + +def test_hook_call_noop_on_non_display_data(hook: PositronWidgetHook) -> None: + msg = {"msg_type": "not_display_data"} + assert hook(msg) == msg + assert hook.comms == {} + + +def test_hook_call_noop_on_no_model_id(hook: PositronWidgetHook) -> None: + msg = { + "content": {"data": {"application/vnd.jupyter.widget-view+json": {}}}, + "msg_type": "display_data", + } + assert hook(msg) == msg + assert hook.comms == {} diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/utils.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/utils.py new file mode 100644 index 00000000000..c24a8457e58 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/tests/utils.py @@ -0,0 +1,125 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import os +from contextlib import contextmanager +from pathlib import Path +from typing import Any, Optional, Set +from unittest.mock import Mock + +from positron_ipykernel._vendor.pydantic import BaseModel +from positron_ipykernel.utils import JsonData, JsonRecord + + +def assert_pydantic_model_equal(actual: BaseModel, expected: BaseModel, exclude: Set[str]) -> None: + actual_dict = actual.dict(exclude=exclude) + expected_dict = expected.dict(exclude=exclude) + assert actual_dict == expected_dict + + +@contextmanager +def preserve_working_directory(): + """ + Reset the working directory after the context exits. 
+ """ + cwd = Path.cwd() + try: + yield + finally: + os.chdir(cwd) + + +def assert_dataset_registered(mock_datatool_service: Mock, obj: Any, title: str) -> None: + call_args_list = mock_datatool_service.register_table.call_args_list + + assert len(call_args_list) == 1 + + passed_table, passed_title = call_args_list[0].args + + assert passed_title == title + + try: + assert passed_table.equals(obj) + except AttributeError: # polars.DataFrame.equals was introduced in v0.19.16 + assert passed_table.frame_equal(obj) + + +def comm_message( + data: Optional[JsonRecord] = None, +) -> JsonRecord: + if data is None: + data = {} + return { + "data": data, + "metadata": None, + "buffers": None, + "msg_type": "comm_msg", + } + + +def comm_request(data: JsonRecord, **kwargs) -> JsonRecord: + return {"content": {"data": data, **kwargs.pop("content", {})}, **kwargs} + + +def comm_open_message(target_name: str, data: Optional[JsonRecord] = None) -> JsonRecord: + return { + **comm_message(data), + "target_name": target_name, + "target_module": None, + "msg_type": "comm_open", + } + + +def json_rpc_error(code: int, message: str) -> JsonRecord: + return comm_message( + { + "jsonrpc": "2.0", + "error": { + "code": code, + "message": message, + }, + } + ) + + +def json_rpc_notification(method: str, params: JsonRecord) -> JsonRecord: + return comm_message( + { + "jsonrpc": "2.0", + "method": method, + "params": params, + } + ) + + +def json_rpc_request( + method: str, + params: Optional[JsonRecord] = None, + **content: JsonData, +) -> JsonRecord: + data = {"params": params} if params else {} + return { + "content": { + "data": { + "jsonrpc": "2.0", + "method": method, + **data, + }, + **content, + }, + } + + +def json_rpc_response(result: JsonData) -> JsonRecord: + return comm_message( + { + "jsonrpc": "2.0", + "result": result, + } + ) + + +# remove "" from value +def get_type_as_str(value: Any) -> str: + return repr(type(value))[8:-2] diff --git 
a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/third_party.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/third_party.py new file mode 100644 index 00000000000..2cfbba16e7c --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/third_party.py @@ -0,0 +1,68 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +# Third-party packages that may be available in the user's environment. +# The convention is to use the popular import name for each package followed by an underscore, +# since we may also need to import the actual package inside an `if TYPE_CHECKING` block for type +# checking. + + +def _get_numpy(): + try: + import numpy + except ImportError: + numpy = None + return numpy + + +def _get_pandas(): + try: + import pandas + except ImportError: + pandas = None + return pandas + + +def _get_polars(): + try: + import polars + except ImportError: + polars = None + return polars + + +def _get_torch(): + try: + import torch # type: ignore [reportMissingImports] for 3.12 + except ImportError: + torch = None + return torch + + +def _get_pyarrow(): + try: + import pyarrow # type: ignore [reportMissingImports] for 3.12 + except ImportError: + pyarrow = None + return pyarrow + + +def _get_sqlalchemy(): + try: + import sqlalchemy + except ImportError: + sqlalchemy = None + return sqlalchemy + + +# Currently, pyright only correctly infers the types below as `Optional` if we set their values +# using function calls. 
+np_ = _get_numpy() +pa_ = _get_pyarrow() +pd_ = _get_pandas() +pl_ = _get_polars() +torch_ = _get_torch() +sqlalchemy_ = _get_sqlalchemy() + +__all__ = ["np_", "pa_", "pd_", "pl_", "torch_", "sqlalchemy_"] diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui.py new file mode 100644 index 00000000000..2242799a983 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui.py @@ -0,0 +1,152 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import logging +import os +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +from comm.base_comm import BaseComm + +from ._vendor.pydantic import BaseModel +from .positron_comm import CommMessage, PositronComm +from .third_party import np_, pd_, pl_, torch_ +from .ui_comm import ( + CallMethodParams, + CallMethodRequest, + OpenEditorParams, + UiBackendMessageContent, + UiFrontendEvent, + WorkingDirectoryParams, +) +from .utils import JsonData, JsonRecord, alias_home + +logger = logging.getLogger(__name__) + + +# +# RPC methods called from the frontend. +# + + +class _InvalidParamsError(Exception): + pass + + +def _set_console_width(params: List[JsonData]) -> None: + if not (isinstance(params, list) and len(params) == 1 and isinstance(params[0], int)): + raise _InvalidParamsError(f"Expected an integer width, got: {params}") + + width = params[0] + + # Set the COLUMNS variable to alter the value returned by shutil.get_terminal_size. + # For example, pandas uses this (if set) to automatically determine display.max_columns. + os.environ["COLUMNS"] = str(width) + + # Library-specific options: + + if np_ is not None: + np_.set_printoptions(linewidth=width) + + if pd_ is not None: + # Set display.width to None so that pandas auto-detects the correct value given the + # terminal width configured via the COLUMNS variable above. 
+ # See: https://pandas.pydata.org/docs/user_guide/options.html + pd_.set_option("display.width", None) + + if pl_ is not None: + pl_.Config.set_tbl_width_chars(width) + + if torch_ is not None: + torch_.set_printoptions(linewidth=width) + + +_RPC_METHODS: Dict[str, Callable[[List[JsonData]], JsonData]] = { + "setConsoleWidth": _set_console_width, +} + + +class UiService: + """ + Wrapper around a comm channel whose lifetime matches that of the Positron frontend. + Used for communication with the frontend, unscoped to any particular view. + """ + + def __init__(self) -> None: + self._comm: Optional[PositronComm] = None + + self._working_directory: Optional[Path] = None + + def on_comm_open(self, comm: BaseComm, msg: JsonRecord) -> None: + self._comm = PositronComm(comm) + self._comm.on_msg(self.handle_msg, UiBackendMessageContent) + + # Clear the current working directory to generate an event for the new + # client (i.e. after a reconnect) + self._working_directory = None + try: + self.poll_working_directory() + except Exception: + logger.exception("Error polling working directory") + + def poll_working_directory(self) -> None: + """ + Polls for changes to the working directory, and sends an event to the + front end if the working directory has changed. 
+ """ + # Get the current working directory + current_dir = Path.cwd() + + # If it isn't the same as the last working directory, send an event + if current_dir != self._working_directory: + self._working_directory = current_dir + # Deliver event to client + if self._comm is not None: + event = WorkingDirectoryParams(directory=str(alias_home(current_dir))) + self._send_event(name=UiFrontendEvent.WorkingDirectory, payload=event) + + def open_editor(self, file: str, line: int, column: int) -> None: + event = OpenEditorParams(file=file, line=line, column=column) + self._send_event(name=UiFrontendEvent.OpenEditor, payload=event) + + def clear_console(self) -> None: + self._send_event(name=UiFrontendEvent.ClearConsole, payload={}) + + def handle_msg(self, msg: CommMessage[UiBackendMessageContent], raw_msg: JsonRecord) -> None: + request = msg.content.data + + if isinstance(request, CallMethodRequest): + # Unwrap nested JSON-RPC + self._call_method(request.params) + + else: + logger.warning(f"Unhandled request: {request}") + + def _call_method(self, rpc_request: CallMethodParams) -> None: + func = _RPC_METHODS.get(rpc_request.method, None) + if func is None: + return logger.warning(f"Invalid frontend RPC request method: {rpc_request.method}") + + try: + result = func(rpc_request.params) + except _InvalidParamsError as exception: + return logger.warning( + f"Invalid frontend RPC request params for method '{rpc_request.method}'. 
{exception}" + ) + + if self._comm is not None: + self._comm.send_result(data=result) + + def shutdown(self) -> None: + if self._comm is not None: + try: + self._comm.close() + except Exception: + pass + + def _send_event(self, name: str, payload: Union[BaseModel, JsonRecord]) -> None: + if self._comm is not None: + if isinstance(payload, BaseModel): + payload = payload.dict() + self._comm.send_event(name=name, payload=payload) diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui_comm.py new file mode 100644 index 00000000000..cbbaa0a3fd2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/ui_comm.py @@ -0,0 +1,329 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +# +# AUTO-GENERATED from ui.json; do not edit. +# + +# flake8: noqa + +# For forward declarations +from __future__ import annotations + +import enum +from typing import Any, List, Literal, Optional, Union + +from ._vendor.pydantic import BaseModel, Field + +Param = Any +CallMethodResult = Any + + +class EditorContext(BaseModel): + """ + Editor metadata + """ + + document: TextDocument = Field( + description="Document metadata", + ) + + contents: List[str] = Field( + description="Document contents", + ) + + selection: Selection = Field( + description="The primary selection, i.e. 
selections[0]", + ) + + selections: List[Selection] = Field( + description="The selections in this text editor.", + ) + + +class TextDocument(BaseModel): + """ + Document metadata + """ + + path: str = Field( + description="URI of the resource viewed in the editor", + ) + + eol: str = Field( + description="End of line sequence", + ) + + is_closed: bool = Field( + description="Whether the document has been closed", + ) + + is_dirty: bool = Field( + description="Whether the document has been modified", + ) + + is_untitled: bool = Field( + description="Whether the document is untitled", + ) + + language_id: str = Field( + description="Language identifier", + ) + + line_count: int = Field( + description="Number of lines in the document", + ) + + version: int = Field( + description="Version number of the document", + ) + + +class Position(BaseModel): + """ + A line and character position, such as the position of the cursor. + """ + + character: int = Field( + description="The zero-based character value, as a Unicode code point offset.", + ) + + line: int = Field( + description="The zero-based line value.", + ) + + +class Selection(BaseModel): + """ + Selection metadata + """ + + active: Position = Field( + description="Position of the cursor.", + ) + + start: Position = Field( + description="Start position of the selection", + ) + + end: Position = Field( + description="End position of the selection", + ) + + text: str = Field( + description="Text of the selection", + ) + + +@enum.unique +class UiBackendRequest(str, enum.Enum): + """ + An enumeration of all the possible requests that can be sent to the backend ui comm. + """ + + # Run a method in the interpreter and return the result to the frontend + CallMethod = "call_method" + + +class CallMethodParams(BaseModel): + """ + Unlike other RPC methods, `call_method` calls into methods implemented + in the interpreter and returns the result back to the frontend using + an implementation-defined serialization scheme. 
+ """ + + method: str = Field( + description="The method to call inside the interpreter", + ) + + params: List[Param] = Field( + description="The parameters for `method`", + ) + + +class CallMethodRequest(BaseModel): + """ + Unlike other RPC methods, `call_method` calls into methods implemented + in the interpreter and returns the result back to the frontend using + an implementation-defined serialization scheme. + """ + + params: CallMethodParams = Field( + description="Parameters to the CallMethod method", + ) + + method: Literal[UiBackendRequest.CallMethod] = Field( + description="The JSON-RPC method name (call_method)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class UiBackendMessageContent(BaseModel): + comm_id: str + data: CallMethodRequest + + +@enum.unique +class UiFrontendEvent(str, enum.Enum): + """ + An enumeration of all the possible events that can be sent to the frontend ui comm. + """ + + # Change in backend's busy/idle status + Busy = "busy" + + # Clear the console + ClearConsole = "clear_console" + + # Open an editor + OpenEditor = "open_editor" + + # Show a message + ShowMessage = "show_message" + + # New state of the primary and secondary prompts + PromptState = "prompt_state" + + # Change the displayed working directory + WorkingDirectory = "working_directory" + + # Execute a Positron command + ExecuteCommand = "execute_command" + + +class BusyParams(BaseModel): + """ + Change in backend's busy/idle status + """ + + busy: bool = Field( + description="Whether the backend is busy", + ) + + +class OpenEditorParams(BaseModel): + """ + Open an editor + """ + + file: str = Field( + description="The path of the file to open", + ) + + line: int = Field( + description="The line number to jump to", + ) + + column: int = Field( + description="The column number to jump to", + ) + + +class ShowMessageParams(BaseModel): + """ + Show a message + """ + + message: str = Field( + description="The 
message to show to the user.", + ) + + +class ShowQuestionParams(BaseModel): + """ + Show a question + """ + + title: str = Field( + description="The title of the dialog", + ) + + message: str = Field( + description="The message to display in the dialog", + ) + + ok_button_title: str = Field( + description="The title of the OK button", + ) + + cancel_button_title: str = Field( + description="The title of the Cancel button", + ) + + +class PromptStateParams(BaseModel): + """ + New state of the primary and secondary prompts + """ + + input_prompt: str = Field( + description="Prompt for primary input.", + ) + + continuation_prompt: str = Field( + description="Prompt for incomplete input.", + ) + + +class WorkingDirectoryParams(BaseModel): + """ + Change the displayed working directory + """ + + directory: str = Field( + description="The new working directory", + ) + + +class DebugSleepParams(BaseModel): + """ + Sleep for n seconds + """ + + ms: float = Field( + description="Duration in milliseconds", + ) + + +class ExecuteCommandParams(BaseModel): + """ + Execute a Positron command + """ + + command: str = Field( + description="The command to execute", + ) + + +EditorContext.update_forward_refs() + +TextDocument.update_forward_refs() + +Position.update_forward_refs() + +Selection.update_forward_refs() + +CallMethodParams.update_forward_refs() + +CallMethodRequest.update_forward_refs() + +BusyParams.update_forward_refs() + +OpenEditorParams.update_forward_refs() + +ShowMessageParams.update_forward_refs() + +ShowQuestionParams.update_forward_refs() + +PromptStateParams.update_forward_refs() + +WorkingDirectoryParams.update_forward_refs() + +DebugSleepParams.update_forward_refs() + +ExecuteCommandParams.update_forward_refs() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/utils.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/utils.py new file mode 100644 index 00000000000..710bbe39d26 --- /dev/null +++ 
b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/utils.py @@ -0,0 +1,331 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import asyncio +import inspect +import numbers +import pprint +import sys +import types +from binascii import b2a_base64 +from datetime import datetime +from pathlib import Path +from typing import Any, Coroutine, Dict, List, Optional, Set, Tuple, TypeVar, Union, cast + +JsonData = Union[Dict[str, "JsonData"], List["JsonData"], str, int, float, bool, None] +JsonRecord = Dict[str, JsonData] + + +T = TypeVar("T") + + +def get_qualname(value: Any) -> str: + """ + Utility to manually construct a qualified type name as + __qualname__ does not work for all types + """ + # Get a named object corresponding to the value, e.g. an instance's class or a property's getter + if ( + isinstance(value, type) + or inspect.ismodule(value) + or callable(value) + or inspect.isgetsetdescriptor(value) + ): + named_obj = value + elif isinstance(value, property): + assert value.fget is not None + named_obj = value.fget + else: + named_obj = type(value) + + qualname = getattr(named_obj, "__qualname__", None) + if qualname is None: + # Fall back to unqualified name if a qualified name doesn't exist + qualname = getattr(named_obj, "__name__", None) + + if qualname is None: + # Some objects may only have a name on a __class__ attribute + class_obj = getattr(named_obj, "__class__", None) + qualname = getattr(class_obj, "__name__", None) + + if qualname is None: + # Finally, try to return the generic type's name, otherwise report object + qualname = getattr(type(value), "__name__", "object") + + # Tell the type checker that it's a string + qualname = cast(str, qualname) + + # If the value is not itself a module, prepend its module name if it exists + if not inspect.ismodule(value): + module = get_module_name(named_obj) + if module is not None and module not in {"builtins", "__main__"}: + qualname = f"{module}.{qualname}" + + 
return qualname + + +def get_module_name(value: Any) -> Optional[str]: + """ + Get the name of the module defining `value`. + """ + # It's already a module, return its name + if inspect.ismodule(value): + return value.__name__ + + # Try to use its __module__ attribute + module = getattr(value, "__module__", None) + if module is not None: + return module + + # Handle numpy ufuncs which don't have a __module__ attribute but which we can assume is "numpy" + if is_numpy_ufunc(value): + return "numpy" + + # Handle getset_descriptors (e.g. numpy.float_.base) which don't have a __module__, by + # finding its module via the __objclass__ attribute + obj_class = getattr(value, "__objclass__", None) + if obj_class is not None: + return obj_class.__module__ + + # We couldn't infer the module name + return None + + +def is_numpy_ufunc(object: Any) -> bool: + # We intentionally don't use get_qualname here to avoid an infinite recursion + object_type = type(object) + return ( + getattr(object_type, "__module__") == "numpy" + and getattr(object_type, "__name__") == "ufunc" + ) + + +def pretty_format( + value, + print_width: Optional[int] = None, + truncate_at: Optional[int] = None, +) -> Tuple[str, bool]: + if print_width is not None: + s = pprint.pformat(value, width=print_width, compact=True) + else: + s = str(value) + + # TODO: Add type aware truncation + if truncate_at is not None: + return truncate_string(s, truncate_at) + + return s, False + + +def truncate_string(value: str, max: int) -> Tuple[str, bool]: + if len(value) > max: + return (value[:max], True) + else: + return (value, False) + + +ISO8601 = "%Y-%m-%dT%H:%M:%S.%f" + + +# We can't use ipykernel's json_clean function directly as it has since been +# deactivated. JSON message cleaning in jupyter_client will also be removed in +# the near future. We keep a copy below and adjust it for display-only use. 
+# +# The original function is available in the ipykernel module and was made +# available under the following license: +# +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. +# +def json_clean(obj): + # types that are 'atomic' and ok in json as-is. + atomic_ok = (str, type(None)) + + # containers that we need to convert into lists + container_to_list = (tuple, set, types.GeneratorType) + + # Since bools are a subtype of Integrals, which are a subtype of Reals, + # we have to check them in that order. + + if isinstance(obj, bool): + return obj + + if isinstance(obj, numbers.Integral): + # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598) + return int(obj) + + if isinstance(obj, numbers.Real): + # use string repr to avoid precision issues with JSON + return repr(obj) + + if isinstance(obj, atomic_ok): + return obj + + if isinstance(obj, bytes): + # unanmbiguous binary data is base64-encoded + # (this probably should have happened upstream) + return b2a_base64(obj, newline=False).decode("ascii") + + if isinstance(obj, container_to_list) or ( + hasattr(obj, "__iter__") and hasattr(obj, "__next__") + ): + obj = list(obj) + + if isinstance(obj, list): + return [json_clean(x) for x in obj] + + if isinstance(obj, dict): + # First, validate that the dict won't lose data in conversion due to + # key collisions after stringification. This can happen with keys like + # True and 'true' or 1 and '1', which collide in JSON. 
+ nkeys = len(obj) + nkeys_collapsed = len(set(map(str, obj))) + if nkeys != nkeys_collapsed: + msg = ( + "dict cannot be safely converted to JSON: " + "key collision would lead to dropped values" + ) + raise ValueError(msg) + # If all OK, proceed by making the new dict that will be json-safe + out = {} + for k, v in obj.items(): + out[str(k)] = json_clean(v) + return out + + if isinstance(obj, datetime): + return obj.strftime(ISO8601) + + # we don't understand it, it's probably an unserializable object + raise ValueError("Can't clean for JSON: %r" % obj) + + +def create_task(coro: Coroutine, pending_tasks: Set[asyncio.Task], **kwargs) -> asyncio.Task: + """ + Create a strongly referenced task to avoid it being garbage collected. + + Note that the call should hold a strong reference to pending_tasks. + + See the asyncio docs for more info: https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task. + """ + task = asyncio.create_task(coro, **kwargs) + pending_tasks.add(task) + task.add_done_callback(pending_tasks.remove) + return task + + +async def cancel_tasks(tasks: Set[asyncio.Task]) -> None: + """ + Cancel and await a set of tasks. + """ + for task in tasks: + task.cancel() + await asyncio.gather(*tasks) + tasks.clear() + + +def safe_isinstance(obj: Any, module: str, class_name: str, *attrs: str) -> bool: + """ + Check if `obj` is an instance of module.class_name if loaded. + + Adapted from `IPython.core.completer._safe_isinstance`. + """ + if module in sys.modules: + m = sys.modules[module] + for attr in [class_name, *attrs]: + m = getattr(m, attr) + if not isinstance(m, type): + raise ValueError(f"{module}.{class_name}.{'.'.join(attrs)} is not a type") + return isinstance(obj, m) + return False + + +def not_none(value: Optional[T]) -> T: + """ + Assert that a value is not None. + """ + assert value is not None + return value + + +def alias_home(path: Path) -> Path: + """ + Alias the home directory to ~ in a path. 
+ """ + home_dir = Path.home() + try: + # relative_to will raise a ValueError if path is not within the home directory + return Path("~") / path.relative_to(home_dir) + except ValueError: + return path + + +def positron_ipykernel_usage(): + """ + + Positron Console Help + ========================================= + + The Positron Console offers a fully compatible replacement for the standard Python + interpreter, with convenient shell features, special commands, command + history mechanism and output results caching. It is an adapted version of an + [IPython](https://ipython.readthedocs.io/en/stable/) kernel. For more information and + documentation, check out the [Positron Beta GitHub](https://github.com/posit-dev/positron-beta). + + GETTING HELP + ------------ + + Within the Positron Console you have various ways to get help: + + - `?` -> Introduction and overview of IPython's features (this screen). + - `object?` -> View 'object' in Help pane. + - `object??` -> View source code for 'object' + - `help(object)` -> View 'object' in Help pane. + - `%quickref` -> Quick reference of all IPython specific syntax and magics. + + + + MAIN FEATURES + ------------- + + * View tabular data in the data explorer via the %view command. + + * Magic commands: type %magic for information on the magic subsystem. + + * System command aliases, via the %alias command or the configuration file(s). + + * Dynamic object information: + + Typing ?word or word? sends 'word' to the help pane. + + Typing ??word or word?? displays source code for 'word'. + + If you just want to see an object's docstring, type '%pdoc object' (without + quotes, and without % if you have automagic on). + + * Tab completion in the local namespace: + + At any time, hitting tab will complete any available Python commands or + variable names, and show you a list of the possible completions if there's + no unambiguous one. It will also complete filenames in the current directory. 
+ + * Search previous command history in multiple ways: + + - Use arrow keys up/down to navigate through the history of executed commands. + - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches + your history for lines that match what you've typed so far, completing as + much as it can. + + - %hist: search history by index. + + * Persistent command history across sessions. + + * System shell with !. Typing !ls will run 'ls' in the current directory. + + * Verbose and colored exception traceback printouts. See the magic xmode and + xcolor functions for details (just type %magic). + + * Clickable links in exception traceback printouts. + + """ + pass diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables.py new file mode 100644 index 00000000000..a9d2b514436 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables.py @@ -0,0 +1,726 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. 
+# +from __future__ import annotations + +import asyncio +import logging +import time +import types +from collections.abc import Iterable, Mapping +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple + +from comm.base_comm import BaseComm + +from .access_keys import decode_access_key, encode_access_key +from .inspectors import get_inspector +from .positron_comm import CommMessage, JsonRpcErrorCode, PositronComm +from .utils import JsonData, JsonRecord, cancel_tasks, create_task, get_qualname +from .variables_comm import ( + ClearRequest, + ClipboardFormatFormat, + ClipboardFormatRequest, + DeleteRequest, + FormattedVariable, + InspectedVariable, + InspectRequest, + ListRequest, + RefreshParams, + UpdateParams, + Variable, + VariableKind, + VariableList, + VariablesBackendMessageContent, + VariablesFrontendEvent, + ViewRequest, +) + +if TYPE_CHECKING: + from .positron_ipkernel import PositronIPyKernel + +logger = logging.getLogger(__name__) + +# Maximum number of children to show in an object's expanded view. +MAX_CHILDREN: int = 100 + +# Maximum number of items to send in an update event. If exceeded, a +# full refresh is sent instead. +MAX_ITEMS: int = 10000 + +# Budget for number of "units" of work to allow for namespace change +# detection. 
The costs are defined in inspectors.py +MAX_SNAPSHOT_COMPARISON_BUDGET: int = 10_000_000 + + +def _resolve_value_from_path(context: Any, path: Iterable[str]) -> Any: + """ + Use inspectors to possibly resolve nested value from context + """ + is_known = False + value = None + for access_key in path: + # Check for membership via inspector + inspector = get_inspector(context) + key = decode_access_key(access_key) + is_known = inspector.has_child(key) + if is_known: + value = inspector.get_child(key) + + # Subsequent segment starts from the value + context = value + + # But we stop if the path segment was unknown + if not is_known: + break + return is_known, value + + +class VariablesService: + def __init__(self, kernel: PositronIPyKernel) -> None: + self.kernel = kernel + + self._comm: Optional[PositronComm] = None + + # Hold strong references to pending tasks to prevent them from being garbage collected + self._pending_tasks: Set[asyncio.Task] = set() + + self._snapshot: Optional[Dict[str, Any]] = None + + def on_comm_open(self, comm: BaseComm, msg: JsonRecord) -> None: + """ + Setup positron.variables comm to receive messages. + """ + self._comm = PositronComm(comm) + self._comm.on_msg(self.handle_msg, VariablesBackendMessageContent) + + # Send list on comm initialization + self.send_refresh_event() + + def handle_msg( + self, + msg: CommMessage[VariablesBackendMessageContent], + raw_msg: JsonRecord, + ) -> None: + """ + Handle messages received from the client via the positron.variables comm. 
+ """ + request = msg.content.data + + if isinstance(request, ListRequest): + self._send_list() + + elif isinstance(request, ClearRequest): + self._delete_all_vars(raw_msg) + + elif isinstance(request, DeleteRequest): + self._delete_vars(request.params.names, raw_msg) + + elif isinstance(request, InspectRequest): + self._inspect_var(request.params.path) + + elif isinstance(request, ClipboardFormatRequest): + self._send_formatted_var(request.params.path, request.params.format) + + elif isinstance(request, ViewRequest): + self._open_data_explorer(request.params.path) + + else: + logger.warning(f"Unhandled request: {request}") + + def _send_update(self, assigned: Mapping[str, Any], removed: Set[str]) -> None: + """ + Sends the list of variables that have changed in the current + user session through the variables comm to the client. + + TODO: Fix below docstring, see positron#2319 + + For example: + { + "data": { + "method": "refresh", + "params: { + "assigned": [{ + "display_name": "newvar1", + "display_value": "Hello", + "kind": "string" + }], + "removed": ["oldvar1", "oldvar2"] + } + } + ... 
+ } + """ + # Look for any assigned or removed variables that are active + # in the data explorer service + exp_service = self.kernel.data_explorer_service + for name in removed: + if exp_service.variable_has_active_explorers(name): + exp_service.handle_variable_deleted(name) + + for name, value in assigned.items(): + if exp_service.variable_has_active_explorers(name): + exp_service.handle_variable_updated(name, value) + + # Ensure the number of changes does not exceed our maximum items + if len(assigned) > MAX_ITEMS or len(removed) > MAX_ITEMS: + return self.send_refresh_event() + + # Filter out hidden assigned variables + variables = self._get_filtered_vars(assigned) + filtered_assigned = _summarize_children(variables) + + # Filter out hidden removed variables and encode access keys + hidden = self._get_user_ns_hidden() + filtered_removed = [ + encode_access_key(name) for name in sorted(removed) if name not in hidden + ] + + if filtered_assigned or filtered_removed: + msg = UpdateParams( + assigned=filtered_assigned, + removed=filtered_removed, + version=0, + ) + self._send_event(VariablesFrontendEvent.Update.value, msg.dict()) + + def send_refresh_event(self) -> None: + """ + Sends a refresh message summarizing the variables of the current user + session through the variables comm to the client. + + For example: + { + "data": { + "method": "refresh", + "variables": [{ + "display_name": "mygreeting", + "display_value": "Hello", + "kind": "string" + }] + } + ... 
+ } + """ + variables = self._get_filtered_vars() + filtered_variables = _summarize_children(variables) + + msg = RefreshParams( + variables=filtered_variables, + length=len(filtered_variables), + version=0, + ) + self._send_event(VariablesFrontendEvent.Refresh.value, msg.dict()) + + async def shutdown(self) -> None: + # Cancel and await pending tasks + await cancel_tasks(self._pending_tasks) + + if self._comm is not None: + try: + self._comm.close() + except Exception: + pass + + def poll_variables(self) -> None: + # First check pre_execute snapshot exists + if self._snapshot is None: + return + + try: + # Try to detect the changes made since the last execution + assigned, removed = self._compare_user_ns() + self._send_update(assigned, removed) + except Exception as err: + logger.warning(err, exc_info=True) + + def snapshot_user_ns(self) -> None: + """ + Creates a conservative "snapshot" of the user namespace to + enable variable change detection without having to do a full + refresh of the variables view any time the user executes + code. Because many objects (any mutable Python collection, or + some data structures like pandas, NumPy, or PyTorch objects) + require a deep copy to support change detection, we only + copy-and-compare such objects up to a certain limit to keep + the execution overhead to a minimum when namespaces get large + or contain many large mutable objects. + """ + ns = self._get_user_ns() + hidden = self._get_user_ns_hidden() + + # Variables which are immutable and thus can be compared by + # reference + immutable_vars = {} + + # Mutable variables which fall within the limit of + # "reasonable" expense for a copy and deep comparison after + # code execution. 
+ mutable_vars_copied = {} + + # Names of mutable variables that are excluded from the change + # detection logic either because the cost is too large or + # cannot be estimated easily (for example, any collection + # containing arbitrary Python objects may be arbitrarily + # expensive to deepcopy and do comparisons on) + mutable_vars_excluded = {} + + comparison_cost = 0 + + start = time.time() + + for key, value in ns.items(): + if key in hidden: + continue + + inspector = get_inspector(value) + + if inspector.is_mutable(): + cost = inspector.get_comparison_cost() + if comparison_cost + cost > MAX_SNAPSHOT_COMPARISON_BUDGET: + mutable_vars_excluded[key] = value + else: + comparison_cost += cost + mutable_vars_copied[key] = inspector.copy() + else: + immutable_vars[key] = value + + self._snapshot = { + "immutable": immutable_vars, + "mutable_copied": mutable_vars_copied, + "mutable_excluded": mutable_vars_excluded, + } + elapsed = time.time() - start + logger.debug(f"Snapshotting namespace took {elapsed:.4f} seconds") + + copied = repr(list(self._snapshot["mutable_copied"].keys())) + logger.debug(f"Variables copied: {copied}") + + def _compare_user_ns(self) -> Tuple[Dict[str, Any], Set[str]]: + """ + Attempts to detect changes to variables in the user's environment. + + Returns: + A tuple (dict, set) containing a dict of variables that + were modified (added or updated) and a set of variables + that were removed. 
+ """ + assigned = {} + removed = set() + + if self._snapshot is None: + return assigned, removed + + after = self._get_user_ns() + hidden = self._get_user_ns_hidden() + + snapshot = self._snapshot + + def _compare_immutable(v1, v2): + # For immutable objects we can compare object references + return v1 is not v2 + + def _compare_mutable(v1, v2): + inspector1 = get_inspector(v1) + inspector2 = get_inspector(v2) + + return type(inspector1) is not type(inspector2) or not inspector1.equals(v2) + + def _compare_always_different(v1, v2): + return True + + all_snapshot_keys = set() + + def _check_ns_subset(ns_subset, are_different_func): + all_snapshot_keys.update(ns_subset.keys()) + + for key, value in ns_subset.items(): + try: + if key in hidden: + continue + + if key not in after: + # Key was removed + removed.add(key) + elif are_different_func(value, after[key]): + assigned[key] = after[key] + except Exception as err: + logger.warning("err: %s", err, exc_info=True) + raise + + start = time.time() + + _check_ns_subset(snapshot["immutable"], _compare_immutable) + _check_ns_subset(snapshot["mutable_copied"], _compare_mutable) + _check_ns_subset(snapshot["mutable_excluded"], _compare_always_different) + + for key, value in after.items(): + if key in hidden: + continue + + if key not in all_snapshot_keys: + assigned[key] = value + + elapsed = time.time() - start + logger.debug(f"Detecting namespace changes took {elapsed:.4f} seconds") + + return assigned, removed + + def _get_user_ns(self) -> Dict[str, Any]: + return self.kernel.shell.user_ns or {} + + def _get_user_ns_hidden(self) -> Dict[str, Any]: + return self.kernel.shell.user_ns_hidden or {} + + # -- Private Methods -- + + def _get_filtered_vars(self, variables: Optional[Mapping[str, Any]] = None) -> Dict[str, Any]: + """ + Returns: + A filtered dict of the variables, excluding hidden variables. If variables + is None, the current user namespace in the environment is used. 
+ """ + hidden = self._get_user_ns_hidden() + + if variables is None: + variables = self._get_user_ns() + + filtered_variables = {} + for key, value in variables.items(): + if key not in hidden: + filtered_variables[key] = value + return filtered_variables + + def _find_var(self, path: Iterable[str]) -> Tuple[bool, Any]: + """ + Finds the variable at the requested path in the current user session. + + Args: + path: A list of path segments that will be traversed to find + the requested variable. + context: The context from which to start the search. + + Returns: + A tuple (bool, Any) containing a boolean indicating whether the + variable was found, as well as the value of the variable, if found. + """ + + if path is None: + return False, None + + return _resolve_value_from_path(self._get_user_ns(), path) + + def _list_all_vars(self) -> List[Variable]: + variables = self._get_filtered_vars() + return _summarize_children(variables) + + def _send_list(self) -> None: + filtered_variables = self._list_all_vars() + msg = VariableList( + variables=filtered_variables, + length=len(filtered_variables), + version=0, + ) + self._send_result(msg.dict()) + + def _delete_all_vars(self, parent: Dict[str, Any]) -> None: + """ + Deletes all of the variables in the current user session. + + Args: + parent: + A dict providing the parent context for the response, + e.g. the client message requesting the clear operation + """ + create_task(self._soft_reset(parent), self._pending_tasks) + + # Notify the frontend that the request is complete. + # Note that this must be received before the update/refresh event from the async task. + self._send_result({}) + + async def _soft_reset(self, parent: Dict[str, Any]) -> None: + """ + Use %reset with the soft switch to delete all user defined + variables from the environment. 
+ """ + # Run the %reset magic to clear user variables + code = "%reset -sf" + await self.kernel.do_execute(code, silent=False, store_history=False) + + # Publish an input to inform clients of the "delete all" operation + self.kernel.publish_execute_input(code, parent) + + # Refresh the client state + self.send_refresh_event() + + def _delete_vars(self, names: Iterable[str], parent: Dict[str, Any]) -> None: + """ + Deletes the requested variables by name from the current user session. + + Args: + names: + A list of variable names to delete + parent: + A dict providing the parent context for the response, + e.g. the client message requesting the delete operation + """ + if names is None: + return + + self.snapshot_user_ns() + + for name in names: + try: + self.kernel.shell.del_var(name, False) + except Exception: + logger.warning(f"Unable to delete variable '{name}'") + pass + + _, removed = self._compare_user_ns() + + # Publish an input to inform clients of the variables that were deleted + if len(removed) > 0: + code = "del " + ", ".join(removed) + self.kernel.publish_execute_input(code, parent) + + # Look for any removed variables that are active in the data + # explorer service + exp_service = self.kernel.data_explorer_service + for name in removed: + if exp_service.variable_has_active_explorers(name): + exp_service.handle_variable_deleted(name) + + self._send_result([encode_access_key(name) for name in sorted(removed)]) + + def _inspect_var(self, path: List[str]) -> None: + """ + Describes the variable at the requested path in the current user session. + + Args: + path: + A list of names describing the path to the variable. 
+ """ + + is_known, value = self._find_var(path) + if is_known: + self._send_details(path, value) + else: + self._send_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to inspect", + ) + + def _open_data_explorer(self, path: List[str]) -> None: + """Opens a DataExplorer comm for the variable at the requested + path in the current user session. + + """ + if path is None: + return + + is_known, value = self._find_var(path) + if not is_known: + return self._send_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to view", + ) + + # Use the leaf segment to get the title + access_key = path[-1] + + title = str(decode_access_key(access_key)) + self.kernel.data_explorer_service.register_table(value, title, variable_path=path) + self._send_result({}) + + def _send_event(self, name: str, payload: JsonRecord) -> None: + """ + Send an event payload to the client. + """ + if self._comm is not None: + self._comm.send_event(name, payload) + else: + logger.warning(f"Cannot send {name} event: comm is not open") + + def _send_error(self, code: JsonRpcErrorCode, message: str) -> None: + """ + Send an error message to the client. + """ + if self._comm is not None: + self._comm.send_error(code, message) + else: + logger.warning(f"Cannot send error {message} (code {code}): comm is not open)") + + def _send_result(self, data: JsonData = None) -> None: + """ + Send an RPC result value to the client. + """ + if self._comm is not None: + self._comm.send_result(data) + else: + logger.warning(f"Cannot send RPC result: {data}: comm is not open") + + def _send_formatted_var( + self, + path: List[str], + clipboard_format: ClipboardFormatFormat = ClipboardFormatFormat.TextPlain, + ) -> None: + """ + Formats the variable at the requested path in the current user session + using the requested clipboard format and sends the result through the + variables comm to the client. 
+ + Args: + path: + A list of names describing the path to the variable. + clipboard_format: + The format to use for the clipboard copy, described as a mime type. + Defaults to "text/plain". + """ + if path is None: + return + + is_known, value = self._find_var(path) + if is_known: + content = _format_value(value, clipboard_format) + msg = FormattedVariable(content=content) + self._send_result(msg.dict()) + else: + self._send_error( + JsonRpcErrorCode.INVALID_PARAMS, + f"Cannot find variable at '{path}' to format", + ) + + def _send_details(self, path: List[str], value: Any = None): + """ + Sends a detailed list of children of the value (or just the value + itself, if is a leaf node on the path) as a message through the + variables comm to the client. + + For example: + { + "data": { + "result": { + "children": [{ + "display_name": "property1", + "display_value": "Hello", + "kind": "string", + "display_type": "str" + },{ + "display_name": "property2", + "display_value": "123", + "kind": "number" + "display_type": "int" + }] + } + } + ... + } + + Args: + path: + A list of names describing the path to the variable. + value: + The variable's value to summarize. + """ + + children = [] + inspector = get_inspector(value) + if inspector.has_children(): + children = _summarize_children(value) + else: + # Otherwise, treat as a simple value at given path + summary = _summarize_variable("", value) + if summary is not None: + children.append(summary) + # TODO: Handle scalar objects with a specific message type + + msg = InspectedVariable(children=children, length=len(children)) + self._send_result(msg.dict()) + + +def _summarize_variable(key: Any, value: Any) -> Optional[Variable]: + """ + Summarizes the given variable into a Variable object. + + Args: + key: + The actual key of the variable in its parent object, used as an input to determine the + variable's string access key. + value: + The variable's value. 
+
+    Returns:
+        A Variable summary, or None if the variable should be skipped.
+    """
+    # Hide module types for now
+    if isinstance(value, types.ModuleType):
+        return None
+
+    try:
+        # Use an inspector to summarize the value
+        ins = get_inspector(value)
+
+        display_name = ins.get_display_name(key)
+        kind_str = ins.get_kind()
+        kind = VariableKind(kind_str)
+        display_value, is_truncated = ins.get_display_value()
+        display_type = ins.get_display_type()
+        type_info = ins.get_type_info()
+        access_key = encode_access_key(key)
+        length = ins.get_length()
+        size = ins.get_size()
+        has_children = ins.has_children()
+        has_viewer = ins.has_viewer()
+
+        return Variable(
+            display_name=display_name,
+            display_value=display_value,
+            display_type=display_type,
+            kind=kind,
+            type_info=type_info,
+            access_key=access_key,
+            length=length,
+            size=size,
+            has_children=has_children,
+            has_viewer=has_viewer,
+            is_truncated=is_truncated,
+        )
+
+    except Exception as err:
+        # Never let a misbehaving object break the variables view: fall back
+        # to a minimal summary built from the key and qualified type name.
+        logger.warning(err, exc_info=True)
+        return Variable(
+            display_name=str(key),
+            display_value=get_qualname(value),
+            display_type="",
+            kind=VariableKind.Other,
+            type_info="",
+            access_key="",
+            length=0,
+            size=0,
+            has_children=False,
+            has_viewer=False,
+            is_truncated=False,
+        )
+
+
+def _summarize_children(parent: Any) -> List[Variable]:
+    """
+    Summarizes up to MAX_CHILDREN children of the given parent object.
+    """
+    children = []
+    for i, (key, value) in enumerate(get_inspector(parent).get_items()):
+        # Stop once the cap is reached so at most MAX_CHILDREN summaries are
+        # produced (the previous `i > MAX_CHILDREN` comparison let one extra
+        # child through).
+        if i >= MAX_CHILDREN:
+            break
+        summary = _summarize_variable(key, value)
+        if summary is not None:
+            children.append(summary)
+    return children
+
+
+def _format_value(value: Any, clipboard_format: ClipboardFormatFormat) -> str:
+    """
+    Formats the given value using the requested clipboard format.
+ """ + inspector = get_inspector(value) + + if clipboard_format == ClipboardFormatFormat.TextHtml: + return inspector.to_html() + else: + return inspector.to_plaintext() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables_comm.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables_comm.py new file mode 100644 index 00000000000..53bae0bca67 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/variables_comm.py @@ -0,0 +1,441 @@ +# +# Copyright (C) 2024 Posit Software, PBC. All rights reserved. +# + +# +# AUTO-GENERATED from variables.json; do not edit. +# + +# flake8: noqa + +# For forward declarations +from __future__ import annotations + +import enum +from typing import Any, List, Literal, Optional, Union + +from ._vendor.pydantic import BaseModel, Field + + +@enum.unique +class ClipboardFormatFormat(str, enum.Enum): + """ + Possible values for Format in ClipboardFormat + """ + + TextHtml = "text/html" + + TextPlain = "text/plain" + + +@enum.unique +class VariableKind(str, enum.Enum): + """ + Possible values for Kind in Variable + """ + + Boolean = "boolean" + + Bytes = "bytes" + + Class = "class" + + Collection = "collection" + + Empty = "empty" + + Function = "function" + + Map = "map" + + Number = "number" + + Other = "other" + + String = "string" + + Table = "table" + + Lazy = "lazy" + + +class VariableList(BaseModel): + """ + A view containing a list of variables in the session. + """ + + variables: List[Variable] = Field( + description="A list of variables in the session.", + ) + + length: int = Field( + description="The total number of variables in the session. 
This may be greater than the number of variables in the 'variables' array if the array is truncated.", + ) + + version: Optional[int] = Field( + default=None, + description="The version of the view (incremented with each update)", + ) + + +class InspectedVariable(BaseModel): + """ + An inspected variable. + """ + + children: List[Variable] = Field( + description="The children of the inspected variable.", + ) + + length: int = Field( + description="The total number of children. This may be greater than the number of children in the 'children' array if the array is truncated.", + ) + + +class FormattedVariable(BaseModel): + """ + An object formatted for copying to the clipboard. + """ + + content: str = Field( + description="The formatted content of the variable.", + ) + + +class Variable(BaseModel): + """ + A single variable in the runtime. + """ + + access_key: str = Field( + description="A key that uniquely identifies the variable within the runtime and can be used to access the variable in `inspect` requests", + ) + + display_name: str = Field( + description="The name of the variable, formatted for display", + ) + + display_value: str = Field( + description="A string representation of the variable's value, formatted for display and possibly truncated", + ) + + display_type: str = Field( + description="The variable's type, formatted for display", + ) + + type_info: str = Field( + description="Extended information about the variable's type", + ) + + size: int = Field( + description="The size of the variable's value in bytes", + ) + + kind: VariableKind = Field( + description="The kind of value the variable represents, such as 'string' or 'number'", + ) + + length: int = Field( + description="The number of elements in the variable, if it is a collection", + ) + + has_children: bool = Field( + description="Whether the variable has child variables", + ) + + has_viewer: bool = Field( + description="True if there is a viewer available for this variable (i.e. 
the runtime can handle a 'view' request for this variable)", + ) + + is_truncated: bool = Field( + description="True if the 'value' field is a truncated representation of the variable's value", + ) + + +@enum.unique +class VariablesBackendRequest(str, enum.Enum): + """ + An enumeration of all the possible requests that can be sent to the backend variables comm. + """ + + # List all variables + List = "list" + + # Clear all variables + Clear = "clear" + + # Deletes a set of named variables + Delete = "delete" + + # Inspect a variable + Inspect = "inspect" + + # Format for clipboard + ClipboardFormat = "clipboard_format" + + # Request a viewer for a variable + View = "view" + + +class ListRequest(BaseModel): + """ + Returns a list of all the variables in the current session. + """ + + method: Literal[VariablesBackendRequest.List] = Field( + description="The JSON-RPC method name (list)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ClearParams(BaseModel): + """ + Clears (deletes) all variables in the current session. + """ + + include_hidden_objects: bool = Field( + description="Whether to clear hidden objects in addition to normal variables", + ) + + +class ClearRequest(BaseModel): + """ + Clears (deletes) all variables in the current session. + """ + + params: ClearParams = Field( + description="Parameters to the Clear method", + ) + + method: Literal[VariablesBackendRequest.Clear] = Field( + description="The JSON-RPC method name (clear)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class DeleteParams(BaseModel): + """ + Deletes the named variables from the current session. + """ + + names: List[str] = Field( + description="The names of the variables to delete.", + ) + + +class DeleteRequest(BaseModel): + """ + Deletes the named variables from the current session. 
+ """ + + params: DeleteParams = Field( + description="Parameters to the Delete method", + ) + + method: Literal[VariablesBackendRequest.Delete] = Field( + description="The JSON-RPC method name (delete)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class InspectParams(BaseModel): + """ + Returns the children of a variable, as an array of variables. + """ + + path: List[str] = Field( + description="The path to the variable to inspect, as an array of access keys.", + ) + + +class InspectRequest(BaseModel): + """ + Returns the children of a variable, as an array of variables. + """ + + params: InspectParams = Field( + description="Parameters to the Inspect method", + ) + + method: Literal[VariablesBackendRequest.Inspect] = Field( + description="The JSON-RPC method name (inspect)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ClipboardFormatParams(BaseModel): + """ + Requests a formatted representation of a variable for copying to the + clipboard. + """ + + path: List[str] = Field( + description="The path to the variable to format, as an array of access keys.", + ) + + format: ClipboardFormatFormat = Field( + description="The requested format for the variable, as a MIME type", + ) + + +class ClipboardFormatRequest(BaseModel): + """ + Requests a formatted representation of a variable for copying to the + clipboard. + """ + + params: ClipboardFormatParams = Field( + description="Parameters to the ClipboardFormat method", + ) + + method: Literal[VariablesBackendRequest.ClipboardFormat] = Field( + description="The JSON-RPC method name (clipboard_format)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class ViewParams(BaseModel): + """ + Request that the runtime open a data viewer to display the data in a + variable. 
+ """ + + path: List[str] = Field( + description="The path to the variable to view, as an array of access keys.", + ) + + +class ViewRequest(BaseModel): + """ + Request that the runtime open a data viewer to display the data in a + variable. + """ + + params: ViewParams = Field( + description="Parameters to the View method", + ) + + method: Literal[VariablesBackendRequest.View] = Field( + description="The JSON-RPC method name (view)", + ) + + jsonrpc: str = Field( + default="2.0", + description="The JSON-RPC version specifier", + ) + + +class VariablesBackendMessageContent(BaseModel): + comm_id: str + data: Union[ + ListRequest, + ClearRequest, + DeleteRequest, + InspectRequest, + ClipboardFormatRequest, + ViewRequest, + ] = Field(..., discriminator="method") + + +@enum.unique +class VariablesFrontendEvent(str, enum.Enum): + """ + An enumeration of all the possible events that can be sent to the frontend variables comm. + """ + + # Update variables + Update = "update" + + # Refresh variables + Refresh = "refresh" + + +class UpdateParams(BaseModel): + """ + Update variables + """ + + assigned: List[Variable] = Field( + description="An array of variables that have been newly assigned.", + ) + + removed: List[str] = Field( + description="An array of variable names that have been removed.", + ) + + version: int = Field( + description="The version of the view (incremented with each update), or 0 if the backend doesn't track versions.", + ) + + +class RefreshParams(BaseModel): + """ + Refresh variables + """ + + variables: List[Variable] = Field( + description="An array listing all the variables in the current session.", + ) + + length: int = Field( + description="The number of variables in the current session.", + ) + + version: int = Field( + description="The version of the view (incremented with each update), or 0 if the backend doesn't track versions.", + ) + + +VariableList.update_forward_refs() + +InspectedVariable.update_forward_refs() + 
+FormattedVariable.update_forward_refs() + +Variable.update_forward_refs() + +ListRequest.update_forward_refs() + +ClearParams.update_forward_refs() + +ClearRequest.update_forward_refs() + +DeleteParams.update_forward_refs() + +DeleteRequest.update_forward_refs() + +InspectParams.update_forward_refs() + +InspectRequest.update_forward_refs() + +ClipboardFormatParams.update_forward_refs() + +ClipboardFormatRequest.update_forward_refs() + +ViewParams.update_forward_refs() + +ViewRequest.update_forward_refs() + +UpdateParams.update_forward_refs() + +RefreshParams.update_forward_refs() diff --git a/extensions/positron-python/pythonFiles/positron/positron_ipykernel/widget.py b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/widget.py new file mode 100644 index 00000000000..f9682e1cde9 --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_ipykernel/widget.py @@ -0,0 +1,96 @@ +# +# Copyright (C) 2023-2024 Posit Software, PBC. All rights reserved. +# + +import enum +import logging +from typing import Dict, List, Optional + +import comm + +from ._vendor.pydantic import BaseModel, Field, ValidationError + +logger = logging.getLogger(__name__) + +_WIDGET_MIME_TYPE = "application/vnd.jupyter.widget-view+json" + + +@enum.unique +class WidgetRequest(str, enum.Enum): + """ + The possible types of messages that can be sent to the frontend as + requests from the language runtime. + """ + + # A request to display a widget + display = "display" + + +class WidgetDisplayMessage(BaseModel): + """ + A message used to request the frontend display a specific widget or list of widgets. 
+ """ + + msg_type: WidgetRequest = WidgetRequest.display + view_ids: List[str] = Field( + default_factory=list, + description="The list of widget view ids to display", + ) + + +class PositronWidgetHook: + def __init__(self, target_name, comm_manager): + self.comms: Dict[str, comm.base_comm.BaseComm] = {} + self.target_name = target_name + self.comm_manager = comm_manager + + def __call__(self, msg, *args, **kwargs) -> Optional[dict]: + if msg["msg_type"] == "display_data": + # If there is no widget, let the parent deal with the msg. + data = msg["content"]["data"] + if _WIDGET_MIME_TYPE not in data: + logger.warning("No widget MIME type found.") + return msg + + comm_id = data[_WIDGET_MIME_TYPE].get("model_id") + + if comm_id is None: + logger.warning("No comm associated with widget.") + return msg + + # find comm associated with the widget + self.comms[comm_id] = self.comm_manager.get_comm(comm_id) + self._receive_message(comm_id) + + return None + + return msg + + def _receive_message(self, comm_id) -> None: + """ + Handle client messages to render a widget figure. + """ + widget_comm = self.comms.get(comm_id) + + if widget_comm is None: + logger.warning(f"Widget comm {comm_id} not found") + return + + try: + data = WidgetDisplayMessage(view_ids=[comm_id]) + except ValidationError as exception: + logger.warning(f"Widget invalid data: {exception}") + return + + widget_comm.send(data=data.dict()) + + def shutdown(self) -> None: + """ + Shutdown widget comms and release any resources. 
+ """ + for widget_comm in self.comms.values(): + try: + widget_comm.close() + except Exception: + pass + self.comms.clear() diff --git a/extensions/positron-python/pythonFiles/positron/positron_language_server.py b/extensions/positron-python/pythonFiles/positron/positron_language_server.py new file mode 100644 index 00000000000..77da0b54b0c --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/positron_language_server.py @@ -0,0 +1,156 @@ +""" +Custom entry point for launching Positron's extensions to the Jedi Language +Server and IPyKernel in the same environment. +""" + +import argparse +import asyncio +import logging +import os +import sys + +from positron_ipykernel.positron_ipkernel import PositronIPKernelApp +from positron_ipykernel.positron_jedilsp import POSITRON +from traitlets.config import Config + +logger = logging.getLogger(__name__) + + +def parse_args() -> argparse.Namespace: + # Given we're using TCP, support a subset of the Jedi LSP configuration + parser = argparse.ArgumentParser( + prog="positron-language-server", + formatter_class=argparse.RawDescriptionHelpFormatter, + description="Positron Jedi language server: an LSP wrapper for jedi.", + ) + + parser.add_argument( + "--debugport", + help="port for debugpy debugger", + type=int, + default=None, + ) + parser.add_argument( + "--logfile", + help="redirect logs to file specified", + type=str, + ) + parser.add_argument( + "--loglevel", + help="logging level", + type=str, + default="error", + choices=["critical", "error", "warn", "info", "debug"], + ) + parser.add_argument( + "-f", + "--connection-file", + help="location of the IPyKernel connection file", + type=str, + ) + parser.add_argument( + "-q", + "--quiet", + help="Suppress console startup banner information", + action="store_true", + ) + parser.add_argument( + "--session-mode", + help="session mode in which the kernel is to be started", + type=str, + default="console", + choices=["console", "notebook", "background"], + ) + 
args = parser.parse_args() + args.loglevel = args.loglevel.upper() + + return args + + +if __name__ == "__main__": + exit_status = 0 + + # Parse command-line arguments + args = parse_args() + + # Start the debugpy debugger if a port was specified + if args.debugport is not None: + try: + import debugpy + + debugpy.listen(args.debugport) + except Exception as error: + logger.warning(f"Unable to start debugpy: {error}", exc_info=True) + + # Configure logging by passing the IPKernelApp traitlets application by passing a logging config + # dict. See: https://docs.python.org/3/library/logging.config.html#logging-config-dictschema for + # more info about this schema. + handlers = ["console"] if args.logfile is None else ["file"] + logging_config = { + "loggers": { + "": { + "level": args.loglevel, + "handlers": handlers, + }, + "PositronIPKernelApp": { + "level": args.loglevel, + "handlers": handlers, + }, + } + } + if args.logfile is not None: + logging_config["handlers"] = { + "file": { + "class": "logging.FileHandler", + "formatter": "console", + "level": args.loglevel, + "filename": args.logfile, + } + } + + # Start Positron's IPyKernel as the interpreter for our console. + # IPKernelApp expects an empty string if no connection_file is provided. + if args.connection_file is None: + args.connection_file = "" + + config = Config( + IPKernelApp={ + "connection_file": args.connection_file, + "log_level": args.loglevel, + "logging_config": logging_config, + }, + ) + + app: PositronIPKernelApp = PositronIPKernelApp.instance(config=config) + # Initialize with empty argv, otherwise BaseIPythonApplication.initialize reuses our + # command-line arguments in unexpected ways (e.g. logfile instructs it to log executed code). + app.initialize(argv=[]) + assert app.kernel is not None, "Kernel was not initialized" + + # Disable the banner if running in quiet mode. 
+ if args.quiet: + app.kernel.shell.banner1 = "" + + app.kernel.start() + + logger.info(f"Process ID {os.getpid()}") + + # IPyKernel uses Tornado which (as of version 5.0) shares the same event + # loop as asyncio. + loop = asyncio.get_event_loop_policy().get_event_loop() + + # Enable asyncio debug mode. + if args.loglevel == "DEBUG": + loop.set_debug(True) + POSITRON.set_debug(True) + + try: + loop.run_forever() + except (KeyboardInterrupt, SystemExit): + logger.exception("Unexpected exception in event loop") + exit_status = 1 + finally: + loop.close() + + logger.info(f"Exiting process with status {exit_status}") + sys.exit(exit_status) diff --git a/extensions/positron-python/pythonFiles/positron/test-requirements.txt b/extensions/positron-python/pythonFiles/positron/test-requirements.txt new file mode 100644 index 00000000000..7e0b188e02b --- /dev/null +++ b/extensions/positron-python/pythonFiles/positron/test-requirements.txt @@ -0,0 +1,12 @@ +fastcore +ipykernel +ipywidgets +matplotlib +numpy +pandas +polars +pytest<8.1.1 +pytest-asyncio +pytest-mock +torch; python_version < '3.12' +sqlalchemy diff --git a/extensions/positron-python/pythonFiles/printEnvVariables.py b/extensions/positron-python/pythonFiles/printEnvVariables.py new file mode 100644 index 00000000000..353149f237d --- /dev/null +++ b/extensions/positron-python/pythonFiles/printEnvVariables.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import json + +print(json.dumps(dict(os.environ))) diff --git a/extensions/positron-python/pythonFiles/printEnvVariablesToFile.py b/extensions/positron-python/pythonFiles/printEnvVariablesToFile.py new file mode 100644 index 00000000000..a4e0d24abbe --- /dev/null +++ b/extensions/positron-python/pythonFiles/printEnvVariablesToFile.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import os +import sys + +# Last argument is the target file into which we'll write the env variables line by line. +output_file = sys.argv[-1] + +with open(output_file, "w") as outfile: + for key, val in os.environ.items(): + outfile.write(f"{key}={val}\n") diff --git a/extensions/positron-python/pythonFiles/pyproject.toml b/extensions/positron-python/pythonFiles/pyproject.toml new file mode 100644 index 00000000000..878c4a8b6b6 --- /dev/null +++ b/extensions/positron-python/pythonFiles/pyproject.toml @@ -0,0 +1,88 @@ +[tool.autopep8] +max_line_length = 100 + +[tool.black] +include = 'positron\/.*.py$|positron\/positron_ipykernel\/.*.py$|positron\/positron_ipykernel\/test_positron_.*.py$' +exclude = ''' + +( + /( + .data + | .vscode + | lib + | positron/positron_ipykernel/_vendor + )/ +) +''' +line-length = 100 + +[tool.isort] +profile = "black" +extend_skip = ['positron/positron_ipykernel/_vendor'] +known_first_party = ['positron_ipykernel'] +known_third_party = ['positron_ipykernel._vendor'] + +[tool.pyright] +exclude = ['lib', 'positron/positron_ipykernel/_vendor'] +extraPaths = ['lib/python', 'positron/positron_ipykernel/_vendor'] +ignore = [ + # Ignore all pre-existing code with issues + 'get-pip.py', + 'install_debugpy.py', + 'normalizeSelection.py', + 'tensorboard_launcher.py', + 'testlauncher.py', + 'visualstudio_py_testlauncher.py', + 'testing_tools/unittest_discovery.py', + 'testing_tools/adapter/report.py', + 'testing_tools/adapter/util.py', + 'testing_tools/adapter/pytest/_discovery.py', + 'testing_tools/adapter/pytest/_pytest_item.py', + 'tests/debug_adapter/test_install_debugpy.py', + 'tests/unittestadapter/helpers.py', + 'tests/testing_tools/adapter/.data', + 'tests/testing_tools/adapter/test___main__.py', + 'tests/testing_tools/adapter/test_discovery.py', + 'tests/testing_tools/adapter/test_functional.py', + 'tests/testing_tools/adapter/test_report.py', + 'tests/testing_tools/adapter/test_util.py', + 
'tests/testing_tools/adapter/pytest/test_cli.py', + 'tests/testing_tools/adapter/pytest/test_discovery.py', + 'tests/unittestadapter/.data/unittest_skip/unittest_skip_function.py', + 'tests/pytestadapter/helpers.py' +] + +[tool.ruff] +line-length = 100 +lint.ignore = ["E402"] +exclude = [ + # Ignore testing_tools files same as Pyright way + 'get-pip.py', + 'install_debugpy.py', + 'tensorboard_launcher.py', + 'testlauncher.py', + 'visualstudio_py_testlauncher.py', + 'testing_tools/unittest_discovery.py', + 'testing_tools/adapter/util.py', + 'testing_tools/adapter/pytest/_discovery.py', + 'testing_tools/adapter/pytest/_pytest_item.py', + 'tests/debug_adapter/test_install_debugpy.py', + 'tests/testing_tools/adapter/.data', + 'tests/testing_tools/adapter/test___main__.py', + 'tests/testing_tools/adapter/test_discovery.py', + 'tests/testing_tools/adapter/test_functional.py', + 'tests/testing_tools/adapter/test_report.py', + 'tests/testing_tools/adapter/test_util.py', + 'tests/testing_tools/adapter/pytest/test_cli.py', + 'tests/testing_tools/adapter/pytest/test_discovery.py', + 'pythonFiles/testing_tools/*', + 'pythonFiles/testing_tools/adapter/pytest/__init__.py', + 'pythonFiles/tests/pytestadapter/expected_execution_test_output.py', + 'pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py', + 'pythonFiles/tests/unittestadapter/test_utils.py', + # --- Start Positron --- + # Ignore vendored dependencies + 'lib/', + 'positron/positron_ipykernel/_vendor/', + # --- End Positron --- +] diff --git a/extensions/positron-python/pythonFiles/pythonrc.py b/extensions/positron-python/pythonFiles/pythonrc.py new file mode 100644 index 00000000000..374888ddada --- /dev/null +++ b/extensions/positron-python/pythonFiles/pythonrc.py @@ -0,0 +1,80 @@ +import sys + +if sys.platform != "win32": + import readline + +original_ps1 = ">>> " + + +class repl_hooks: + def __init__(self): + self.global_exit = None + self.failure_flag = False + self.original_excepthook = 
sys.excepthook + self.original_displayhook = sys.displayhook + sys.excepthook = self.my_excepthook + sys.displayhook = self.my_displayhook + + def my_displayhook(self, value): + if value is None: + self.failure_flag = False + + self.original_displayhook(value) + + def my_excepthook(self, type, value, traceback): + self.global_exit = value + self.failure_flag = True + + self.original_excepthook(type, value, traceback) + + +def get_last_command(): + # Get the last history item + last_command = "" + if sys.platform != "win32": + last_command = readline.get_history_item(readline.get_current_history_length()) + + return last_command + + +class ps1: + hooks = repl_hooks() + sys.excepthook = hooks.my_excepthook + sys.displayhook = hooks.my_displayhook + + # str will get called for every prompt with exit code to show success/failure + def __str__(self): + exit_code = 0 + if self.hooks.failure_flag: + exit_code = 1 + else: + exit_code = 0 + self.hooks.failure_flag = False + # Guide following official VS Code doc for shell integration sequence: + result = "" + # For non-windows allow recent_command history. 
+ if sys.platform != "win32": + result = "{command_finished}{prompt_started}{prompt}{command_start}{command_executed}{command_line}".format( + command_finished="\x1b]633;D;" + str(exit_code) + "\x07", + prompt_started="\x1b]633;A\x07", + prompt=original_ps1, + command_start="\x1b]633;B\x07", + command_executed="\x1b]633;C\x07", + command_line="\x1b]633;E;" + str(get_last_command()) + "\x07", + ) + else: + result = "{command_finished}{prompt_started}{prompt}{command_start}{command_executed}".format( + command_finished="\x1b]633;D;" + str(exit_code) + "\x07", + prompt_started="\x1b]633;A\x07", + prompt=original_ps1, + command_start="\x1b]633;B\x07", + command_executed="\x1b]633;C\x07", + ) + + # result = f"{chr(27)}]633;D;{exit_code}{chr(7)}{chr(27)}]633;A{chr(7)}{original_ps1}{chr(27)}]633;B{chr(7)}{chr(27)}]633;C{chr(7)}" + + return result + + +if sys.platform != "win32": + sys.ps1 = ps1() diff --git a/extensions/positron-python/pythonFiles/run-jedi-language-server.py b/extensions/positron-python/pythonFiles/run-jedi-language-server.py new file mode 100644 index 00000000000..31095121409 --- /dev/null +++ b/extensions/positron-python/pythonFiles/run-jedi-language-server.py @@ -0,0 +1,11 @@ +import sys +import os + +# Add the lib path to our sys path so jedi_language_server can find its references +EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, os.path.join(EXTENSION_ROOT, "pythonFiles", "lib", "jedilsp")) + + +from jedi_language_server.cli import cli + +sys.exit(cli()) diff --git a/extensions/positron-python/pythonFiles/shell_exec.py b/extensions/positron-python/pythonFiles/shell_exec.py new file mode 100644 index 00000000000..4987399a53e --- /dev/null +++ b/extensions/positron-python/pythonFiles/shell_exec.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import subprocess +import sys + +# This is a simple solution to waiting for completion of commands sent to terminal. +# 1. Intercept commands send to a terminal +# 2. Send commands to our script file with an additional argument +# 3. In here create a file that'll log the progress. +# 4. Calling code monitors the contents of the file to determine state of execution. + +# Last argument is a file that's used for synchronizing the actions in the terminal with the calling code in extension. +lock_file = sys.argv[-1] +shell_args = sys.argv[1:-1] + +print("Executing command in shell >> " + " ".join(shell_args)) + +with open(lock_file, "w") as fp: + try: + # Signal start of execution. + fp.write("START\n") + fp.flush() + + subprocess.check_call(shell_args, stdout=sys.stdout, stderr=sys.stderr) + + # Signal start of execution. + fp.write("END\n") + fp.flush() + except Exception: + import traceback + + print(traceback.format_exc()) + # Signal end of execution with failure state. + fp.write("FAIL\n") + fp.flush() + try: + # ALso log the error for use from the other side. 
+ with open(lock_file + ".error", "w") as fpError: + fpError.write(traceback.format_exc()) + except Exception: + pass diff --git a/extensions/positron-python/pythonFiles/tensorboard_launcher.py b/extensions/positron-python/pythonFiles/tensorboard_launcher.py new file mode 100644 index 00000000000..bad1ef09fc6 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tensorboard_launcher.py @@ -0,0 +1,36 @@ +import time +import sys +import os +import mimetypes +from tensorboard import program + + +def main(logdir): + # Environment variable for PyTorch profiler TensorBoard plugin + # to detect when it's running inside VS Code + os.environ["VSCODE_TENSORBOARD_LAUNCH"] = "1" + + # Work around incorrectly configured MIME types on Windows + mimetypes.add_type("application/javascript", ".js") + + # Start TensorBoard using their Python API + tb = program.TensorBoard() + tb.configure(bind_all=False, logdir=logdir) + url = tb.launch() + sys.stdout.write("TensorBoard started at %s\n" % (url)) + sys.stdout.flush() + + while True: + try: + time.sleep(60) + except KeyboardInterrupt: + break + sys.stdout.write("TensorBoard is shutting down") + sys.stdout.flush() + + +if __name__ == "__main__": + if len(sys.argv) == 2: + logdir = str(sys.argv[1]) + sys.stdout.write("Starting TensorBoard with logdir %s" % (logdir)) + main(logdir) diff --git a/extensions/positron-python/pythonFiles/testing_tools/__init__.py b/extensions/positron-python/pythonFiles/testing_tools/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/__init__.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/__main__.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/__main__.py new file mode 100644 index 00000000000..218456897df --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/__main__.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +import argparse +import sys + +from . import pytest, report +from .errors import UnsupportedCommandError, UnsupportedToolError + +TOOLS = { + "pytest": { + "_add_subparser": pytest.add_cli_subparser, + "discover": pytest.discover, + }, +} +REPORTERS = { + "discover": report.report_discovered, +} + + +def parse_args( + # the args to parse + argv=sys.argv[1:], + # the program name + prog=sys.argv[0], +): + """ + Return the subcommand & tool to run, along with its args. + + This defines the standard CLI for the different testing frameworks. + """ + parser = argparse.ArgumentParser( + description="Run Python testing operations.", + prog=prog, + # ... + ) + cmdsubs = parser.add_subparsers(dest="cmd") + + # Add "run" and "debug" subcommands when ready. 
+ for cmdname in ["discover"]: + sub = cmdsubs.add_parser(cmdname) + subsubs = sub.add_subparsers(dest="tool") + for toolname in sorted(TOOLS): + try: + add_subparser = TOOLS[toolname]["_add_subparser"] + except KeyError: + continue + subsub = add_subparser(cmdname, toolname, subsubs) + if cmdname == "discover": + subsub.add_argument("--simple", action="store_true") + subsub.add_argument( + "--no-hide-stdio", dest="hidestdio", action="store_false" + ) + subsub.add_argument("--pretty", action="store_true") + + # Parse the args! + if "--" in argv: + sep_index = argv.index("--") + toolargs = argv[sep_index + 1 :] + argv = argv[:sep_index] + else: + toolargs = [] + args = parser.parse_args(argv) + ns = vars(args) + + cmd = ns.pop("cmd") + if not cmd: + parser.error("missing command") + + tool = ns.pop("tool") + if not tool: + parser.error("missing tool") + + return tool, cmd, ns, toolargs + + +def main( + toolname, + cmdname, + subargs, + toolargs, + # internal args (for testing): + _tools=TOOLS, + _reporters=REPORTERS, +): + try: + tool = _tools[toolname] + except KeyError: + raise UnsupportedToolError(toolname) + + try: + run = tool[cmdname] + report_result = _reporters[cmdname] + except KeyError: + raise UnsupportedCommandError(cmdname) + + parents, result = run(toolargs, **subargs) + report_result(result, parents, **subargs) + + +if __name__ == "__main__": + tool, cmd, subargs, toolargs = parse_args() + main(tool, cmd, subargs, toolargs) diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/discovery.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/discovery.py new file mode 100644 index 00000000000..798aea1e93f --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/discovery.py @@ -0,0 +1,117 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from __future__ import absolute_import, print_function + +import re + +from .util import fix_fileid, DIRNAME, NORMCASE +from .info import ParentInfo + + +FILE_ID_RE = re.compile( + r""" + ^ + (?: + ( .* [.] (?: py | txt ) \b ) # .txt for doctest files + ( [^.] .* )? + ) + $ + """, + re.VERBOSE, +) + + +def fix_nodeid( + nodeid, + kind, + rootdir=None, + # *, + _fix_fileid=fix_fileid, +): + if not nodeid: + raise ValueError("missing nodeid") + if nodeid == ".": + return nodeid + + fileid = nodeid + remainder = "" + if kind not in ("folder", "file"): + m = FILE_ID_RE.match(nodeid) + if m: + fileid, remainder = m.groups() + elif len(nodeid) > 1: + fileid = nodeid[:2] + remainder = nodeid[2:] + fileid = _fix_fileid(fileid, rootdir) + return fileid + (remainder or "") + + +class DiscoveredTests(object): + """A container for the discovered tests and their parents.""" + + def __init__(self): + self.reset() + + def __len__(self): + return len(self._tests) + + def __getitem__(self, index): + return self._tests[index] + + @property + def parents(self): + return sorted( + self._parents.values(), + # Sort by (name, id). + key=lambda p: (NORMCASE(p.root or p.name), p.id), + ) + + def reset(self): + """Clear out any previously discovered tests.""" + self._parents = {} + self._tests = [] + + def add_test(self, test, parents): + """Add the given test and its parents.""" + parentid = self._ensure_parent(test.path, parents) + # Updating the parent ID and the test ID aren't necessary if the + # provided test and parents (from the test collector) are + # properly generated. However, we play it safe here. + test = test._replace( + # Clean up the ID. 
+ id=fix_nodeid(test.id, "test", test.path.root), + parentid=parentid, + ) + self._tests.append(test) + + def _ensure_parent( + self, + path, + parents, + # *, + _dirname=DIRNAME, + ): + rootdir = path.root + relpath = path.relfile + + _parents = iter(parents) + nodeid, name, kind = next(_parents) + # As in add_test(), the node ID *should* already be correct. + nodeid = fix_nodeid(nodeid, kind, rootdir) + _parentid = nodeid + for parentid, parentname, parentkind in _parents: + # As in add_test(), the parent ID *should* already be correct. + parentid = fix_nodeid(parentid, kind, rootdir) + if kind in ("folder", "file"): + info = ParentInfo(nodeid, kind, name, rootdir, relpath, parentid) + relpath = _dirname(relpath) + else: + info = ParentInfo(nodeid, kind, name, rootdir, None, parentid) + self._parents[(rootdir, nodeid)] = info + nodeid, name, kind = parentid, parentname, parentkind + assert nodeid == "." + info = ParentInfo(nodeid, kind, name=rootdir) + self._parents[(rootdir, nodeid)] = info + + return _parentid diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/errors.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/errors.py new file mode 100644 index 00000000000..3e6ae5189cb --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/errors.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ + +class UnsupportedToolError(ValueError): + def __init__(self, tool): + msg = "unsupported tool {!r}".format(tool) + super(UnsupportedToolError, self).__init__(msg) + self.tool = tool + + +class UnsupportedCommandError(ValueError): + def __init__(self, cmd): + msg = "unsupported cmd {!r}".format(cmd) + super(UnsupportedCommandError, self).__init__(msg) + self.cmd = cmd diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/info.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/info.py new file mode 100644 index 00000000000..d518a29dd97 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/info.py @@ -0,0 +1,119 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from collections import namedtuple + + +class SingleTestPath(namedtuple("TestPath", "root relfile func sub")): + """Where to find a single test.""" + + def __new__(cls, root, relfile, func, sub=None): + self = super(SingleTestPath, cls).__new__( + cls, + str(root) if root else None, + str(relfile) if relfile else None, + str(func) if func else None, + [str(s) for s in sub] if sub else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.root is None: + raise TypeError("missing id") + if self.relfile is None: + raise TypeError("missing kind") + # self.func may be None (e.g. for doctests). + # self.sub may be None. 
+ + +class ParentInfo(namedtuple("ParentInfo", "id kind name root relpath parentid")): + KINDS = ("folder", "file", "suite", "function", "subtest") + + def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): + self = super(ParentInfo, cls).__new__( + cls, + id=str(id) if id else None, + kind=str(kind) if kind else None, + name=str(name) if name else None, + root=str(root) if root else None, + relpath=str(relpath) if relpath else None, + parentid=str(parentid) if parentid else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.kind is None: + raise TypeError("missing kind") + if self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + if self.name is None: + raise TypeError("missing name") + if self.root is None: + if self.parentid is not None or self.kind != "folder": + raise TypeError("missing root") + if self.relpath is not None: + raise TypeError("unexpected relpath {}".format(self.relpath)) + elif self.parentid is None: + raise TypeError("missing parentid") + elif self.relpath is None and self.kind in ("folder", "file"): + raise TypeError("missing relpath") + + +class SingleTestInfo( + namedtuple("TestInfo", "id name path source markers parentid kind") +): + """Info for a single test.""" + + MARKERS = ("skip", "skip-if", "expected-failure") + KINDS = ("function", "doctest") + + def __new__(cls, id, name, path, source, markers, parentid, kind="function"): + self = super(SingleTestInfo, cls).__new__( + cls, + str(id) if id else None, + str(name) if name else None, + path or None, + str(source) if source else None, + [str(marker) for marker in markers or ()], + str(parentid) if parentid else None, + str(kind) if kind else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.name is None: + raise TypeError("missing name") + if self.path is None: + raise 
TypeError("missing path") + if self.source is None: + raise TypeError("missing source") + else: + srcfile, _, lineno = self.source.rpartition(":") + if not srcfile or not lineno or int(lineno) < 0: + raise ValueError("bad source {!r}".format(self.source)) + if self.markers: + badmarkers = [m for m in self.markers if m not in self.MARKERS] + if badmarkers: + raise ValueError("unsupported markers {!r}".format(badmarkers)) + if self.parentid is None: + raise TypeError("missing parentid") + if self.kind is None: + raise TypeError("missing kind") + elif self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + + @property + def root(self): + return self.path.root + + @property + def srcfile(self): + return self.source.rpartition(":")[0] + + @property + def lineno(self): + return int(self.source.rpartition(":")[-1]) diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/__init__.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/__init__.py new file mode 100644 index 00000000000..89b7c066a45 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +from ._cli import add_subparser as add_cli_subparser # noqa: F401 +from ._discovery import discover # noqa: F401 diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_cli.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_cli.py new file mode 100644 index 00000000000..3d3eec09a19 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_cli.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from __future__ import absolute_import + +from ..errors import UnsupportedCommandError + + +def add_subparser(cmd, name, parent): + """Add a new subparser to the given parent and add args to it.""" + parser = parent.add_parser(name) + if cmd == "discover": + # For now we don't have any tool-specific CLI options to add. + pass + else: + raise UnsupportedCommandError(cmd) + return parser diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_discovery.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_discovery.py new file mode 100644 index 00000000000..4b852ecf81c --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_discovery.py @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, print_function + +import sys + +import pytest + +from .. import discovery, util +from ._pytest_item import parse_item + + +def discover( + pytestargs=None, + hidestdio=False, + # *, + _pytest_main=pytest.main, + _plugin=None, + **_ignored +): + """Return the results of test discovery.""" + if _plugin is None: + _plugin = TestCollector() + + pytestargs = _adjust_pytest_args(pytestargs) + # We use this helper rather than "-pno:terminal" due to possible + # platform-dependent issues. + with util.hide_stdio() if hidestdio else util.noop_cm() as stdio: + ec = _pytest_main(pytestargs, [_plugin]) + # See: https://docs.pytest.org/en/latest/usage.html#possible-exit-codes + if ec == 5: + # No tests were discovered. + pass + elif ec == 1: + # Some tests where collected but with errors. 
+ pass + elif ec != 0: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery failed (exit code {})".format(ec)) + if not _plugin._started: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery did not start") + return ( + _plugin._tests.parents, + list(_plugin._tests), + ) + + +def _adjust_pytest_args(pytestargs): + """Return a corrected copy of the given pytest CLI args.""" + pytestargs = list(pytestargs) if pytestargs else [] + # Duplicate entries should be okay. + pytestargs.insert(0, "--collect-only") + # TODO: pull in code from: + # src/client/testing/pytest/services/discoveryService.ts + # src/client/testing/pytest/services/argsService.ts + return pytestargs + + +class TestCollector(object): + """This is a pytest plugin that collects the discovered tests.""" + + @classmethod + def parse_item(cls, item): + return parse_item(item) + + def __init__(self, tests=None): + if tests is None: + tests = discovery.DiscoveredTests() + self._tests = tests + self._started = False + + # Relevant plugin hooks: + # https://docs.pytest.org/en/latest/reference.html#collection-hooks + + def pytest_collection_modifyitems(self, session, config, items): + self._started = True + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) + + # This hook is not specified in the docs, so we also provide + # the "modifyitems" hook just in case. + def pytest_collection_finish(self, session): + self._started = True + try: + items = session.items + except AttributeError: + # TODO: Is there an alternative? 
+ return + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py new file mode 100644 index 00000000000..ccfe1412231 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py @@ -0,0 +1,610 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +""" +During "collection", pytest finds all the tests it supports. These are +called "items". The process is top-down, mostly tracing down through +the file system. Aside from its own machinery, pytest supports hooks +that find tests. Effectively, pytest starts with a set of "collectors"; +objects that can provide a list of tests and sub-collectors. All +collectors in the resulting tree are visited and the tests aggregated. +For the most part, each test's (and collector's) parent is identified +as the collector that collected it. + +Collectors and items are collectively identified as "nodes". The pytest +API relies on collector and item objects providing specific methods and +attributes. In addition to corresponding base classes, pytest provides +a number of concrete implementations. + +The following are the known pytest node types: + + Node + Collector + FSCollector + Session (the top-level collector) + File + Module + Package + DoctestTextfile + DoctestModule + PyCollector + (Module) + (...) + Class + UnitTestCase + Instance + Item + Function + TestCaseFunction + DoctestItem + +Here are the unique attrs for those classes: + + Node + name + nodeid (readonly) + config + session + (parent) - the parent node + (fspath) - the file from which the node was collected + ---- + own_marksers - explicit markers (e.g. 
with @pytest.mark()) + keywords + extra_keyword_matches + + Item + location - where the actual test source code is: (relfspath, lno, fullname) + user_properties + + PyCollector + module + class + instance + obj + + Function + module + class + instance + obj + function + (callspec) + (fixturenames) + funcargs + originalname - w/o decorations, e.g. [...] for parameterized + + DoctestItem + dtest + obj + +When parsing an item, we make use of the following attributes: + +* name +* nodeid +* __class__ + + __name__ +* fspath +* location +* function + + __name__ + + __code__ + + __closure__ +* own_markers +""" + +from __future__ import absolute_import, print_function + +import sys + +import _pytest.doctest +import _pytest.unittest +import pytest + +from ..info import SingleTestInfo, SingleTestPath +from ..util import NORMCASE, PATH_SEP, fix_fileid + + +def should_never_reach_here(item, **extra): + """Indicates a code path we should never reach.""" + print("The Python extension has run into an unexpected situation") + print("while processing a pytest node during test discovery. Please") + print("Please open an issue at:") + print(" https://github.com/microsoft/vscode-python/issues") + print("and paste the following output there.") + print() + for field, info in _summarize_item(item): + print("{}: {}".format(field, info)) + if extra: + print() + print("extra info:") + for name, info in extra.items(): + print("{:10}".format(name + ":"), end="") + if isinstance(info, str): + print(info) + else: + try: + print(*info) + except TypeError: + print(info) + print() + print("traceback:") + import traceback + + traceback.print_stack() + + msg = "Unexpected pytest node (see printed output)." 
+ exc = NotImplementedError(msg) + exc.item = item + return exc + + +def parse_item( + item, + # *, + _get_item_kind=(lambda *a: _get_item_kind(*a)), + _parse_node_id=(lambda *a: _parse_node_id(*a)), + _split_fspath=(lambda *a: _split_fspath(*a)), + _get_location=(lambda *a: _get_location(*a)), +): + """Return (TestInfo, [suite ID]) for the given item. + + The suite IDs, if any, are in parent order with the item's direct + parent at the beginning. The parent of the last suite ID (or of + the test if there are no suites) is the file ID, which corresponds + to TestInfo.path. + + """ + # _debug_item(item, showsummary=True) + kind, _ = _get_item_kind(item) + # Skip plugin generated tests + if kind is None: + return None, None + + if kind == "function" and item.originalname and item.originalname != item.name: + # split out parametrized decorations `node[params]`) before parsing + # and manually attach parametrized portion back in when done. + parameterized = item.name[len(item.originalname) :] + (parentid, parents, fileid, testfunc, _) = _parse_node_id( + item.nodeid[: -len(parameterized)], kind + ) + nodeid = "{}{}".format(parentid, parameterized) + parents = [(parentid, item.originalname, kind)] + parents + name = parameterized[1:-1] or "" + else: + (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id( + item.nodeid, kind + ) + name = item.name + + # Note: testfunc does not necessarily match item.function.__name__. + # This can result from importing a test function from another module. + + # Figure out the file. + testroot, relfile = _split_fspath(str(item.fspath), fileid, item) + location, fullname = _get_location(item, testroot, relfile) + if kind == "function": + if testfunc and fullname != testfunc + parameterized: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + parameterized=parameterized, + # ... 
+ ) + elif kind == "doctest": + if testfunc and fullname != testfunc and fullname != "[doctest] " + testfunc: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + # ... + ) + testfunc = None + + # Sort out the parent. + if parents: + parentid, _, _ = parents[0] + else: + parentid = None + + # Sort out markers. + # See: https://docs.pytest.org/en/latest/reference.html#marks + markers = set() + for marker in getattr(item, "own_markers", []): + if marker.name == "parameterize": + # We've already covered these. + continue + elif marker.name == "skip": + markers.add("skip") + elif marker.name == "skipif": + markers.add("skip-if") + elif marker.name == "xfail": + markers.add("expected-failure") + # We can add support for other markers as we need them? + + test = SingleTestInfo( + id=nodeid, + name=name, + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=testfunc, + sub=[parameterized] if parameterized else None, + ), + source=location, + markers=sorted(markers) if markers else None, + parentid=parentid, + ) + if parents and parents[-1] == (".", None, "folder"): # This should always be true? + parents[-1] = (".", testroot, "folder") + return test, parents + + +def _split_fspath( + fspath, + fileid, + item, + # *, + _normcase=NORMCASE, +): + """Return (testroot, relfile) for the given fspath. + + "relfile" will match "fileid". + """ + # "fileid" comes from nodeid and is always relative to the testroot + # (with a "./" prefix). There are no guarantees about casing, so we + # normcase just be to sure. + relsuffix = fileid[1:] # Drop (only) the "." prefix. + if not _normcase(fspath).endswith(_normcase(relsuffix)): + raise should_never_reach_here( + item, + fspath=fspath, + fileid=fileid, + # ... + ) + testroot = fspath[: -len(fileid) + 1] # Ignore the "./" prefix. + relfile = "." + fspath[-len(fileid) + 1 :] # Keep the pathsep. 
+ return testroot, relfile + + +def _get_location( + item, + testroot, + relfile, + # *, + _matches_relfile=(lambda *a: _matches_relfile(*a)), + _is_legacy_wrapper=(lambda *a: _is_legacy_wrapper(*a)), + _unwrap_decorator=(lambda *a: _unwrap_decorator(*a)), + _pathsep=PATH_SEP, +): + """Return (loc str, fullname) for the given item.""" + # When it comes to normcase, we favor relfile (from item.fspath) + # over item.location in this function. + + srcfile, lineno, fullname = item.location + if _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + else: + # pytest supports discovery of tests imported from other + # modules. This is reflected by a different filename + # in item.location. + + if _is_legacy_wrapper(srcfile): + srcfile = relfile + unwrapped = _unwrap_decorator(item.function) + if unwrapped is None: + # It was an invalid legacy wrapper so we just say + # "somewhere in relfile". + lineno = None + else: + _srcfile, lineno = unwrapped + if not _matches_relfile(_srcfile, testroot, relfile): + # For legacy wrappers we really expect the wrapped + # function to be in relfile. So here we ignore any + # other file and just say "somewhere in relfile". + lineno = None + elif _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + # Otherwise we just return the info from item.location as-is. + + if not srcfile.startswith("." + _pathsep): + srcfile = "." + _pathsep + srcfile + + if lineno is None: + lineno = -1 # i.e. 
"unknown" + + # from pytest, line numbers are 0-based + location = "{}:{}".format(srcfile, int(lineno) + 1) + return location, fullname + + +def _matches_relfile( + srcfile, + testroot, + relfile, + # *, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Return True if "srcfile" matches the given relfile.""" + testroot = _normcase(testroot) + srcfile = _normcase(srcfile) + relfile = _normcase(relfile) + if srcfile == relfile: + return True + elif srcfile == relfile[len(_pathsep) + 1 :]: + return True + elif srcfile == testroot + relfile[1:]: + return True + else: + return False + + +def _is_legacy_wrapper( + srcfile, + # *, + _pathsep=PATH_SEP, + _pyversion=sys.version_info, +): + """Return True if the test might be wrapped. + + In Python 2 unittest's decorators (e.g. unittest.skip) do not wrap + properly, so we must manually unwrap them. + """ + if _pyversion > (3,): + return False + if (_pathsep + "unittest" + _pathsep + "case.py") not in srcfile: + return False + return True + + +def _unwrap_decorator(func): + """Return (filename, lineno) for the func the given func wraps. + + If the wrapped func cannot be identified then return None. Likewise + for the wrapped filename. "lineno" is None if it cannot be found + but the filename could. 
+ """ + try: + func = func.__closure__[0].cell_contents + except (IndexError, AttributeError): + return None + else: + if not callable(func): + return None + try: + filename = func.__code__.co_filename + except AttributeError: + return None + else: + try: + lineno = func.__code__.co_firstlineno - 1 + except AttributeError: + return (filename, None) + else: + return filename, lineno + + +def _parse_node_id( + testid, + kind, + # *, + _iter_nodes=(lambda *a: _iter_nodes(*a)), +): + """Return the components of the given node ID, in heirarchical order.""" + nodes = iter(_iter_nodes(testid, kind)) + + testid, name, kind = next(nodes) + parents = [] + parameterized = None + if kind == "doctest": + parents = list(nodes) + fileid, _, _ = parents[0] + return testid, parents, fileid, name, parameterized + elif kind is None: + fullname = None + else: + if kind == "subtest": + node = next(nodes) + parents.append(node) + funcid, funcname, _ = node + parameterized = testid[len(funcid) :] + elif kind == "function": + funcname = name + else: + raise should_never_reach_here( + testid, + kind=kind, + # ... + ) + fullname = funcname + + for node in nodes: + parents.append(node) + parentid, name, kind = node + if kind == "file": + fileid = parentid + break + elif fullname is None: + # We don't guess how to interpret the node ID for these tests. + continue + elif kind == "suite": + fullname = name + "." + fullname + else: + raise should_never_reach_here( + testid, + node=node, + # ... + ) + else: + fileid = None + parents.extend(nodes) # Add the rest in as-is. + + return ( + testid, + parents, + fileid, + fullname, + parameterized or "", + ) + + +def _iter_nodes( + testid, + kind, + # *, + _normalize_test_id=(lambda *a: _normalize_test_id(*a)), + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Yield (nodeid, name, kind) for the given node ID and its parents.""" + nodeid, testid = _normalize_test_id(testid, kind) + if len(nodeid) > len(testid): + testid = "." 
+ _pathsep + testid + + parentid, _, name = nodeid.rpartition("::") + if not parentid: + if kind is None: + # This assumes that plugins can generate nodes that do not + # have a parent. All the builtin nodes have one. + yield (nodeid, name, kind) + return + # We expect at least a filename and a name. + raise should_never_reach_here( + nodeid, + # ... + ) + yield (nodeid, name, kind) + + # Extract the suites. + while "::" in parentid: + suiteid = parentid + parentid, _, name = parentid.rpartition("::") + yield (suiteid, name, "suite") + + # Extract the file and folders. + fileid = parentid + raw = testid[: len(fileid)] + _parentid, _, filename = _normcase(fileid).rpartition(_pathsep) + parentid = fileid[: len(_parentid)] + raw, name = raw[: len(_parentid)], raw[-len(filename) :] + yield (fileid, name, "file") + # We're guaranteed at least one (the test root). + while _pathsep in _normcase(parentid): + folderid = parentid + _parentid, _, foldername = _normcase(folderid).rpartition(_pathsep) + parentid = folderid[: len(_parentid)] + raw, name = raw[: len(parentid)], raw[-len(foldername) :] + yield (folderid, name, "folder") + # We set the actual test root later at the bottom of parse_item(). + testroot = None + yield (parentid, testroot, "folder") + + +def _normalize_test_id( + testid, + kind, + # *, + _fix_fileid=fix_fileid, + _pathsep=PATH_SEP, +): + """Return the canonical form for the given node ID.""" + while "::()::" in testid: + testid = testid.replace("::()::", "::") + while ":::" in testid: + testid = testid.replace(":::", "::") + if kind is None: + return testid, testid + orig = testid + + # We need to keep the testid as-is, or else pytest won't recognize + # it when we try to use it later (e.g. to run a test). The only + # exception is that we add a "./" prefix for relative paths. + # Note that pytest always uses "/" as the path separator in IDs. 
+ fileid, sep, remainder = testid.partition("::") + fileid = _fix_fileid(fileid) + if not fileid.startswith("./"): # Absolute "paths" not expected. + raise should_never_reach_here( + testid, + fileid=fileid, + # ... + ) + testid = fileid + sep + remainder + + return testid, orig + + +def _get_item_kind(item): + """Return (kind, isunittest) for the given item.""" + if isinstance(item, _pytest.doctest.DoctestItem): + return "doctest", False + elif isinstance(item, _pytest.unittest.TestCaseFunction): + return "function", True + elif isinstance(item, pytest.Function): + # We *could* be more specific, e.g. "method", "subtest". + return "function", False + else: + return None, False + + +############################# +# useful for debugging + +_FIELDS = [ + "nodeid", + "kind", + "class", + "name", + "fspath", + "location", + "function", + "markers", + "user_properties", + "attrnames", +] + + +def _summarize_item(item): + if not hasattr(item, "nodeid"): + yield "nodeid", item + return + + for field in _FIELDS: + try: + if field == "kind": + yield field, _get_item_kind(item) + elif field == "class": + yield field, item.__class__.__name__ + elif field == "markers": + yield field, item.own_markers + # yield field, list(item.iter_markers()) + elif field == "attrnames": + yield field, dir(item) + else: + yield field, getattr(item, field, "") + except Exception as exc: + yield field, "".format(exc) + + +def _debug_item(item, showsummary=False): + item._debugging = True + try: + summary = dict(_summarize_item(item)) + finally: + item._debugging = False + + if showsummary: + print(item.nodeid) + for key in ( + "kind", + "class", + "name", + "fspath", + "location", + "func", + "markers", + "props", + ): + print(" {:12} {}".format(key, summary[key])) + print() + + return summary diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/report.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/report.py new file mode 100644 index 
00000000000..bacdef7b9a0 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/report.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import print_function + +import json + + +def report_discovered( + tests, + parents, + # *, + pretty=False, + simple=False, + _send=print, + **_ignored +): + """Serialize the discovered tests and write to stdout.""" + if simple: + data = [ + { + "id": test.id, + "name": test.name, + "testroot": test.path.root, + "relfile": test.path.relfile, + "lineno": test.lineno, + "testfunc": test.path.func, + "subtest": test.path.sub or None, + "markers": test.markers or [], + } + for test in tests + ] + else: + byroot = {} + for parent in parents: + rootdir = parent.name if parent.root is None else parent.root + try: + root = byroot[rootdir] + except KeyError: + root = byroot[rootdir] = { + "id": rootdir, + "parents": [], + "tests": [], + } + if not parent.root: + root["id"] = parent.id + continue + root["parents"].append( + { + # "id" must match what the testing framework recognizes. + "id": parent.id, + "kind": parent.kind, + "name": parent.name, + "parentid": parent.parentid, + } + ) + if parent.relpath is not None: + root["parents"][-1]["relpath"] = parent.relpath + for test in tests: + # We are guaranteed that the parent was added. + root = byroot[test.path.root] + testdata = { + # "id" must match what the testing framework recognizes. + "id": test.id, + "name": test.name, + # TODO: Add a "kind" field + # (e.g. 
"unittest", "function", "doctest") + "source": test.source, + "markers": test.markers or [], + "parentid": test.parentid, + } + root["tests"].append(testdata) + data = [ + { + "rootid": byroot[root]["id"], + "root": root, + "parents": byroot[root]["parents"], + "tests": byroot[root]["tests"], + } + for root in sorted(byroot) + ] + + kwargs = {} + if pretty: + # human-formatted + kwargs = dict( + sort_keys=True, + indent=4, + separators=(",", ": "), + # ... + ) + serialized = json.dumps(data, **kwargs) + + _send(serialized) diff --git a/extensions/positron-python/pythonFiles/testing_tools/adapter/util.py b/extensions/positron-python/pythonFiles/testing_tools/adapter/util.py new file mode 100644 index 00000000000..c7a178311b8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/adapter/util.py @@ -0,0 +1,289 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import contextlib +import io + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # 2.7 + +import os +import os.path +import sys +import tempfile + + +@contextlib.contextmanager +def noop_cm(): + yield + + +def group_attr_names(attrnames): + grouped = { + "dunder": [], + "private": [], + "constants": [], + "classes": [], + "vars": [], + "other": [], + } + for name in attrnames: + if name.startswith("__") and name.endswith("__"): + group = "dunder" + elif name.startswith("_"): + group = "private" + elif name.isupper(): + group = "constants" + elif name.islower(): + group = "vars" + elif name == name.capitalize(): + group = "classes" + else: + group = "other" + grouped[group].append(name) + return grouped + + +if sys.version_info < (3,): + _str_to_lower = lambda val: val.decode().lower() +else: + _str_to_lower = str.lower + + +############################# +# file paths + +_os_path = os.path +# Uncomment to test Windows behavior on non-windows OS: +# import ntpath as _os_path +PATH_SEP = _os_path.sep +NORMCASE = 
_os_path.normcase +DIRNAME = _os_path.dirname +BASENAME = _os_path.basename +IS_ABS_PATH = _os_path.isabs +PATH_JOIN = _os_path.join +ABS_PATH = _os_path.abspath + + +def fix_path( + path, + # *, + _pathsep=PATH_SEP, +): + """Return a platform-appropriate path for the given path.""" + if not path: + return "." + return path.replace("/", _pathsep) + + +def fix_relpath( + path, + # *, + _fix_path=fix_path, + _path_isabs=IS_ABS_PATH, + _pathsep=PATH_SEP, +): + """Return a ./-prefixed, platform-appropriate path for the given path.""" + path = _fix_path(path) + if path in (".", ".."): + return path + if not _path_isabs(path): + if not path.startswith("." + _pathsep): + path = "." + _pathsep + path + return path + + +def _resolve_relpath( + path, + rootdir=None, + # *, + _path_isabs=IS_ABS_PATH, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + # "path" is expected to use "/" for its path separator, regardless + # of the provided "_pathsep". + + if path.startswith("./"): + return path[2:] + if not _path_isabs(path): + return path + + # Deal with root-dir-as-fileid. + _, sep, relpath = path.partition("/") + if sep and not relpath.replace("/", ""): + return "" + + if rootdir is None: + return None + rootdir = _normcase(rootdir) + if not rootdir.endswith(_pathsep): + rootdir += _pathsep + + if not _normcase(path).startswith(rootdir): + return None + return path[len(rootdir) :] + + +def fix_fileid( + fileid, + rootdir=None, + # *, + normalize=False, + strictpathsep=None, + _pathsep=PATH_SEP, + **kwargs +): + """Return a pathsep-separated file ID ("./"-prefixed) for the given value. + + The file ID may be absolute. If so and "rootdir" is + provided then make the file ID relative. If absolute but "rootdir" + is not provided then leave it absolute. + """ + if not fileid or fileid == ".": + return fileid + + # We default to "/" (forward slash) as the final path sep, since + # that gives us a consistent, cross-platform result. 
(Windows does + # actually support "/" as a path separator.) Most notably, node IDs + # from pytest use "/" as the path separator by default. + _fileid = fileid.replace(_pathsep, "/") + + relpath = _resolve_relpath( + _fileid, + rootdir, + _pathsep=_pathsep, + # ... + **kwargs + ) + if relpath: # Note that we treat "" here as an absolute path. + _fileid = "./" + relpath + + if normalize: + if strictpathsep: + raise ValueError("cannot normalize *and* keep strict path separator") + _fileid = _str_to_lower(_fileid) + elif strictpathsep: + # We do not use _normcase since we want to preserve capitalization. + _fileid = _fileid.replace("/", _pathsep) + return _fileid + + +############################# +# stdio + + +@contextlib.contextmanager +def _replace_fd(file, target): + """ + Temporarily replace the file descriptor for `file`, + for which sys.stdout or sys.stderr is passed. + """ + try: + fd = file.fileno() + except (AttributeError, io.UnsupportedOperation): + # `file` does not have fileno() so it's been replaced from the + # default sys.stdout, etc. Return with noop. + yield + return + target_fd = target.fileno() + + # Keep the original FD to be restored in the finally clause. + dup_fd = os.dup(fd) + try: + # Point the FD at the target. + os.dup2(target_fd, fd) + try: + yield + finally: + # Point the FD back at the original. 
+ os.dup2(dup_fd, fd) + finally: + os.close(dup_fd) + + +@contextlib.contextmanager +def _replace_stdout(target): + orig = sys.stdout + sys.stdout = target + try: + yield orig + finally: + sys.stdout = orig + + +@contextlib.contextmanager +def _replace_stderr(target): + orig = sys.stderr + sys.stderr = target + try: + yield orig + finally: + sys.stderr = orig + + +if sys.version_info < (3,): + _coerce_unicode = lambda s: unicode(s) +else: + _coerce_unicode = lambda s: s + + +@contextlib.contextmanager +def _temp_io(): + sio = StringIO() + with tempfile.TemporaryFile("r+") as tmp: + try: + yield sio, tmp + finally: + tmp.seek(0) + buff = tmp.read() + sio.write(_coerce_unicode(buff)) + + +@contextlib.contextmanager +def hide_stdio(): + """Swallow stdout and stderr.""" + with _temp_io() as (sio, fileobj): + with _replace_fd(sys.stdout, fileobj): + with _replace_stdout(fileobj): + with _replace_fd(sys.stderr, fileobj): + with _replace_stderr(fileobj): + yield sio + + +############################# +# shell + + +def shlex_unsplit(argv): + """Return the shell-safe string for the given arguments. + + This effectively the equivalent of reversing shlex.split(). + """ + argv = [_quote_arg(a) for a in argv] + return " ".join(argv) + + +try: + from shlex import quote as _quote_arg +except ImportError: + + def _quote_arg(arg): + parts = None + for i, c in enumerate(arg): + if c.isspace(): + pass + elif c == '"': + pass + elif c == "'": + c = "'\"'\"'" + else: + continue + if parts is None: + parts = list(arg) + parts[i] = c + if parts is not None: + arg = "'" + "".join(parts) + "'" + return arg diff --git a/extensions/positron-python/pythonFiles/testing_tools/process_json_util.py b/extensions/positron-python/pythonFiles/testing_tools/process_json_util.py new file mode 100644 index 00000000000..f116b0d9a8f --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/process_json_util.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. +import io +import json +from typing import List + +CONTENT_LENGTH: str = "Content-Length:" + + +def process_rpc_json(data: str) -> List[str]: + """Process the JSON data which comes from the server.""" + str_stream: io.StringIO = io.StringIO(data) + + length: int = 0 + + while True: + line: str = str_stream.readline() + if CONTENT_LENGTH.lower() in line.lower(): + length = int(line[len(CONTENT_LENGTH) :]) + break + + if not line or line.isspace(): + raise ValueError("Header does not contain Content-Length") + + while True: + line: str = str_stream.readline() + if not line or line.isspace(): + break + + raw_json: str = str_stream.read(length) + return json.loads(raw_json) diff --git a/extensions/positron-python/pythonFiles/testing_tools/run_adapter.py b/extensions/positron-python/pythonFiles/testing_tools/run_adapter.py new file mode 100644 index 00000000000..1eeef194f8f --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/run_adapter.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Replace the "." entry. +import os.path +import sys + +sys.path.insert( + 1, + os.path.dirname( # pythonFiles + os.path.dirname( # pythonFiles/testing_tools + os.path.abspath(__file__) # this file + ) + ), +) + +from testing_tools.adapter.__main__ import parse_args, main + + +if __name__ == "__main__": + tool, cmd, subargs, toolargs = parse_args() + main(tool, cmd, subargs, toolargs) diff --git a/extensions/positron-python/pythonFiles/testing_tools/socket_manager.py b/extensions/positron-python/pythonFiles/testing_tools/socket_manager.py new file mode 100644 index 00000000000..b2afbf0e5a1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/socket_manager.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import socket +import sys + + +class SocketManager(object): + """Create a socket and connect to the given address. + + The address is a (host: str, port: int) tuple. + Example usage: + + ``` + with SocketManager(("localhost", 6767)) as sock: + request = json.dumps(payload) + result = s.socket.sendall(request.encode("utf-8")) + ``` + """ + + def __init__(self, addr): + self.addr = addr + self.socket = None + + def __enter__(self): + return self.connect() + + def __exit__(self, *_): + self.close() + + def connect(self): + self.socket = socket.socket( + socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP + ) + if sys.platform == "win32": + addr_use = socket.SO_EXCLUSIVEADDRUSE + else: + addr_use = socket.SO_REUSEADDR + self.socket.setsockopt(socket.SOL_SOCKET, addr_use, 1) + self.socket.connect(self.addr) + + return self + + def close(self): + if self.socket: + try: + self.socket.shutdown(socket.SHUT_RDWR) + except Exception: + pass + self.socket.close() diff --git a/extensions/positron-python/pythonFiles/testing_tools/unittest_discovery.py b/extensions/positron-python/pythonFiles/testing_tools/unittest_discovery.py new file mode 100644 index 00000000000..2988092c387 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testing_tools/unittest_discovery.py @@ -0,0 +1,65 @@ +import inspect +import os +import sys +import traceback +import unittest + +start_dir = sys.argv[1] +pattern = sys.argv[2] +top_level_dir = sys.argv[3] if len(sys.argv) >= 4 else None +sys.path.insert(0, os.getcwd()) + + +def get_sourceline(obj): + try: + s, n = inspect.getsourcelines(obj) + except: + try: + # this handles `tornado` case we need a better + # way to get to the wrapped function. 
+ # This is a temporary solution + s, n = inspect.getsourcelines(obj.orig_method) + except: + return "*" + + for i, v in enumerate(s): + if v.strip().startswith(("def", "async def")): + return str(n + i) + return "*" + + +def generate_test_cases(suite): + for test in suite: + if isinstance(test, unittest.TestCase): + yield test + else: + for test_case in generate_test_cases(test): + yield test_case + + +try: + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern=pattern, top_level_dir=top_level_dir) + + print("start") # Don't remove this line + loader_errors = [] + for s in generate_test_cases(suite): + tm = getattr(s, s._testMethodName) + testId = s.id() + if testId.startswith("unittest.loader._FailedTest"): + loader_errors.append(s._exception) + else: + print(testId.replace(".", ":") + ":" + get_sourceline(tm)) +except: + print("=== exception start ===") + traceback.print_exc() + print("=== exception end ===") + + +for error in loader_errors: + try: + print("=== exception start ===") + print(error.msg) + print("=== exception end ===") + except: + pass diff --git a/extensions/positron-python/pythonFiles/testlauncher.py b/extensions/positron-python/pythonFiles/testlauncher.py new file mode 100644 index 00000000000..3278815b380 --- /dev/null +++ b/extensions/positron-python/pythonFiles/testlauncher.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import sys + + +def parse_argv(): + """Parses arguments for use with the test launcher. + Arguments are: + 1. Working directory. + 2. Test runner `pytest` + 3. Rest of the arguments are passed into the test runner. 
+ """ + cwd = sys.argv[1] + testRunner = sys.argv[2] + args = sys.argv[3:] + + return (cwd, testRunner, args) + + +def run(cwd, testRunner, args): + """Runs the test + cwd -- the current directory to be set + testRunner -- test runner to be used `pytest` + args -- arguments passed into the test runner + """ + + sys.path[0] = os.getcwd() + os.chdir(cwd) + + try: + if testRunner == "pytest": + import pytest + + pytest.main(args) + sys.exit(0) + finally: + pass + + +if __name__ == "__main__": + cwd, testRunner, args = parse_argv() + run(cwd, testRunner, args) diff --git a/extensions/positron-python/pythonFiles/tests/__init__.py b/extensions/positron-python/pythonFiles/tests/__init__.py new file mode 100644 index 00000000000..4f762cd1f81 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import os.path + +TEST_ROOT = os.path.dirname(__file__) +SRC_ROOT = os.path.dirname(TEST_ROOT) +PROJECT_ROOT = os.path.dirname(SRC_ROOT) +TESTING_TOOLS_ROOT = os.path.join(SRC_ROOT, "testing_tools") +DEBUG_ADAPTER_ROOT = os.path.join(SRC_ROOT, "debug_adapter") + +PYTHONFILES = os.path.join(SRC_ROOT, "lib", "python") diff --git a/extensions/positron-python/pythonFiles/tests/__main__.py b/extensions/positron-python/pythonFiles/tests/__main__.py new file mode 100644 index 00000000000..901385d41d8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/__main__.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import argparse +import sys + +import pytest + +from . 
import DEBUG_ADAPTER_ROOT, SRC_ROOT, TEST_ROOT, TESTING_TOOLS_ROOT + + +def parse_args(): + parser = argparse.ArgumentParser() + # To mark a test as functional: (decorator) @pytest.mark.functional + parser.add_argument( + "--functional", dest="markers", action="append_const", const="functional" + ) + parser.add_argument( + "--no-functional", dest="markers", action="append_const", const="not functional" + ) + args, remainder = parser.parse_known_args() + + ns = vars(args) + + if remainder: + for arg in remainder: + if arg.startswith("-") and arg not in ("-v", "--verbose", "-h", "--help"): + specific = False + break + else: + specific = True + else: + specific = False + args.specific = specific + + return ns, remainder + + +def main(pytestargs, markers=None, specific=False): + sys.path.insert(1, TESTING_TOOLS_ROOT) + sys.path.insert(1, DEBUG_ADAPTER_ROOT) + + if not specific: + pytestargs.insert(0, TEST_ROOT) + pytestargs.insert(0, "--rootdir") + pytestargs.insert(1, SRC_ROOT) + for marker in reversed(markers or ()): + pytestargs.insert(0, marker) + pytestargs.insert(0, "-m") + + ec = pytest.main(pytestargs) + return ec + + +if __name__ == "__main__": + mainkwargs, pytestargs = parse_args() + ec = main(pytestargs, **mainkwargs) + sys.exit(ec) diff --git a/extensions/positron-python/pythonFiles/tests/debug_adapter/__init__.py b/extensions/positron-python/pythonFiles/tests/debug_adapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/debug_adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
diff --git a/extensions/positron-python/pythonFiles/tests/debug_adapter/test_install_debugpy.py b/extensions/positron-python/pythonFiles/tests/debug_adapter/test_install_debugpy.py new file mode 100644 index 00000000000..8e2ed33a1da --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/debug_adapter/test_install_debugpy.py @@ -0,0 +1,32 @@ +import os +import pytest +import subprocess +import sys + + +def _check_binaries(dir_path): + expected_endswith = ( + "win_amd64.pyd", + "win32.pyd", + "darwin.so", + "x86_64-linux-gnu.so", + ) + + binaries = list(p for p in os.listdir(dir_path) if p.endswith(expected_endswith)) + + assert len(binaries) == len(expected_endswith) + + +def test_install_debugpy(tmpdir): + import install_debugpy + + install_debugpy.main(str(tmpdir)) + dir_path = os.path.join( + str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_bundle" + ) + _check_binaries(dir_path) + + dir_path = os.path.join( + str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_frame_eval" + ) + _check_binaries(dir_path) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/nested_folder_one/test_bottom_folder.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/nested_folder_one/test_bottom_folder.py new file mode 100644 index 00000000000..59738aeba37 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/nested_folder_one/test_bottom_folder.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +# This test's id is dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t. +# This test passes. +def test_bottom_function_t(): # test_marker--test_bottom_function_t + assert True + + +# This test's id is dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f. +# This test fails. 
+def test_bottom_function_f(): # test_marker--test_bottom_function_f + assert False diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/test_top_folder.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/test_top_folder.py new file mode 100644 index 00000000000..010c54cf446 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/dual_level_nested_folder/test_top_folder.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +# This test's id is dual_level_nested_folder/test_top_folder.py::test_top_function_t. +# This test passes. +def test_top_function_t(): # test_marker--test_top_function_t + assert True + + +# This test's id is dual_level_nested_folder/test_top_folder.py::test_top_function_f. +# This test fails. +def test_top_function_f(): # test_marker--test_top_function_f + assert False diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/empty_discovery.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/empty_discovery.py new file mode 100644 index 00000000000..5f4ea27aec7 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/empty_discovery.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +# This file has no tests in it; the discovery will return an empty list of tests. 
+def function_function(string): + return string diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py new file mode 100644 index 00000000000..8e48224edf3 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import pytest + + +# This test has an error which will appear on pytest discovery. +# This error is intentional and is meant to test pytest discovery error handling. +@pytest.mark.parametrize("actual,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)]) +def test_function(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt new file mode 100644 index 00000000000..7d65dee2ccc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +@pytest.mark.parametrize("num", range(1, 89)) +def test_odd_even(num): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py new file mode 100644 index 00000000000..2506089abe0 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import pytest + + +@pytest.fixture +def raise_fixture(): + raise Exception("Dummy exception") + + +class TestSomething: + def test_a(self, raise_fixture): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_syntax_discovery.txt b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_syntax_discovery.txt new file mode 100644 index 00000000000..78627fffb35 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/error_syntax_discovery.txt @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# This test has a syntax error. +# This error is intentional and is meant to test pytest discovery error handling. +def test_function() + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/folder_a/folder_b/folder_a/test_nest.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/folder_a/folder_b/folder_a/test_nest.py new file mode 100644 index 00000000000..9ac9f7017f8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/folder_a/folder_b/folder_a/test_nest.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +# This test's id is double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function. +# This test passes. +def test_function(): # test_marker--test_function + assert 1 == 1 diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param1.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param1.py new file mode 100644 index 00000000000..a16d0f49f41 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param1.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import pytest + + +@pytest.mark.parametrize("num", ["a", "b", "c"]) +def test_odd_even(num): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param2.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param2.py new file mode 100644 index 00000000000..c0ea8010e35 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/param_same_name/test_param2.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import pytest + + +@pytest.mark.parametrize("num", range(1, 4)) +def test_odd_even(num): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py new file mode 100644 index 00000000000..c4dbadc32d6 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +# Testing pytest with parametrized tests. The first two pass, the third fails. +# The tests ids are parametrize_tests.py::test_adding[3+5-8] and so on. +@pytest.mark.parametrize( # test_marker--test_adding + "actual, expected", [("3+5", 8), ("2+4", 6), ("6+9", 16)] +) +def test_adding(actual, expected): + assert eval(actual) == expected + + +# Testing pytest with parametrized tests. All three pass. +# The tests ids are parametrize_tests.py::test_under_ten[1] and so on. 
+@pytest.mark.parametrize( # test_marker--test_string + "string", ["hello", "complicated split [] ()"] +) +def test_string(string): + assert string == "hello" diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/pytest.ini b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/pytest.ini new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_a.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_a.py new file mode 100644 index 00000000000..3ec3dd9626c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_a.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +def test_a_function(): # test_marker--test_a_function + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_b.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_b.py new file mode 100644 index 00000000000..0d3148641f8 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/root/tests/test_b.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +def test_b_function(): # test_marker--test_b_function + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/simple_pytest.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/simple_pytest.py new file mode 100644 index 00000000000..9f9bfb014f3 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/simple_pytest.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +# This test passes. 
+def test_function(): # test_marker--test_function + assert 1 == 1 diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/skip_tests.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/skip_tests.py new file mode 100644 index 00000000000..871b0e7bf5c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/skip_tests.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + +# Testing pytest with skipped tests. The first passes, the second three are skipped. + + +def test_something(): # test_marker--test_something + # This tests passes successfully. + assert 1 + 1 == 2 + + +def test_another_thing(): # test_marker--test_another_thing + # Skip this test with a reason. + pytest.skip("Skipping this test for now") + + +@pytest.mark.skip( + reason="Skipping this test as it requires additional setup" # test_marker--test_complex_thing +) +def test_decorator_thing(): + # Skip this test as well, with a reason. This one uses a decorator. + assert True + + +@pytest.mark.skipif(1 < 5, reason="is always true") # test_marker--test_complex_thing_2 +def test_decorator_thing_2(): + # Skip this test as well, with a reason. This one uses a decorator with a condition. + assert True + + +# With this test, the entire class is skipped. 
+@pytest.mark.skip(reason="Skip TestClass") +class TestClass: + def test_class_function_a(self): # test_marker--test_class_function_a + assert True + + def test_class_function_b(self): # test_marker--test_class_function_b + assert False diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_env_vars.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_env_vars.py new file mode 100644 index 00000000000..c8a3add5676 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_env_vars.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os + + +def test_clear_env(monkeypatch): + # Clear all environment variables + monkeypatch.setattr(os, "environ", {}) + + # Now os.environ should be empty + assert not os.environ + + # After the test finishes, the environment variables will be reset to their original state + + +def test_check_env(): + # This test will have access to the original environment variables + assert "PATH" in os.environ + + +def test_clear_env_unsafe(): + # Clear all environment variables + os.environ.clear() + # Now os.environ should be empty + assert not os.environ + + +def test_check_env_unsafe(): + # ("PATH" in os.environ) is False here if it runs after test_clear_env_unsafe. + # Regardless, this test will pass and TEST_PORT and TEST_UUID will still be set correctly + assert "PATH" not in os.environ diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_logging.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_logging.py new file mode 100644 index 00000000000..058ad807571 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_logging.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import logging +import sys + + +def test_logging2(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) + assert False + + +def test_logging(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py new file mode 100644 index 00000000000..209f9d51915 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ + +class TestFirstClass: + class TestSecondClass: + def test_second(self): # test_marker--test_second + assert 1 == 2 + + def test_first(self): # test_marker--test_first + assert 1 == 2 + + class TestSecondClass2: + def test_second2(self): # test_marker--test_second2 + assert 1 == 1 + + +def test_independent(): # test_marker--test_independent + assert 1 == 1 diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/text_docstring.txt b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/text_docstring.txt new file mode 100644 index 00000000000..b29132c10b5 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/text_docstring.txt @@ -0,0 +1,4 @@ +This is a doctest test which passes #test_marker--text_docstring.txt +>>> x = 3 +>>> x +3 diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_add.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_add.py new file mode 100644 index 00000000000..e9bdda0ad2a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_add.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import unittest + + +def add(a, b): + return a + b + + +class TestAddFunction(unittest.TestCase): + # This test's id is unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers. + # This test passes. + def test_add_positive_numbers(self): # test_marker--test_add_positive_numbers + result = add(2, 3) + self.assertEqual(result, 5) + + # This test's id is unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers. + # This test passes. 
+ def test_add_negative_numbers(self): # test_marker--test_add_negative_numbers + result = add(-2, -3) + self.assertEqual(result, -5) + + +class TestDuplicateFunction(unittest.TestCase): + # This test's id is unittest_folder/test_subtract.py::TestDuplicateFunction::test_dup_a. It has the same class name as + # another test, but it's in a different file, so it should not be confused. + # This test passes. + def test_dup_a(self): # test_marker--test_dup_a + self.assertEqual(1, 1) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py new file mode 100644 index 00000000000..634a6d81f9e --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import unittest + + +def subtract(a, b): + return a - b + + +class TestSubtractFunction(unittest.TestCase): + # This test's id is unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers. + # This test passes. + def test_subtract_positive_numbers( # test_marker--test_subtract_positive_numbers + self, + ): + result = subtract(5, 3) + self.assertEqual(result, 2) + + # This test's id is unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers. + # This test passes. + def test_subtract_negative_numbers( # test_marker--test_subtract_negative_numbers + self, + ): + result = subtract(-2, -3) + # This is intentional to test assertion failures + self.assertEqual(result, 100000) + + +class TestDuplicateFunction(unittest.TestCase): + # This test's id is unittest_folder/test_subtract.py::TestDuplicateFunction::test_dup_s. It has the same class name as + # another test, but it's in a different file, so it should not be confused. + # This test passes. 
+ def test_dup_s(self): # test_marker--test_dup_s + self.assertEqual(1, 1) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_pytest_same_file.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_pytest_same_file.py new file mode 100644 index 00000000000..ac66779b9cb --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_pytest_same_file.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class TestExample(unittest.TestCase): + # This test's id is unittest_pytest_same_file.py::TestExample::test_true_unittest. + # Test type is unittest and this test passes. + def test_true_unittest(self): # test_marker--test_true_unittest + assert True + + +# This test's id is unittest_pytest_same_file.py::test_true_pytest. +# Test type is pytest and this test passes. +def test_true_pytest(): # test_marker--test_true_pytest + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_skiptest_file_level.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_skiptest_file_level.py new file mode 100644 index 00000000000..362c74cbb76 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/.data/unittest_skiptest_file_level.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest +from unittest import SkipTest + +# Due to the skip at the file level, no tests will be discovered. 
+raise SkipTest("Skip all tests in this file, they should not be recognized by pytest.") + + +class SimpleTest(unittest.TestCase): + def testadd1(self): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/__init__.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py new file mode 100644 index 00000000000..7fbb0c5c43e --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py @@ -0,0 +1,1071 @@ +import os + + +from .helpers import TEST_DATA_PATH, find_test_line_number, get_absolute_test_id + +# This file contains the expected output dictionaries for tests discovery and is used in test_discovery.py. + +# This is the expected output for the empty_discovery.py file. +# └── +TEST_DATA_PATH_STR = os.fspath(TEST_DATA_PATH) +empty_discovery_pytest_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the simple_pytest.py file. 
+# └── simple_pytest.py +# └── test_function +simple_test_file_path = TEST_DATA_PATH / "simple_pytest.py" +simple_discovery_pytest_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "simple_pytest.py", + "path": os.fspath(simple_test_file_path), + "type_": "file", + "id_": os.fspath(simple_test_file_path), + "children": [ + { + "name": "test_function", + "path": os.fspath(simple_test_file_path), + "lineno": find_test_line_number( + "test_function", + simple_test_file_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "simple_pytest.py::test_function", simple_test_file_path + ), + "runID": get_absolute_test_id( + "simple_pytest.py::test_function", simple_test_file_path + ), + } + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the unittest_pytest_same_file.py file. +# ├── unittest_pytest_same_file.py +# ├── TestExample +# │ └── test_true_unittest +# └── test_true_pytest +unit_pytest_same_file_path = TEST_DATA_PATH / "unittest_pytest_same_file.py" +unit_pytest_same_file_discovery_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "unittest_pytest_same_file.py", + "path": os.fspath(unit_pytest_same_file_path), + "type_": "file", + "id_": os.fspath(unit_pytest_same_file_path), + "children": [ + { + "name": "TestExample", + "path": os.fspath(unit_pytest_same_file_path), + "type_": "class", + "children": [ + { + "name": "test_true_unittest", + "path": os.fspath(unit_pytest_same_file_path), + "lineno": find_test_line_number( + "test_true_unittest", + os.fspath(unit_pytest_same_file_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ), + "runID": get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ), + } + ], + "id_": 
"unittest_pytest_same_file.py::TestExample", + }, + { + "name": "test_true_pytest", + "path": os.fspath(unit_pytest_same_file_path), + "lineno": find_test_line_number( + "test_true_pytest", + unit_pytest_same_file_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", + unit_pytest_same_file_path, + ), + "runID": get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", + unit_pytest_same_file_path, + ), + }, + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the unittest_skip_file_level test. +# └── unittest_skiptest_file_level.py +unittest_skip_file_level_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the unittest_folder tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers +# │ └── test_add_positive_numbers +# │ └── TestDuplicateFunction +# │ └── test_dup_a +# └── test_subtract.py +# └── TestSubtractFunction +# ├── test_subtract_negative_numbers +# └── test_subtract_positive_numbers +# │ └── TestDuplicateFunction +# │ └── test_dup_s +unittest_folder_path = TEST_DATA_PATH / "unittest_folder" +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +unittest_folder_discovery_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "unittest_folder", + "path": os.fspath(unittest_folder_path), + "type_": "folder", + "id_": os.fspath(unittest_folder_path), + "children": [ + { + "name": "test_add.py", + "path": os.fspath(test_add_path), + "type_": "file", + "id_": os.fspath(test_add_path), + "children": [ + { + "name": "TestAddFunction", + "path": os.fspath(test_add_path), + "type_": "class", + "children": [ + { + "name": 
"test_add_negative_numbers", + "path": os.fspath(test_add_path), + "lineno": find_test_line_number( + "test_add_negative_numbers", + os.fspath(test_add_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + test_add_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + test_add_path, + ), + }, + { + "name": "test_add_positive_numbers", + "path": os.fspath(test_add_path), + "lineno": find_test_line_number( + "test_add_positive_numbers", + os.fspath(test_add_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + test_add_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + test_add_path, + ), + }, + ], + "id_": "unittest_folder/test_add.py::TestAddFunction", + }, + { + "name": "TestDuplicateFunction", + "path": os.fspath(test_add_path), + "type_": "class", + "children": [ + { + "name": "test_dup_a", + "path": os.fspath(test_add_path), + "lineno": find_test_line_number( + "test_dup_a", + os.fspath(test_add_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_add.py::TestDuplicateFunction::test_dup_a", + test_add_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_add.py::TestDuplicateFunction::test_dup_a", + test_add_path, + ), + }, + ], + "id_": "unittest_folder/test_add.py::TestDuplicateFunction", + }, + ], + }, + { + "name": "test_subtract.py", + "path": os.fspath(test_subtract_path), + "type_": "file", + "id_": os.fspath(test_subtract_path), + "children": [ + { + "name": "TestSubtractFunction", + "path": os.fspath(test_subtract_path), + "type_": "class", + "children": [ + { + "name": "test_subtract_negative_numbers", + "path": os.fspath(test_subtract_path), + "lineno": find_test_line_number( + 
"test_subtract_negative_numbers", + os.fspath(test_subtract_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + test_subtract_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + test_subtract_path, + ), + }, + { + "name": "test_subtract_positive_numbers", + "path": os.fspath(test_subtract_path), + "lineno": find_test_line_number( + "test_subtract_positive_numbers", + os.fspath(test_subtract_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + test_subtract_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + test_subtract_path, + ), + }, + ], + "id_": "unittest_folder/test_subtract.py::TestSubtractFunction", + }, + { + "name": "TestDuplicateFunction", + "path": os.fspath(test_subtract_path), + "type_": "class", + "children": [ + { + "name": "test_dup_s", + "path": os.fspath(test_subtract_path), + "lineno": find_test_line_number( + "test_dup_s", + os.fspath(test_subtract_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestDuplicateFunction::test_dup_s", + test_subtract_path, + ), + "runID": get_absolute_test_id( + "unittest_folder/test_subtract.py::TestDuplicateFunction::test_dup_s", + test_subtract_path, + ), + }, + ], + "id_": "unittest_folder/test_subtract.py::TestDuplicateFunction", + }, + ], + }, + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + + +# This is the expected output for the dual_level_nested_folder tests +# └── dual_level_nested_folder +# └── test_top_folder.py +# └── test_top_function_t +# └── test_top_function_f +# └── nested_folder_one +# └── test_bottom_folder.py +# └── test_bottom_function_t +# └── test_bottom_function_f 
+dual_level_nested_folder_path = TEST_DATA_PATH / "dual_level_nested_folder" +test_top_folder_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) + +test_nested_folder_one_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "nested_folder_one" +) + +test_bottom_folder_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) + + +dual_level_nested_folder_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "dual_level_nested_folder", + "path": os.fspath(dual_level_nested_folder_path), + "type_": "folder", + "id_": os.fspath(dual_level_nested_folder_path), + "children": [ + { + "name": "test_top_folder.py", + "path": os.fspath(test_top_folder_path), + "type_": "file", + "id_": os.fspath(test_top_folder_path), + "children": [ + { + "name": "test_top_function_t", + "path": os.fspath(test_top_folder_path), + "lineno": find_test_line_number( + "test_top_function_t", + test_top_folder_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + test_top_folder_path, + ), + "runID": get_absolute_test_id( + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + test_top_folder_path, + ), + }, + { + "name": "test_top_function_f", + "path": os.fspath(test_top_folder_path), + "lineno": find_test_line_number( + "test_top_function_f", + test_top_folder_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + test_top_folder_path, + ), + "runID": get_absolute_test_id( + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + test_top_folder_path, + ), + }, + ], + }, + { + "name": "nested_folder_one", + "path": os.fspath(test_nested_folder_one_path), + "type_": "folder", + "id_": os.fspath(test_nested_folder_one_path), + "children": [ + { + "name": 
"test_bottom_folder.py", + "path": os.fspath(test_bottom_folder_path), + "type_": "file", + "id_": os.fspath(test_bottom_folder_path), + "children": [ + { + "name": "test_bottom_function_t", + "path": os.fspath(test_bottom_folder_path), + "lineno": find_test_line_number( + "test_bottom_function_t", + test_bottom_folder_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + test_bottom_folder_path, + ), + "runID": get_absolute_test_id( + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + test_bottom_folder_path, + ), + }, + { + "name": "test_bottom_function_f", + "path": os.fspath(test_bottom_folder_path), + "lineno": find_test_line_number( + "test_bottom_function_f", + test_bottom_folder_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + test_bottom_folder_path, + ), + "runID": get_absolute_test_id( + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + test_bottom_folder_path, + ), + }, + ], + } + ], + }, + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the double_nested_folder tests. 
+# └── folder_a +# └── folder_b +# └── folder_a +# └── test_nest.py +# └── test_function + +folder_a_path = TEST_DATA_PATH / "folder_a" +folder_b_path = TEST_DATA_PATH / "folder_a" / "folder_b" +folder_a_nested_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" +test_nest_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +double_nested_folder_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "folder_a", + "path": os.fspath(folder_a_path), + "type_": "folder", + "id_": os.fspath(folder_a_path), + "children": [ + { + "name": "folder_b", + "path": os.fspath(folder_b_path), + "type_": "folder", + "id_": os.fspath(folder_b_path), + "children": [ + { + "name": "folder_a", + "path": os.fspath(folder_a_nested_path), + "type_": "folder", + "id_": os.fspath(folder_a_nested_path), + "children": [ + { + "name": "test_nest.py", + "path": os.fspath(test_nest_path), + "type_": "file", + "id_": os.fspath(test_nest_path), + "children": [ + { + "name": "test_function", + "path": os.fspath(test_nest_path), + "lineno": find_test_line_number( + "test_function", + test_nest_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", + test_nest_path, + ), + "runID": get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", + test_nest_path, + ), + } + ], + } + ], + } + ], + } + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the nested_folder tests. 
+# └── parametrize_tests.py +# └── test_adding +# └── [3+5-8] +# └── [2+4-6] +# └── [6+9-16] +parameterize_tests_path = TEST_DATA_PATH / "parametrize_tests.py" +parametrize_tests_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "parametrize_tests.py", + "path": os.fspath(parameterize_tests_path), + "type_": "file", + "id_": os.fspath(parameterize_tests_path), + "children": [ + { + "name": "test_adding", + "path": os.fspath(parameterize_tests_path), + "type_": "function", + "id_": "parametrize_tests.py::test_adding", + "children": [ + { + "name": "[3+5-8]", + "path": os.fspath(parameterize_tests_path), + "lineno": find_test_line_number( + "test_adding[3+5-8]", + parameterize_tests_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", + parameterize_tests_path, + ), + "runID": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", + parameterize_tests_path, + ), + }, + { + "name": "[2+4-6]", + "path": os.fspath(parameterize_tests_path), + "lineno": find_test_line_number( + "test_adding[2+4-6]", + parameterize_tests_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", + parameterize_tests_path, + ), + "runID": get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", + parameterize_tests_path, + ), + }, + { + "name": "[6+9-16]", + "path": os.fspath(parameterize_tests_path), + "lineno": find_test_line_number( + "test_adding[6+9-16]", + parameterize_tests_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", + parameterize_tests_path, + ), + "runID": get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", + parameterize_tests_path, + ), + }, + ], + }, + { + "name": "test_string", + "path": os.fspath(parameterize_tests_path), + "type_": "function", + "children": [ + { + "name": "[hello]", + "path": 
os.fspath(parameterize_tests_path), + "lineno": find_test_line_number( + "test_string[hello]", + parameterize_tests_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "parametrize_tests.py::test_string[hello]", + parameterize_tests_path, + ), + "runID": get_absolute_test_id( + "parametrize_tests.py::test_string[hello]", + parameterize_tests_path, + ), + }, + { + "name": "[complicated split [] ()]", + "path": os.fspath(parameterize_tests_path), + "lineno": find_test_line_number( + "test_string[1]", + parameterize_tests_path, + ), + "type_": "test", + "id_": get_absolute_test_id( + "parametrize_tests.py::test_string[complicated split [] ()]", + parameterize_tests_path, + ), + "runID": get_absolute_test_id( + "parametrize_tests.py::test_string[complicated split [] ()]", + parameterize_tests_path, + ), + }, + ], + "id_": "parametrize_tests.py::test_string", + }, + ], + }, + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the text_docstring.txt tests. +# └── text_docstring.txt +text_docstring_path = TEST_DATA_PATH / "text_docstring.txt" +doctest_pytest_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "text_docstring.txt", + "path": os.fspath(text_docstring_path), + "type_": "file", + "id_": os.fspath(text_docstring_path), + "children": [ + { + "name": "text_docstring.txt", + "path": os.fspath(text_docstring_path), + "lineno": find_test_line_number( + "text_docstring.txt", + os.fspath(text_docstring_path), + ), + "type_": "test", + "id_": get_absolute_test_id( + "text_docstring.txt::text_docstring.txt", text_docstring_path + ), + "runID": get_absolute_test_id( + "text_docstring.txt::text_docstring.txt", text_docstring_path + ), + } + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +# This is the expected output for the param_same_name tests. 
+# └── param_same_name +# └── test_param1.py +# └── test_odd_even +# └── [a] +# └── [b] +# └── [c] +# └── test_param2.py +# └── test_odd_even +# └── [1] +# └── [2] +# └── [3] +param1_path = TEST_DATA_PATH / "param_same_name" / "test_param1.py" +param2_path = TEST_DATA_PATH / "param_same_name" / "test_param2.py" +param_same_name_expected_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "param_same_name", + "path": os.fspath(TEST_DATA_PATH / "param_same_name"), + "type_": "folder", + "id_": os.fspath(TEST_DATA_PATH / "param_same_name"), + "children": [ + { + "name": "test_param1.py", + "path": os.fspath(param1_path), + "type_": "file", + "id_": os.fspath(param1_path), + "children": [ + { + "name": "test_odd_even", + "path": os.fspath(param1_path), + "type_": "function", + "children": [ + { + "name": "[a]", + "path": os.fspath(param1_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[a]", + param1_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[a]", + param1_path, + ), + }, + { + "name": "[b]", + "path": os.fspath(param1_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[b]", + param1_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[b]", + param1_path, + ), + }, + { + "name": "[c]", + "path": os.fspath(param1_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[c]", + param1_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param1.py::test_odd_even[c]", + param1_path, + ), + }, + ], + "id_": "param_same_name/test_param1.py::test_odd_even", + } + ], + }, + { + "name": "test_param2.py", + "path": os.fspath(param2_path), + "type_": "file", + "id_": os.fspath(param2_path), + "children": [ + { + 
"name": "test_odd_even", + "path": os.fspath(param2_path), + "type_": "function", + "children": [ + { + "name": "[1]", + "path": os.fspath(param2_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[1]", + param2_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[1]", + param2_path, + ), + }, + { + "name": "[2]", + "path": os.fspath(param2_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[2]", + param2_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[2]", + param2_path, + ), + }, + { + "name": "[3]", + "path": os.fspath(param2_path), + "lineno": "6", + "type_": "test", + "id_": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[3]", + param2_path, + ), + "runID": get_absolute_test_id( + "param_same_name/test_param2.py::test_odd_even[3]", + param2_path, + ), + }, + ], + "id_": "param_same_name/test_param2.py::test_odd_even", + } + ], + }, + ], + } + ], + "id_": TEST_DATA_PATH_STR, +} + +tests_path = TEST_DATA_PATH / "root" / "tests" +tests_a_path = TEST_DATA_PATH / "root" / "tests" / "test_a.py" +tests_b_path = TEST_DATA_PATH / "root" / "tests" / "test_b.py" +# This is the expected output for the root folder tests. 
+# └── tests +# └── test_a.py +# └── test_a_function +# └── test_b.py +# └── test_b_function +root_with_config_expected_output = { + "name": "tests", + "path": os.fspath(tests_path), + "type_": "folder", + "children": [ + { + "name": "test_a.py", + "path": os.fspath(tests_a_path), + "type_": "file", + "id_": os.fspath(tests_a_path), + "children": [ + { + "name": "test_a_function", + "path": os.fspath(os.path.join(tests_path, "test_a.py")), + "lineno": find_test_line_number( + "test_a_function", + os.path.join(tests_path, "test_a.py"), + ), + "type_": "test", + "id_": get_absolute_test_id( + "tests/test_a.py::test_a_function", tests_a_path + ), + "runID": get_absolute_test_id( + "tests/test_a.py::test_a_function", tests_a_path + ), + } + ], + }, + { + "name": "test_b.py", + "path": os.fspath(tests_b_path), + "type_": "file", + "id_": os.fspath(tests_b_path), + "children": [ + { + "name": "test_b_function", + "path": os.fspath(os.path.join(tests_path, "test_b.py")), + "lineno": find_test_line_number( + "test_b_function", + os.path.join(tests_path, "test_b.py"), + ), + "type_": "test", + "id_": get_absolute_test_id( + "tests/test_b.py::test_b_function", tests_b_path + ), + "runID": get_absolute_test_id( + "tests/test_b.py::test_b_function", tests_b_path + ), + } + ], + }, + ], + "id_": os.fspath(tests_path), +} +TEST_MULTI_CLASS_NEST_PATH = TEST_DATA_PATH / "test_multi_class_nest.py" + +nested_classes_expected_test_output = { + "name": ".data", + "path": TEST_DATA_PATH_STR, + "type_": "folder", + "children": [ + { + "name": "test_multi_class_nest.py", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "type_": "file", + "id_": str(TEST_MULTI_CLASS_NEST_PATH), + "children": [ + { + "name": "TestFirstClass", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "type_": "class", + "id_": "test_multi_class_nest.py::TestFirstClass", + "children": [ + { + "name": "TestSecondClass", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "type_": "class", + "id_": 
"test_multi_class_nest.py::TestFirstClass::TestSecondClass", + "children": [ + { + "name": "test_second", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "lineno": find_test_line_number( + "test_second", + str(TEST_MULTI_CLASS_NEST_PATH), + ), + "type_": "test", + "id_": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::TestSecondClass::test_second", + TEST_MULTI_CLASS_NEST_PATH, + ), + "runID": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::TestSecondClass::test_second", + TEST_MULTI_CLASS_NEST_PATH, + ), + } + ], + }, + { + "name": "test_first", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "lineno": find_test_line_number( + "test_first", str(TEST_MULTI_CLASS_NEST_PATH) + ), + "type_": "test", + "id_": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::test_first", + TEST_MULTI_CLASS_NEST_PATH, + ), + "runID": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::test_first", + TEST_MULTI_CLASS_NEST_PATH, + ), + }, + { + "name": "TestSecondClass2", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "type_": "class", + "id_": "test_multi_class_nest.py::TestFirstClass::TestSecondClass2", + "children": [ + { + "name": "test_second2", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "lineno": find_test_line_number( + "test_second2", + str(TEST_MULTI_CLASS_NEST_PATH), + ), + "type_": "test", + "id_": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::TestSecondClass2::test_second2", + TEST_MULTI_CLASS_NEST_PATH, + ), + "runID": get_absolute_test_id( + "test_multi_class_nest.py::TestFirstClass::TestSecondClass2::test_second2", + TEST_MULTI_CLASS_NEST_PATH, + ), + } + ], + }, + ], + }, + { + "name": "test_independent", + "path": str(TEST_MULTI_CLASS_NEST_PATH), + "lineno": find_test_line_number( + "test_independent", str(TEST_MULTI_CLASS_NEST_PATH) + ), + "type_": "test", + "id_": get_absolute_test_id( + "test_multi_class_nest.py::test_independent", + TEST_MULTI_CLASS_NEST_PATH, + ), + "runID": 
get_absolute_test_id( + "test_multi_class_nest.py::test_independent", + TEST_MULTI_CLASS_NEST_PATH, + ), + }, + ], + } + ], + "id_": str(TEST_DATA_PATH), +} +SYMLINK_FOLDER_PATH = TEST_DATA_PATH / "symlink_folder" +SYMLINK_FOLDER_PATH_TESTS = TEST_DATA_PATH / "symlink_folder" / "tests" +SYMLINK_FOLDER_PATH_TESTS_TEST_A = ( + TEST_DATA_PATH / "symlink_folder" / "tests" / "test_a.py" +) +SYMLINK_FOLDER_PATH_TESTS_TEST_B = ( + TEST_DATA_PATH / "symlink_folder" / "tests" / "test_b.py" +) + +symlink_expected_discovery_output = { + "name": "symlink_folder", + "path": str(SYMLINK_FOLDER_PATH), + "type_": "folder", + "children": [ + { + "name": "tests", + "path": str(SYMLINK_FOLDER_PATH_TESTS), + "type_": "folder", + "id_": str(SYMLINK_FOLDER_PATH_TESTS), + "children": [ + { + "name": "test_a.py", + "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_A), + "type_": "file", + "id_": str(SYMLINK_FOLDER_PATH_TESTS_TEST_A), + "children": [ + { + "name": "test_a_function", + "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_A), + "lineno": find_test_line_number( + "test_a_function", + os.path.join(tests_path, "test_a.py"), + ), + "type_": "test", + "id_": get_absolute_test_id( + "tests/test_a.py::test_a_function", + SYMLINK_FOLDER_PATH_TESTS_TEST_A, + ), + "runID": get_absolute_test_id( + "tests/test_a.py::test_a_function", + SYMLINK_FOLDER_PATH_TESTS_TEST_A, + ), + } + ], + }, + { + "name": "test_b.py", + "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_B), + "type_": "file", + "id_": str(SYMLINK_FOLDER_PATH_TESTS_TEST_B), + "children": [ + { + "name": "test_b_function", + "path": str(SYMLINK_FOLDER_PATH_TESTS_TEST_B), + "lineno": find_test_line_number( + "test_b_function", + os.path.join(tests_path, "test_b.py"), + ), + "type_": "test", + "id_": get_absolute_test_id( + "tests/test_b.py::test_b_function", + SYMLINK_FOLDER_PATH_TESTS_TEST_B, + ), + "runID": get_absolute_test_id( + "tests/test_b.py::test_b_function", + SYMLINK_FOLDER_PATH_TESTS_TEST_B, + ), + } + ], + }, + ], + } + ], + "id_": 
str(SYMLINK_FOLDER_PATH), +} diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_execution_test_output.py new file mode 100644 index 00000000000..db4e493c3da --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/expected_execution_test_output.py @@ -0,0 +1,698 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +from .helpers import TEST_DATA_PATH, get_absolute_test_id + +TEST_SUBTRACT_FUNCTION = "unittest_folder/test_subtract.py::TestSubtractFunction::" +TEST_ADD_FUNCTION = "unittest_folder/test_add.py::TestAddFunction::" +SUCCESS = "success" +FAILURE = "failure" + +# This is the expected output for the unittest_folder execute tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# ├── test_subtract_negative_numbers: failure +# └── test_subtract_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +uf_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ): 
{ + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ), + "outcome": FAILURE, + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder only execute add.py tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_single_file_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder execute only signle method +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +uf_single_method_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# 
This is the expected output for the unittest_folder tests run where two tests +# run are in different files. +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# └── test_subtract_positive_numbers: success +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_non_adjacent_tests_execution_expected_output = { + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", test_subtract_path + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the simple_pytest.py file. +# └── simple_pytest.py +# └── test_function: success +simple_pytest_path = TEST_DATA_PATH / "unittest_folder" / "simple_pytest.py" + +simple_execution_pytest_expected_output = { + get_absolute_test_id("test_function", simple_pytest_path): { + "test": get_absolute_test_id("test_function", simple_pytest_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the unittest_pytest_same_file.py file. 
+# ├── unittest_pytest_same_file.py +# ├── TestExample +# │ └── test_true_unittest: success +# └── test_true_pytest: success +unit_pytest_same_file_path = TEST_DATA_PATH / "unittest_pytest_same_file.py" +unit_pytest_same_file_execution_expected_output = { + get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", unit_pytest_same_file_path + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the error_raised_exception.py file. +# └── error_raise_exception.py +# ├── TestSomething +# │ └── test_a: failure +error_raised_exception_path = TEST_DATA_PATH / "error_raise_exception.py" +error_raised_exception_execution_expected_output = { + get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", error_raised_exception_path + ): { + "test": get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", + error_raised_exception_path, + ), + "outcome": "error", + "message": "ERROR MESSAGE", + "traceback": "TRACEBACK", + "subtest": None, + } +} + +# This is the expected output for the skip_tests.py file. 
+# └── test_something: success +# └── test_another_thing: skipped +# └── test_decorator_thing: skipped +# └── test_decorator_thing_2: skipped +# ├── TestClass +# │ └── test_class_function_a: skipped +# │ └── test_class_function_b: skipped + +skip_tests_path = TEST_DATA_PATH / "skip_tests.py" +skip_tests_execution_expected_output = { + get_absolute_test_id("skip_tests.py::test_something", skip_tests_path): { + "test": get_absolute_test_id("skip_tests.py::test_something", skip_tests_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_another_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_another_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing_2", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing_2", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output 
for the dual_level_nested_folder.py tests +# └── dual_level_nested_folder +# └── test_top_folder.py +# └── test_top_function_t: success +# └── test_top_function_f: failure +# └── nested_folder_one +# └── test_bottom_folder.py +# └── test_bottom_function_t: success +# └── test_bottom_function_f: failure +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +dual_level_nested_folder_execution_expected_output = { + get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + 
"subtest": None, + }, +} + +# This is the expected output for the nested_folder tests. +# └── folder_a +# └── folder_b +# └── folder_a +# └── test_nest.py +# └── test_function: success + +nested_folder_path = ( + TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +) +double_nested_folder_expected_execution_output = { + get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ): { + "test": get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} +# This is the expected output for the nested_folder tests. +# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +# └── test_adding[2+4-6]: success +# └── test_adding[6+9-16]: failure +parametrize_tests_path = TEST_DATA_PATH / "parametrize_tests.py" + +parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. 
+# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +single_parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. +# └── text_docstring.txt +# └── text_docstring: success +doc_test_path = TEST_DATA_PATH / "text_docstring.txt" +doctest_pytest_expected_execution_output = { + get_absolute_test_id("text_docstring.txt::text_docstring.txt", doc_test_path): { + "test": get_absolute_test_id( + "text_docstring.txt::text_docstring.txt", doc_test_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + +# Will run all tests in the cwd that fit the test file naming pattern. +folder_a_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +unittest_folder_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +unittest_folder_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" + +no_test_ids_pytest_execution_expected_output = { + get_absolute_test_id("test_function", folder_a_path): { + "test": get_absolute_test_id("test_function", folder_a_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_t", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": 
None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_f", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( 
+ "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the root folder with the config file referenced. +# └── test_a.py +# └── test_a_function: success +test_add_path = TEST_DATA_PATH / "root" / "tests" / "test_a.py" +config_file_pytest_expected_execution_output = { + get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path): { + "test": get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the test logging file. +# └── test_logging.py +# └── test_logging2: failure +# └── test_logging: success +test_logging_path = TEST_DATA_PATH / "test_logging.py" + +logging_test_expected_execution_output = { + get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging2", test_logging_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_logging.py::test_logging", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging", test_logging_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test safe clear env vars file. 
+# └── test_env_vars.py +# └── test_clear_env: success +# └── test_check_env: success + +test_safe_clear_env_vars_path = TEST_DATA_PATH / "test_env_vars.py" +safe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test unsafe clear env vars file. +# └── test_env_vars.py +# └── test_clear_env_unsafe: success +# └── test_check_env_unsafe: success +unsafe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# Constant for the symlink execution test where TEST_DATA_PATH / "root" the target and TEST_DATA_PATH / "symlink_folder" the symlink +test_a_symlink_path = TEST_DATA_PATH / "symlink_folder" / "tests" / "test_a.py" +symlink_run_expected_execution_output = { + get_absolute_test_id("test_a.py::test_a_function", test_a_symlink_path): { + "test": 
get_absolute_test_id("test_a.py::test_a_function", test_a_symlink_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/helpers.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/helpers.py new file mode 100644 index 00000000000..a3ed21cc553 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/helpers.py @@ -0,0 +1,231 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import contextlib +import io +import json +import os +import pathlib +import socket +import subprocess +import sys +import threading +import uuid +from typing import Any, Dict, List, Optional, Tuple + +script_dir = pathlib.Path(__file__).parent.parent.parent +sys.path.append(os.fspath(script_dir)) +sys.path.append(os.fspath(script_dir / "lib" / "python")) + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" +from typing_extensions import TypedDict + + +def get_absolute_test_id(test_id: str, testPath: pathlib.Path) -> str: + split_id = test_id.split("::")[1:] + absolute_test_id = "::".join([str(testPath), *split_id]) + print("absolute path", absolute_test_id) + return absolute_test_id + + +@contextlib.contextmanager +def create_symlink(root: pathlib.Path, target_ext: str, destination_ext: str): + try: + destination = root / destination_ext + target = root / target_ext + if destination.exists(): + print("destination already exists", destination) + try: + destination.symlink_to(target) + except Exception as e: + print("error occurred when attempting to create a symlink", e) + yield target, destination + finally: + destination.unlink() + print("destination unlinked", destination) + + +def create_server( + host: str = "127.0.0.1", + port: int = 0, + backlog: int = socket.SOMAXCONN, + timeout: int = 1000, +) -> socket.socket: + """Return a local server socket listening on the given port.""" + 
server: socket.socket = _new_sock() + if port: + # If binding to a specific port, make sure that the user doesn't have + # to wait until the OS times out waiting for socket in order to use + # that port again if the server or the adapter crash or are force-killed. + if sys.platform == "win32": + server.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + else: + try: + server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except (AttributeError, OSError): + pass # Not available everywhere + server.bind((host, port)) + if timeout: + server.settimeout(timeout) + server.listen(backlog) + return server + + +def _new_sock() -> socket.socket: + sock: socket.socket = socket.socket( + socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP + ) + options = [ + ("SOL_SOCKET", "SO_KEEPALIVE", 1), + ("IPPROTO_TCP", "TCP_KEEPIDLE", 1), + ("IPPROTO_TCP", "TCP_KEEPINTVL", 3), + ("IPPROTO_TCP", "TCP_KEEPCNT", 5), + ] + + for level, name, value in options: + try: + sock.setsockopt(getattr(socket, level), getattr(socket, name), value) + except (AttributeError, OSError): + pass # May not be available everywhere. 
+ + return sock + + +CONTENT_LENGTH: str = "Content-Length:" +Env_Dict = TypedDict( + "Env_Dict", {"TEST_UUID": str, "TEST_PORT": str, "PYTHONPATH": str} +) + + +def process_rpc_message(data: str) -> Tuple[Dict[str, Any], str]: + """Process the JSON data which comes from the server which runs the pytest discovery.""" + str_stream: io.StringIO = io.StringIO(data) + + length: int = 0 + + while True: + line: str = str_stream.readline() + if CONTENT_LENGTH.lower() in line.lower(): + length = int(line[len(CONTENT_LENGTH) :]) + break + + if not line or line.isspace(): + raise ValueError("Header does not contain Content-Length") + + while True: + line: str = str_stream.readline() + if not line or line.isspace(): + break + + raw_json: str = str_stream.read(length) + return json.loads(raw_json), str_stream.read() + + +def process_rpc_json(data: str) -> List[Dict[str, Any]]: + """Process the JSON data which comes from the server which runs the pytest discovery.""" + json_messages = [] + remaining = data + while remaining: + json_data, remaining = process_rpc_message(remaining) + json_messages.append(json_data) + + return json_messages + + +def runner(args: List[str]) -> Optional[List[Dict[str, Any]]]: + """Run the pytest discovery and return the JSON data from the server.""" + return runner_with_cwd(args, TEST_DATA_PATH) + + +def runner_with_cwd( + args: List[str], path: pathlib.Path +) -> Optional[List[Dict[str, Any]]]: + """Run the pytest discovery and return the JSON data from the server.""" + process_args: List[str] = [ + sys.executable, + "-m", + "pytest", + "-p", + "vscode_pytest", + "-s", + ] + args + listener: socket.socket = create_server() + _, port = listener.getsockname() + listener.listen() + + env = os.environ.copy() + env.update( + { + "TEST_UUID": str(uuid.uuid4()), + "TEST_PORT": str(port), + "PYTHONPATH": os.fspath(pathlib.Path(__file__).parent.parent.parent), + } + ) + completed = threading.Event() + + result = [] + t1: threading.Thread = threading.Thread( 
+ target=_listen_on_socket, args=(listener, result, completed) + ) + t1.start() + + t2 = threading.Thread( + target=_run_test_code, + args=(process_args, env, path, completed), + ) + t2.start() + + t1.join() + t2.join() + + return process_rpc_json(result[0]) if result else None + + +def _listen_on_socket( + listener: socket.socket, result: List[str], completed: threading.Event +): + """Listen on the socket for the JSON data from the server. + Created as a separate function for clarity in threading. + """ + sock, (other_host, other_port) = listener.accept() + listener.settimeout(1) + all_data: list = [] + while True: + data: bytes = sock.recv(1024 * 1024) + if not data: + if completed.is_set(): + break + else: + try: + sock, (other_host, other_port) = listener.accept() + except socket.timeout: + result.append("".join(all_data)) + return + all_data.append(data.decode("utf-8")) + result.append("".join(all_data)) + + +def _run_test_code( + proc_args: List[str], proc_env, proc_cwd: str, completed: threading.Event +): + result = subprocess.run(proc_args, env=proc_env, cwd=proc_cwd) + completed.set() + return result + + +def find_test_line_number(test_name: str, test_file_path) -> str: + """Function which finds the correct line number for a test by looking for the "test_marker--[test_name]" string. + + The test_name is split on the "[" character to remove the parameterization information. + + Args: + test_name: The name of the test to find the line number for, will be unique per file. + test_file_path: The path to the test file where the test is located. 
+ """ + test_file_unique_id: str = "test_marker--" + test_name.split("[")[0] + with open(test_file_path) as f: + for i, line in enumerate(f): + if test_file_unique_id in line: + return str(i + 1) + error_str: str = f"Test {test_name!r} not found on any line in {test_file_path}" + raise ValueError(error_str) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/test_discovery.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/test_discovery.py new file mode 100644 index 00000000000..a1f4e4f266a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/test_discovery.py @@ -0,0 +1,306 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import json +import os +import pathlib +import shutil +import sys +from typing import Any, Dict, List, Optional + +import pytest + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.append(os.fspath(script_dir)) + +from tests.tree_comparison_helper import is_same_tree + +from . import expected_discovery_test_output +from .helpers import TEST_DATA_PATH, runner, runner_with_cwd, create_symlink + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="See https://github.com/microsoft/vscode-python/issues/22965", +) +def test_import_error(tmp_path): + """Test pytest discovery on a file that has a pytest marker but does not import pytest. + + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest discovery on. + + The json should still be returned but the errors list should be present. + + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. 
+ file_path = TEST_DATA_PATH / "error_pytest_import.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_pytest_import.py" + shutil.copyfile(file_path, p) + actual: Optional[List[Dict[str, Any]]] = runner(["--collect-only", os.fspath(p)]) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + + # Ensure that 'error' is a list and then check its length + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 2 + else: + assert False + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="See https://github.com/microsoft/vscode-python/issues/22965", +) +def test_syntax_error(tmp_path): + """Test pytest discovery on a file that has a syntax error. + + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest discovery on. + + The json should still be returned but the errors list should be present. + + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. 
+ file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["--collect-only", os.fspath(p)]) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + + # Ensure that 'error' is a list and then check its length + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 2 + else: + assert False + + +def test_parameterized_error_collect(): + """Tests pytest discovery on specific file that incorrectly uses parametrize. + + The json should still be returned but the errors list should be present. 
+ """ + file_path_str = "error_parametrize_discovery.py" + actual = runner(["--collect-only", file_path_str]) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + + # Ensure that 'error' is a list and then check its length + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 2 + else: + assert False + + +@pytest.mark.parametrize( + "file, expected_const", + [ + ( + "test_multi_class_nest.py", + expected_discovery_test_output.nested_classes_expected_test_output, + ), + ( + "unittest_skiptest_file_level.py", + expected_discovery_test_output.unittest_skip_file_level_expected_output, + ), + ( + "param_same_name", + expected_discovery_test_output.param_same_name_expected_output, + ), + ( + "parametrize_tests.py", + expected_discovery_test_output.parametrize_tests_expected_output, + ), + ( + "empty_discovery.py", + expected_discovery_test_output.empty_discovery_pytest_expected_output, + ), + ( + "simple_pytest.py", + expected_discovery_test_output.simple_discovery_pytest_expected_output, + ), + ( + "unittest_pytest_same_file.py", + expected_discovery_test_output.unit_pytest_same_file_discovery_expected_output, + ), + ( + "unittest_folder", + expected_discovery_test_output.unittest_folder_discovery_expected_output, + ), + ( + "dual_level_nested_folder", + expected_discovery_test_output.dual_level_nested_folder_expected_output, + ), + ( + "folder_a", + expected_discovery_test_output.double_nested_folder_expected_output, + ), + ( + "text_docstring.txt", + expected_discovery_test_output.doctest_pytest_expected_output, + ), + ], +) 
+def test_pytest_collect(file, expected_const): + """ + Test to test pytest discovery on a variety of test files/ folder structures. + Uses variables from expected_discovery_test_output.py to store the expected dictionary return. + Only handles discovery and therefore already contains the arg --collect-only. + All test discovery will succeed, be in the correct cwd, and match expected test output. + + Keyword arguments: + file -- a string with the file or folder to run pytest discovery on. + expected_const -- the expected output from running pytest discovery on the file. + """ + actual = runner( + [ + "--collect-only", + os.fspath(TEST_DATA_PATH / file), + ] + ) + + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + actual_item = actual_list.pop(0) + assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + assert is_same_tree(actual_item.get("tests"), expected_const) + + +def test_symlink_root_dir(): + """ + Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + Discovery should succeed and testids should be relative to the symlinked root directory. + """ + with create_symlink(TEST_DATA_PATH, "root", "symlink_folder") as ( + source, + destination, + ): + assert destination.is_symlink() + + # Run pytest with the cwd being the resolved symlink path (as it will be when we run the subprocess from node). 
+ actual = runner_with_cwd( + ["--collect-only", f"--rootdir={os.fspath(destination)}"], source + ) + expected = expected_discovery_test_output.symlink_expected_discovery_output + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + actual_item = actual_list.pop(0) + try: + # Check if all requirements + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ), "Required keys are missing" + assert actual_item.get("status") == "success", "Status is not 'success'" + assert actual_item.get("cwd") == os.fspath( + destination + ), f"CWD does not match: {os.fspath(destination)}" + assert ( + actual_item.get("tests") == expected + ), "Tests do not match expected value" + except AssertionError as e: + # Print the actual_item in JSON format if an assertion fails + print(json.dumps(actual_item, indent=4)) + pytest.fail(str(e)) + + +def test_pytest_root_dir(): + """ + Test to test pytest discovery with the command line arg --rootdir specified to be a subfolder + of the workspace root. Discovery should succeed and testids should be relative to workspace root. + """ + rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + actual = runner_with_cwd( + [ + "--collect-only", + rd, + ], + TEST_DATA_PATH / "root", + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + actual_item = actual_list.pop(0) + assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH / "root") + assert is_same_tree( + actual_item.get("tests"), + expected_discovery_test_output.root_with_config_expected_output, + ) + + +def test_pytest_config_file(): + """ + Test to test pytest discovery with the command line arg -c with a specified config file which + changes the workspace root. 
Discovery should succeed and testids should be relative to workspace root. + """ + actual = runner_with_cwd( + [ + "--collect-only", + "tests/", + ], + TEST_DATA_PATH / "root", + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + actual_item = actual_list.pop(0) + assert all(item in actual_item.keys() for item in ("status", "cwd", "error")) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH / "root") + assert is_same_tree( + actual_item.get("tests"), + expected_discovery_test_output.root_with_config_expected_output, + ) diff --git a/extensions/positron-python/pythonFiles/tests/pytestadapter/test_execution.py b/extensions/positron-python/pythonFiles/tests/pytestadapter/test_execution.py new file mode 100644 index 00000000000..a8336089d0a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/pytestadapter/test_execution.py @@ -0,0 +1,337 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import json +import os +import shutil +from typing import Any, Dict, List + +import pytest +import sys + +from tests.pytestadapter import expected_execution_test_output + +from .helpers import ( + TEST_DATA_PATH, + create_symlink, + get_absolute_test_id, + runner, + runner_with_cwd, +) + + +def test_config_file(): + """Test pytest execution when a config file is specified.""" + args = [ + "-c", + "tests/pytest.ini", + str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), + ] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +def test_rootdir_specified(): + """Test pytest execution when a --rootdir is specified.""" + rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + args = [rd, "tests/test_a.py::test_a_function"] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert 
actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="See https://github.com/microsoft/vscode-python/issues/22965", +) +def test_syntax_error_execution(tmp_path): + """Test pytest execution on a file that has a syntax error. + + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest execution on. + + The json should still be returned but the errors list should be present. + + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. + file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["error_syntax_discover.py::test_function"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +def test_bad_id_error_execution(): + """Test pytest discovery with a non-existent test_id. + + The json should still be returned but the errors list should be present. 
+ """ + actual = runner(["not/a/real::test_id"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +@pytest.mark.parametrize( + "test_ids, expected_const", + [ + ( + [ + "test_env_vars.py::test_clear_env", + "test_env_vars.py::test_check_env", + ], + expected_execution_test_output.safe_clear_env_vars_expected_execution_output, + ), + ( + [ + "skip_tests.py::test_something", + "skip_tests.py::test_another_thing", + "skip_tests.py::test_decorator_thing", + "skip_tests.py::test_decorator_thing_2", + "skip_tests.py::TestClass::test_class_function_a", + "skip_tests.py::TestClass::test_class_function_b", + ], + expected_execution_test_output.skip_tests_execution_expected_output, + ), + ( + ["error_raise_exception.py::TestSomething::test_a"], + expected_execution_test_output.error_raised_exception_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + ], + expected_execution_test_output.uf_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + ], + 
expected_execution_test_output.uf_single_file_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + ], + expected_execution_test_output.uf_single_method_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + ], + expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + ), + ( + [ + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + "unittest_pytest_same_file.py::test_true_pytest", + ], + expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + ), + ( + [ + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + ], + expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + ), + ( + ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + expected_execution_test_output.double_nested_folder_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + "parametrize_tests.py::test_adding[2+4-6]", + "parametrize_tests.py::test_adding[6+9-16]", + ], + expected_execution_test_output.parametrize_tests_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + ], + expected_execution_test_output.single_parametrize_tests_expected_execution_output, + ), + ( + [ + "text_docstring.txt::text_docstring.txt", + ], + expected_execution_test_output.doctest_pytest_expected_execution_output, + ), + ( + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], + expected_execution_test_output.logging_test_expected_execution_output, + ), + ], +) +def 
test_pytest_execution(test_ids, expected_const): + """ + Test that pytest discovery works as expected where run pytest is always successful + but the actual test results are both successes and failures.: + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. + 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. + 3. uf_execution_expected_output: unittest tests run on multiple files. + 4. uf_single_file_expected_output: test run on a single file. + 5. uf_single_method_execution_expected_output: test run on a single method in a file. + 6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. + 7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. + 8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file + at the top level and one test file in a nested folder. + 9. double_nested_folder_expected_execution_output: test run on a double nested folder. + 10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. + 11. single_parametrize_tests_expected_execution_output: test run on single parametrize test. + 12. doctest_pytest_expected_execution_output: test run on doctest file. + 13. logging_test_expected_execution_output: test run on a file with logging. + + + Keyword arguments: + test_ids -- an array of test_ids to run. + expected_const -- a dictionary of the expected output from running pytest discovery on the files. 
+ """ + args = test_ids + actual = runner(args) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + actual_result_dict.update(actual_item["result"]) + for key in actual_result_dict: + if ( + actual_result_dict[key]["outcome"] == "failure" + or actual_result_dict[key]["outcome"] == "error" + ): + actual_result_dict[key]["message"] = "ERROR MESSAGE" + if actual_result_dict[key]["traceback"] is not None: + actual_result_dict[key]["traceback"] = "TRACEBACK" + assert actual_result_dict == expected_const + + +def test_symlink_run(): + """ + Test to test pytest discovery with the command line arg --rootdir specified as a symlink path. + Discovery should succeed and testids should be relative to the symlinked root directory. + """ + with create_symlink(TEST_DATA_PATH, "root", "symlink_folder") as ( + source, + destination, + ): + assert destination.is_symlink() + test_a_path = TEST_DATA_PATH / "symlink_folder" / "tests" / "test_a.py" + test_a_id = get_absolute_test_id( + "tests/test_a.py::test_a_function", + test_a_path, + ) + + # Run pytest with the cwd being the resolved symlink path (as it will be when we run the subprocess from node). 
+ actual = runner_with_cwd( + [f"--rootdir={os.fspath(destination)}", test_a_id], source + ) + + expected_const = ( + expected_execution_test_output.symlink_run_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + if actual_list is not None: + assert actual_list.pop(-1).get("eot") + actual_item = actual_list.pop(0) + try: + # Check if all requirements + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ), "Required keys are missing" + assert actual_item.get("status") == "success", "Status is not 'success'" + assert actual_item.get("cwd") == os.fspath( + destination + ), f"CWD does not match: {os.fspath(destination)}" + actual_result_dict = dict() + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + except AssertionError as e: + # Print the actual_item in JSON format if an assertion fails + print(json.dumps(actual_item, indent=4)) + pytest.fail(str(e)) diff --git a/extensions/positron-python/pythonFiles/tests/run_all.py b/extensions/positron-python/pythonFiles/tests/run_all.py new file mode 100644 index 00000000000..ce5a6264996 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/run_all.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Replace the "." entry. +import os.path +import sys + +sys.path[0] = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +from tests.__main__ import main, parse_args + + +if __name__ == "__main__": + mainkwargs, pytestargs = parse_args() + ec = main(pytestargs, **mainkwargs) + sys.exit(ec) diff --git a/extensions/positron-python/pythonFiles/tests/test_create_conda.py b/extensions/positron-python/pythonFiles/tests/test_create_conda.py new file mode 100644 index 00000000000..29dc323402e --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_create_conda.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. + +import importlib +import sys + +import create_conda +import pytest + + +@pytest.mark.parametrize("env_exists", [True, False]) +@pytest.mark.parametrize("git_ignore", [True, False]) +@pytest.mark.parametrize("install", [True, False]) +@pytest.mark.parametrize("python", [True, False]) +def test_create_env(env_exists, git_ignore, install, python): + importlib.reload(create_conda) + create_conda.conda_env_exists = lambda _n: env_exists + + install_packages_called = False + + def install_packages(_name): + nonlocal install_packages_called + install_packages_called = True + + create_conda.install_packages = install_packages + + run_process_called = False + + def run_process(args, error_message): + nonlocal run_process_called + run_process_called = True + version = ( + "12345" if python else f"{sys.version_info.major}.{sys.version_info.minor}" + ) + if not env_exists: + assert args == [ + sys.executable, + "-m", + "conda", + "create", + "--yes", + "--prefix", + create_conda.CONDA_ENV_NAME, + f"python={version}", + ] + assert error_message == "CREATE_CONDA.ENV_FAILED_CREATION" + + create_conda.run_process = run_process + + add_gitignore_called = False + + def add_gitignore(_name): + nonlocal add_gitignore_called + add_gitignore_called = True + + create_conda.add_gitignore = add_gitignore + + args = [] + if git_ignore: + args.append("--git-ignore") + if install: + args.append("--install") + if python: + args.extend(["--python", "12345"]) + create_conda.main(args) + assert install_packages_called == install + + # run_process is called when the venv does not exist + assert run_process_called != env_exists + + # add_gitignore is called when new venv is created and git_ignore is True + assert add_gitignore_called == (not env_exists and git_ignore) diff --git a/extensions/positron-python/pythonFiles/tests/test_create_microvenv.py b/extensions/positron-python/pythonFiles/tests/test_create_microvenv.py new file mode 100644 
index 00000000000..e5d4e68802e --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_create_microvenv.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import importlib +import os +import sys + +import create_microvenv + + +def test_create_microvenv(): + importlib.reload(create_microvenv) + run_process_called = False + + def run_process(args, error_message): + nonlocal run_process_called + run_process_called = True + assert args == [ + sys.executable, + os.fspath(create_microvenv.LIB_ROOT / "microvenv.py"), + create_microvenv.VENV_NAME, + ] + assert error_message == "CREATE_MICROVENV.MICROVENV_FAILED_CREATION" + + create_microvenv.run_process = run_process + + create_microvenv.main() + assert run_process_called is True diff --git a/extensions/positron-python/pythonFiles/tests/test_create_venv.py b/extensions/positron-python/pythonFiles/tests/test_create_venv.py new file mode 100644 index 00000000000..57df0a7fb3c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_create_venv.py @@ -0,0 +1,295 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import argparse +import contextlib +import importlib +import io +import json +import os +import sys + +import pytest + +import create_venv + + +@pytest.mark.skipif( + sys.platform == "win32", reason="Windows does not have micro venv fallback." 
+) +def test_venv_not_installed_unix(): + importlib.reload(create_venv) + create_venv.is_installed = lambda module: module != "venv" + run_process_called = False + + def run_process(args, error_message): + nonlocal run_process_called + microvenv_path = os.fspath(create_venv.MICROVENV_SCRIPT_PATH) + if microvenv_path in args: + run_process_called = True + assert args == [ + sys.executable, + microvenv_path, + "--name", + ".test_venv", + ] + assert error_message == "CREATE_VENV.MICROVENV_FAILED_CREATION" + + create_venv.run_process = run_process + + create_venv.main(["--name", ".test_venv"]) + + # run_process is called when the venv does not exist + assert run_process_called is True + + +@pytest.mark.skipif( + sys.platform != "win32", reason="Windows does not have microvenv fallback." +) +def test_venv_not_installed_windows(): + importlib.reload(create_venv) + create_venv.is_installed = lambda module: module != "venv" + with pytest.raises(create_venv.VenvError) as e: + create_venv.main() + assert str(e.value) == "CREATE_VENV.VENV_NOT_FOUND" + + +@pytest.mark.parametrize("env_exists", ["hasEnv", "noEnv"]) +@pytest.mark.parametrize("git_ignore", ["useGitIgnore", "skipGitIgnore"]) +@pytest.mark.parametrize("install", ["requirements", "toml", "skipInstall"]) +def test_create_env(env_exists, git_ignore, install): + importlib.reload(create_venv) + create_venv.is_installed = lambda _x: True + create_venv.venv_exists = lambda _n: env_exists == "hasEnv" + create_venv.upgrade_pip = lambda _x: None + + install_packages_called = False + + def install_packages(_env, _name): + nonlocal install_packages_called + install_packages_called = True + + create_venv.install_requirements = install_packages + create_venv.install_toml = install_packages + + run_process_called = False + + def run_process(args, error_message): + nonlocal run_process_called + run_process_called = True + if env_exists == "noEnv": + assert args == [sys.executable, "-m", "venv", create_venv.VENV_NAME] + assert 
error_message == "CREATE_VENV.VENV_FAILED_CREATION" + + create_venv.run_process = run_process + + add_gitignore_called = False + + def add_gitignore(_name): + nonlocal add_gitignore_called + add_gitignore_called = True + + create_venv.add_gitignore = add_gitignore + + args = [] + if git_ignore == "useGitIgnore": + args += ["--git-ignore"] + if install == "requirements": + args += ["--requirements", "requirements-for-test.txt"] + elif install == "toml": + args += ["--toml", "pyproject.toml", "--extras", "test"] + + create_venv.main(args) + assert install_packages_called == (install != "skipInstall") + + # run_process is called when the venv does not exist + assert run_process_called == (env_exists == "noEnv") + + # add_gitignore is called when new venv is created and git_ignore is True + assert add_gitignore_called == ( + (env_exists == "noEnv") and (git_ignore == "useGitIgnore") + ) + + +@pytest.mark.parametrize("install_type", ["requirements", "pyproject", "both"]) +def test_install_packages(install_type): + importlib.reload(create_venv) + create_venv.is_installed = lambda _x: True + create_venv.file_exists = lambda x: install_type in x + + pip_upgraded = False + installing = None + + order = [] + + def run_process(args, error_message): + nonlocal pip_upgraded, installing, order + if args[1:] == ["-m", "pip", "install", "--upgrade", "pip"]: + pip_upgraded = True + assert error_message == "CREATE_VENV.UPGRADE_PIP_FAILED" + elif args[1:-1] == ["-m", "pip", "install", "-r"]: + installing = "requirements" + order += ["requirements"] + assert error_message == "CREATE_VENV.PIP_FAILED_INSTALL_REQUIREMENTS" + elif args[1:] == ["-m", "pip", "install", "-e", ".[test]"]: + installing = "pyproject" + order += ["pyproject"] + assert error_message == "CREATE_VENV.PIP_FAILED_INSTALL_PYPROJECT" + + create_venv.run_process = run_process + + if install_type == "requirements": + create_venv.main(["--requirements", "requirements-for-test.txt"]) + elif install_type == "pyproject": + 
create_venv.main(["--toml", "pyproject.toml", "--extras", "test"]) + elif install_type == "both": + create_venv.main( + [ + "--requirements", + "requirements-for-test.txt", + "--toml", + "pyproject.toml", + "--extras", + "test", + ] + ) + + assert pip_upgraded + if install_type == "both": + assert order == ["requirements", "pyproject"] + else: + assert installing == install_type + + +@pytest.mark.parametrize( + ("extras", "expected"), + [ + ([], ["-m", "pip", "install", "-e", "."]), + (["test"], ["-m", "pip", "install", "-e", ".[test]"]), + (["test", "doc"], ["-m", "pip", "install", "-e", ".[test,doc]"]), + ], +) +def test_toml_args(extras, expected): + importlib.reload(create_venv) + + actual = [] + + def run_process(args, error_message): + nonlocal actual + actual = args[1:] + + create_venv.run_process = run_process + + create_venv.install_toml(sys.executable, extras) + + assert actual == expected + + +@pytest.mark.parametrize( + ("extras", "expected"), + [ + ([], []), + ( + ["requirements/test.txt"], + [[sys.executable, "-m", "pip", "install", "-r", "requirements/test.txt"]], + ), + ( + ["requirements/test.txt", "requirements/doc.txt"], + [ + [sys.executable, "-m", "pip", "install", "-r", "requirements/test.txt"], + [sys.executable, "-m", "pip", "install", "-r", "requirements/doc.txt"], + ], + ), + ], +) +def test_requirements_args(extras, expected): + importlib.reload(create_venv) + + actual = [] + + def run_process(args, error_message): + nonlocal actual + actual.append(args) + + create_venv.run_process = run_process + + create_venv.install_requirements(sys.executable, extras) + + assert actual == expected + + +def test_create_venv_missing_pip(): + importlib.reload(create_venv) + create_venv.venv_exists = lambda _n: True + create_venv.is_installed = lambda module: module != "pip" + + download_pip_pyz_called = False + + def download_pip_pyz(name): + nonlocal download_pip_pyz_called + download_pip_pyz_called = True + assert name == create_venv.VENV_NAME + + 
create_venv.download_pip_pyz = download_pip_pyz + + run_process_called = False + + def run_process(args, error_message): + if "install" in args and "pip" in args: + nonlocal run_process_called + run_process_called = True + pip_pyz_path = os.fspath( + create_venv.CWD / create_venv.VENV_NAME / "pip.pyz" + ) + assert args[1:] == [pip_pyz_path, "install", "pip"] + assert error_message == "CREATE_VENV.INSTALL_PIP_FAILED" + + create_venv.run_process = run_process + create_venv.main([]) + + +@contextlib.contextmanager +def redirect_io(stream: str, new_stream): + """Redirect stdio streams to a custom stream.""" + old_stream = getattr(sys, stream) + setattr(sys, stream, new_stream) + yield + setattr(sys, stream, old_stream) + + +class CustomIO(io.TextIOWrapper): + """Custom stream object to replace stdio.""" + + name: str = "customio" # type: ignore ReportIncompatibleMethodOverride (remove once updated upstream) + + def __init__(self, name: str, encoding="utf-8", newline=None): + self._buffer = io.BytesIO() + self._buffer.name = name + super().__init__(self._buffer, encoding=encoding, newline=newline) + + def close(self): + """Provide this close method which is used by some tools.""" + # This is intentionally empty. 
+ + def get_value(self) -> str: + """Returns value from the buffer as string.""" + self.seek(0) + return self.read() + + +def test_requirements_from_stdin(): + importlib.reload(create_venv) + + cli_requirements = [f"cli-requirement{i}.txt" for i in range(3)] + args = argparse.Namespace() + args.__dict__.update({"stdin": True, "requirements": cli_requirements}) + + stdin_requirements = [f"stdin-requirement{i}.txt" for i in range(20)] + text = json.dumps({"requirements": stdin_requirements}) + str_input = CustomIO("", encoding="utf-8", newline="\n") + with redirect_io("stdin", str_input): + str_input.write(text) + str_input.seek(0) + actual = create_venv.get_requirements_from_args(args) + + assert actual == stdin_requirements + cli_requirements diff --git a/extensions/positron-python/pythonFiles/tests/test_data/missing-deps.data b/extensions/positron-python/pythonFiles/tests/test_data/missing-deps.data new file mode 100644 index 00000000000..c8c911f218a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_data/missing-deps.data @@ -0,0 +1,121 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --generate-hashes --resolver=backtracking requirements-test.in +# +flake8-csv==0.2.0 \ + --hash=sha256:246e07207fefbf8f80a59ff7e878f153635f562ebaf20cf796a2b00b1528ea9a \ + --hash=sha256:bf3ac6aecbaebe36a2c7d5d275f310996fcc33b7370cdd81feec04b79af2e07c + # via -r requirements-test.in +levenshtein==0.21.0 \ + --hash=sha256:01dd427cf72b4978b09558e3d36e3f92c8eef467e3eb4653c3fdccd8d70aaa08 \ + --hash=sha256:0236c8ff4648c50ebd81ac3692430d2241b134936ac9d86d7ca32ba6ab4a4e63 \ + --hash=sha256:023ca95c833ca548280e444e9a4c34fdecb3be3851e96af95bad290ae0c708b9 \ + --hash=sha256:024302c82d49fc1f1d044794997ef7aa9d01b509a9040e222480b64a01cd4b80 \ + --hash=sha256:04046878a57129da4e2352c032df7c1fceaa54870916d12772cad505ef998290 \ + --hash=sha256:04850a0719e503014acb3fee6d4ec7d7f170a2c7375ffbc5833c7256b7cd10ee \ + 
--hash=sha256:0cc3679978cd0250bf002963cf2e08855b93f70fa0fc9f74956115c343983fbb \ + --hash=sha256:0f42b8dba2cce257cd34efd1ce9678d06f3248cb0bb2a92a5db8402e1e4a6f30 \ + --hash=sha256:13e8a5b1b58de49befea555bb913dc394614f2d3553bc5b86bc672c69ef1a85a \ + --hash=sha256:1f19fe25ea0dd845d0f48505e8947f6080728e10b7642ba0dad34e9b48c81130 \ + --hash=sha256:1fde464f937878e6f5c30c234b95ce2cb969331a175b3089367e077113428062 \ + --hash=sha256:2290732763e3b75979888364b26acce79d72b8677441b5762a4e97b3630cc3d9 \ + --hash=sha256:24843f28cbbdcbcfc18b08e7d3409dbaad7896fb7113442592fa978590a7bbf0 \ + --hash=sha256:25576ad9c337ecb342306fe87166b54b2f49e713d4ff592c752cc98e0046296e \ + --hash=sha256:26c6fb012538a245d78adea786d2cfe3c1506b835762c1c523a4ed6b9e08dc0b \ + --hash=sha256:31cb59d86a5f99147cd4a67ebced8d6df574b5d763dcb63c033a642e29568746 \ + --hash=sha256:32dfda2e64d0c50553e47d0ab2956413970f940253351c196827ad46f17916d5 \ + --hash=sha256:3305262cb85ff78ace9e2d8d2dfc029b34dc5f93aa2d24fd20b6ed723e2ad501 \ + --hash=sha256:37a99d858fa1d88b1a917b4059a186becd728534e5e889d583086482356b7ca1 \ + --hash=sha256:3c6858cfd84568bc1df3ad545553b5c27af6ed3346973e8f4b57d23c318cf8f4 \ + --hash=sha256:3e1723d515ab287b9b2c2e4a111894dc6b474f5d28826fff379647486cae98d2 \ + --hash=sha256:3e22d31375d5fea5797c9b7aa0f8cc36579c31dcf5754e9931ca86c27d9011f8 \ + --hash=sha256:426883be613d912495cf6ee2a776d2ab84aa6b3de5a8d82c43a994267ea6e0e3 \ + --hash=sha256:4357bf8146cbadb10016ad3a950bba16e042f79015362a575f966181d95b4bc7 \ + --hash=sha256:4515f9511cb91c66d254ee30154206aad76b57d8b25f64ba1402aad43efdb251 \ + --hash=sha256:457442911df185e28a32fd8b788b14ca22ab3a552256b556e7687173d5f18bc4 \ + --hash=sha256:46dab8c6e8fae563ca77acfaeb3824c4dd4b599996328b8a081b06f16befa6a0 \ + --hash=sha256:4b2156f32e46d16b74a055ccb4f64ee3c64399372a6aaf1ee98f6dccfadecee1 \ + --hash=sha256:4bbceef2caba4b2ae613b0e853a7aaab990c1a13bddb9054ba1328a84bccdbf7 \ + --hash=sha256:4c8eaaa6f0df2838437d1d8739629486b145f7a3405d3ef0874301a9f5bc7dcd \ + 
--hash=sha256:4dc79033140f82acaca40712a6d26ed190cc2dd403e104020a87c24f2771aa72 \ + --hash=sha256:4ec2ef9836a34a3bb009a81e5efe4d9d43515455fb5f182c5d2cf8ae61c79496 \ + --hash=sha256:5369827ace536c6df04e0e670d782999bc17bf9eb111e77435fdcdaecb10c2a3 \ + --hash=sha256:5378a8139ba61d7271c0f9350201259c11eb90bfed0ac45539c4aeaed3907230 \ + --hash=sha256:545635d9e857711d049dcdb0b8609fb707b34b032517376c531ca159fcd46265 \ + --hash=sha256:587ad51770de41eb491bea1bfb676abc7ff9a94dbec0e2bc51fc6a25abef99c4 \ + --hash=sha256:5cfbc4ed7ee2965e305bf81388fea377b795dabc82ee07f04f31d1fb8677a885 \ + --hash=sha256:5e748c2349719cb1bc90f802d9d7f07310633dcf166d468a5bd821f78ed17698 \ + --hash=sha256:608beb1683508c3cdbfff669c1c872ea02b47965e1bbb8a630de548e2490f96a \ + --hash=sha256:6338a47b6f8c7f1ee8b5636cc8b245ad2d1d0ee47f7bb6f33f38a522ef0219cc \ + --hash=sha256:668ea30b311944c643f866ce5e45edf346f05e920075c0056f2ba7f74dde6071 \ + --hash=sha256:66d303cd485710fe6d62108209219b7a695bdd10a722f4e86abdaf26f4bf2202 \ + --hash=sha256:6ebabcf982ae161534f8729d13fe05eebc977b497ac34936551f97cf8b07dd9e \ + --hash=sha256:6ede583155f24c8b2456a7720fbbfa5d9c1154ae04b4da3cf63368e2406ea099 \ + --hash=sha256:709a727f58d31a5ee1e5e83b247972fe55ef0014f6222256c9692c5efa471785 \ + --hash=sha256:742b785c93d16c63289902607219c200bd2b6077dafc788073c74337cae382fb \ + --hash=sha256:76d5d34a8e21de8073c66ae801f053520f946d499fa533fbba654712775f8132 \ + --hash=sha256:7bc550d0986ace95bde003b8a60e622449baf2bdf24d8412f7a50f401a289ec3 \ + --hash=sha256:7c2d67220867d640e36931b3d63b8349369b485d52cf6f4a2635bec8da92d678 \ + --hash=sha256:7ce3f14a8e006fb7e3fc7bab965ab7da5817f48fc48d25cf735fcec8f1d2e39a \ + --hash=sha256:7e40a4bac848c9a8883225f926cfa7b2bc9f651e989a8b7006cdb596edc7ac9b \ + --hash=sha256:80e67bd73a05592ecd52aede4afa8ea49575de70f9d5bfbe2c52ebd3541b20be \ + --hash=sha256:8446f8da38857482ec0cfd616fe5e7dcd3695fd323cc65f37366a9ff6a31c9cb \ + --hash=sha256:8476862a5c3150b8d63a7475563a4bff6dc50bbc0447894eb6b6a116ced0809d \ + 
--hash=sha256:84b55b732e311629a8308ad2778a0f9824e29e3c35987eb35610fc52eb6d4634 \ + --hash=sha256:88ccdc8dc20c16e8059ace00fb58d353346a04fd24c0733b009678b2554801d2 \ + --hash=sha256:8aa92b05156dfa2e248c3743670d5deb41a45b5789416d5fa31be009f4f043ab \ + --hash=sha256:8ac4ed77d3263eac7f9b6ed89d451644332aecd55cda921201e348803a1e5c57 \ + --hash=sha256:8bdbcd1570340b07549f71e8a5ba3f0a6d84408bf86c4051dc7b70a29ae342bb \ + --hash=sha256:8c031cbe3685b0343f5cc2dcf2172fd21b82f8ccc5c487179a895009bf0e4ea8 \ + --hash=sha256:8c27a5178ce322b56527a451185b4224217aa81955d9b0dad6f5a8de81ffe80f \ + --hash=sha256:8cf87a5e2962431d7260dd81dc1ca0697f61aad81036145d3666f4c0d514ce3a \ + --hash=sha256:8d4ba0df46bb41d660d77e7cc6b4d38c8d5b6f977d51c48ed1217db6a8474cde \ + --hash=sha256:8dd8ef4239b24fb1c9f0b536e48e55194d5966d351d349af23e67c9eb3875c68 \ + --hash=sha256:92bf2370b01d7a4862abf411f8f60f39f064cebebce176e3e9ee14e744db8288 \ + --hash=sha256:9485f2a5c88113410153256657072bc93b81bf5c8690d47e4cc3df58135dbadb \ + --hash=sha256:9ff1255c499fcb41ba37a578ad8c1b8dab5c44f78941b8e1c1d7fab5b5e831bc \ + --hash=sha256:a18c8e4d1aae3f9950797d049020c64a8a63cc8b4e43afcca91ec400bf6304c5 \ + --hash=sha256:a68b05614d25cc2a5fbcc4d2fd124be7668d075fd5ac3d82f292eec573157361 \ + --hash=sha256:a7adaabe07c5ceb6228332b9184f06eb9cda89c227d198a1b8a6f78c05b3c672 \ + --hash=sha256:aa39bb773915e4df330d311bb6c100a8613e265cc50d5b25b015c8db824e1c47 \ + --hash=sha256:ac8b6266799645827980ab1af4e0bfae209c1f747a10bdf6e5da96a6ebe511a2 \ + --hash=sha256:b0ba9723c7d67a61e160b3457259552f7d679d74aaa144b892eb68b7e2a5ebb6 \ + --hash=sha256:b167b32b3e336c5ec5e0212f025587f9248344ae6e73ed668270eba5c6a506e5 \ + --hash=sha256:b646ace5085a60d4f89b28c81301c9d9e8cd6a9bdda908181b2fa3dfac7fc10d \ + --hash=sha256:bd0bfa71b1441be359e99e77709885b79c22857bf9bb7f4e84c09e501f6c5fad \ + --hash=sha256:be038321695267a8faa5ae1b1a83deb3748827f0b6f72471e0beed36afcbd72a \ + --hash=sha256:be87998ffcbb5fb0c37a76d100f63b4811f48527192677da0ec3624b49ab8a64 \ + 
--hash=sha256:c270487d60b33102efea73be6dcd5835f3ddc3dc06e77499f0963df6cba2ec71 \ + --hash=sha256:c290a7211f1b4f87c300df4424cc46b7379cead3b6f37fa8d3e7e6c6212ccd39 \ + --hash=sha256:cc36ba40027b4f8821155c9e3e0afadffccdccbe955556039d1d1169dfc659c9 \ + --hash=sha256:ce7e76c6341abb498368d42b8081f2f45c245ac2a221af6a0394349d41302c08 \ + --hash=sha256:cefd5a668f6d7af1279aca10104b43882fdd83f9bdc68933ba5429257a628abe \ + --hash=sha256:cf2dee0f8c71598f8be51e3feceb9142ac01576277b9e691e25740987761c86e \ + --hash=sha256:d23c647b03acbb5783f9bdfd51cfa5365d51f7df9f4029717a35eff5cc32bbcc \ + --hash=sha256:d647f1e0c30c7a73f70f4de7376ed7dafc2b856b67fe480d32a81af133edbaeb \ + --hash=sha256:d932cb21e40beb93cfc8973de7f25fbf25ba4a07d1dccac3b9ba977164cf9887 \ + --hash=sha256:db7567997ffbc2feb999e30002a92461a76f17a596a142bdb463b5f7037f160c \ + --hash=sha256:de2dfd6498454c7d89036d56a53c0a01fd9bcf1c2970253e469b5e8bb938b69f \ + --hash=sha256:df9b0f8f511270ad259c7bfba22ab6d5a0c33d81cd594461668e67cd80dd9052 \ + --hash=sha256:e043b79e39f165026bc941c95582bfc4bfdd297a1de6f13ace0d0a7abf486288 \ + --hash=sha256:e2686c37d22faf27d02a19e83b55812d248b32b7ba3aa638e768d0ea032e1f3c \ + --hash=sha256:e9a6251818b9eb6d519bffd7a0b745f3a99b3e99563a4c9d3cad26e34f6ac880 \ + --hash=sha256:eab6c253983a6659e749f4c44fcc2215194c2e00bf7b1c5e90fe683ea3b7b00f \ + --hash=sha256:ec64b7b3fb95bc9c20c72548277794b81281a6ba9da85eda2c87324c218441ff \ + --hash=sha256:ee62ec5882a857b252faffeb7867679f7e418052ca6bf7d6b56099f6498a2b0e \ + --hash=sha256:ee757fd36bad66ad8b961958840894021ecaad22194f65219a666432739393ff \ + --hash=sha256:f55623094b665d79a3b82ba77386ac34fa85049163edfe65387063e5127d4184 \ + --hash=sha256:f622f542bd065ffec7d26b26d44d0c9a25c9c1295fd8ba6e4d77778e2293a12c \ + --hash=sha256:f873af54014cac12082c7f5ccec6bbbeb5b57f63466e7f9c61a34588621313fb \ + --hash=sha256:fae24c875c4ecc8c5f34a9715eb2a459743b4ca21d35c51819b640ee2f71cb51 \ + --hash=sha256:fb26e69fc6c12534fbaa1657efed3b6482f1a166ba8e31227fa6f6f062a59070 + # via -r 
requirements-test.in +pytest==7.3.1 \ + --hash=sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362 \ + --hash=sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3 + +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f diff --git a/extensions/positron-python/pythonFiles/tests/test_data/no-missing-deps.data b/extensions/positron-python/pythonFiles/tests/test_data/no-missing-deps.data new file mode 100644 index 00000000000..d5d04476dec --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_data/no-missing-deps.data @@ -0,0 +1,13 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --generate-hashes --resolver=backtracking requirements-test.in +# +pytest==7.3.1 \ + --hash=sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362 \ + --hash=sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3 + +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f diff --git a/extensions/positron-python/pythonFiles/tests/test_data/pyproject-missing-deps.data b/extensions/positron-python/pythonFiles/tests/test_data/pyproject-missing-deps.data new file mode 100644 index 00000000000..e4d6f9eb10d --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_data/pyproject-missing-deps.data @@ -0,0 +1,9 @@ +[build-system] +requires = ["flit_core >=3.2,<4"] +build-backend = "flit_core.buildapi" + +[project] +name = "something" +version = "2023.0.0" +requires-python = ">=3.8" +dependencies = ["pytest==7.3.1", "flake8-csv"] diff --git a/extensions/positron-python/pythonFiles/tests/test_data/pyproject-no-missing-deps.data 
b/extensions/positron-python/pythonFiles/tests/test_data/pyproject-no-missing-deps.data new file mode 100644 index 00000000000..64dadf6fdf2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_data/pyproject-no-missing-deps.data @@ -0,0 +1,9 @@ +[build-system] +requires = ["flit_core >=3.2,<4"] +build-backend = "flit_core.buildapi" + +[project] +name = "something" +version = "2023.0.0" +requires-python = ">=3.8" +dependencies = ["jedi-language-server"] diff --git a/extensions/positron-python/pythonFiles/tests/test_dynamic_cursor.py b/extensions/positron-python/pythonFiles/tests/test_dynamic_cursor.py new file mode 100644 index 00000000000..7aea59427aa --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_dynamic_cursor.py @@ -0,0 +1,203 @@ +import importlib +import textwrap + +import normalizeSelection + + +def test_dictionary_mouse_mover(): + """ + Having the mouse cursor on second line, + 'my_dict = {' + and pressing shift+enter should bring the + mouse cursor to line 6, on and to be able to run + 'print('only send the dictionary')' + """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + not_dictionary = 'hi' + my_dict = { + "key1": "value1", + "key2": "value2" + } + print('only send the dictionary') + """ + ) + + result = normalizeSelection.traverse_file(src, 2, 2, False) + + assert result["which_line_next"] == 6 + + +def test_beginning_func(): + """ + Pressing shift+enter on the very first line, + of function definition, such as 'my_func():' + It should properly skip the comment and assert the + next executable line to be executed is line 5 at + 'my_dict = {' + """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + def my_func(): + print("line 2") + print("line 3") + # Skip line 4 because it is a comment + my_dict = { + "key1": "value1", + "key2": "value2" + } + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["which_line_next"] == 5 + + +def 
test_cursor_forloop(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + lucid_dream = ["Corgi", "Husky", "Pomsky"] + for dogs in lucid_dream: # initial starting position + print(dogs) + print("I wish I had a dog!") + + print("This should be the next block that should be ran") + """ + ) + + result = normalizeSelection.traverse_file(src, 2, 2, False) + + assert result["which_line_next"] == 6 + + +def test_inside_forloop(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + for food in lucid_dream: + print("We are starting") # initial starting position + print("Next cursor should be here!") + + """ + ) + + result = normalizeSelection.traverse_file(src, 2, 2, False) + + assert result["which_line_next"] == 3 + + +def test_skip_sameline_statements(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + print("Audi");print("BMW");print("Mercedes") + print("Next line to be run is here!") + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["which_line_next"] == 2 + + +def test_skip_multi_comp_lambda(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + ( + my_first_var + for my_first_var in range(1, 10) + if my_first_var % 2 == 0 + ) + + my_lambda = lambda x: ( + x + 1 + ) + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + # Shift enter from the very first ( should make + # next executable statement as the lambda expression + assert result["which_line_next"] == 7 + + +def test_move_whole_class(): + """ + Shift+enter on a class definition + should move the cursor after running whole class. 
+ """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + class Stub(object): + def __init__(self): + self.calls = [] + + def add_call(self, name, args=None, kwargs=None): + self.calls.append((name, args, kwargs)) + print("We should be here after running whole class") + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["which_line_next"] == 7 + + +def test_def_to_def(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + def my_dogs(): + print("Corgi") + print("Husky") + print("Corgi2") + print("Husky2") + print("no dogs") + + # Skip here + def next_func(): + print("Not here but above") + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["which_line_next"] == 9 + + +def test_try_catch_move(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + try: + 1+1 + except: + print("error") + + print("Should be here afterwards") + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["which_line_next"] == 6 + + +def test_skip_nested(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + for i in range(1, 6): + for j in range(1, 6): + for x in range(1, 5): + for y in range(1, 5): + for z in range(1,10): + print(i, j, x, y, z) + + print("Cursor should be here after running line 1") + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["which_line_next"] == 8 diff --git a/extensions/positron-python/pythonFiles/tests/test_installed_check.py b/extensions/positron-python/pythonFiles/tests/test_installed_check.py new file mode 100644 index 00000000000..dae019359e0 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_installed_check.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import contextlib +import json +import os +import pathlib +import subprocess +import sys + +import pytest +from typing import Dict, List, Optional, Union + +SCRIPT_PATH = pathlib.Path(__file__).parent.parent / "installed_check.py" +TEST_DATA = pathlib.Path(__file__).parent / "test_data" +DEFAULT_SEVERITY = 3 + + +@contextlib.contextmanager +def generate_file(base_file: pathlib.Path): + basename = "pyproject.toml" if "pyproject" in base_file.name else "requirements.txt" + fullpath = base_file.parent / basename + if fullpath.exists(): + os.unlink(os.fspath(fullpath)) + fullpath.write_text(base_file.read_text(encoding="utf-8")) + try: + yield fullpath + finally: + os.unlink(str(fullpath)) + + +def run_on_file( + file_path: pathlib.Path, severity: Optional[str] = None +) -> List[Dict[str, Union[str, int]]]: + env = os.environ.copy() + if severity: + env["VSCODE_MISSING_PGK_SEVERITY"] = severity + result = subprocess.run( + [ + sys.executable, + os.fspath(SCRIPT_PATH), + os.fspath(file_path), + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True, + env=env, + ) + assert result.returncode == 0 + assert result.stderr == b"" + return json.loads(result.stdout) + + +EXPECTED_DATA = { + "missing-deps": [ + { + "line": 6, + "character": 0, + "endLine": 6, + "endCharacter": 10, + "package": "flake8-csv", + "code": "not-installed", + "severity": 3, + }, + { + "line": 10, + "character": 0, + "endLine": 10, + "endCharacter": 11, + "package": "levenshtein", + "code": "not-installed", + "severity": 3, + }, + ], + "no-missing-deps": [], + "pyproject-missing-deps": [ + { + "line": 8, + "character": 34, + "endLine": 8, + "endCharacter": 44, + "package": "flake8-csv", + "code": "not-installed", + "severity": 3, + } + ], + "pyproject-no-missing-deps": [], +} + + +@pytest.mark.parametrize("test_name", EXPECTED_DATA.keys()) +def test_installed_check(test_name: str): + base_file = TEST_DATA / f"{test_name}.data" + with generate_file(base_file) as file_path: + result = 
run_on_file(file_path) + assert result == EXPECTED_DATA[test_name] + + +EXPECTED_DATA2 = { + "missing-deps": [ + { + "line": 6, + "character": 0, + "endLine": 6, + "endCharacter": 10, + "package": "flake8-csv", + "code": "not-installed", + "severity": 0, + }, + { + "line": 10, + "character": 0, + "endLine": 10, + "endCharacter": 11, + "package": "levenshtein", + "code": "not-installed", + "severity": 0, + }, + ], + "pyproject-missing-deps": [ + { + "line": 8, + "character": 34, + "endLine": 8, + "endCharacter": 44, + "package": "flake8-csv", + "code": "not-installed", + "severity": 0, + } + ], +} + + +@pytest.mark.parametrize("test_name", EXPECTED_DATA2.keys()) +def test_with_severity(test_name: str): + base_file = TEST_DATA / f"{test_name}.data" + with generate_file(base_file) as file_path: + result = run_on_file(file_path, severity="0") + assert result == EXPECTED_DATA2[test_name] diff --git a/extensions/positron-python/pythonFiles/tests/test_normalize_selection.py b/extensions/positron-python/pythonFiles/tests/test_normalize_selection.py new file mode 100644 index 00000000000..5f4d6d7d4a1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_normalize_selection.py @@ -0,0 +1,270 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ + +import importlib +import textwrap + +# __file__ = "/Users/anthonykim/Desktop/vscode-python/pythonFiles/normalizeSelection.py" +# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__)))) +import normalizeSelection + + +class TestNormalizationScript(object): + """Unit tests for the normalization script.""" + + def test_basicNormalization(self): + src = 'print("this is a test")' + expected = src + "\n" + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_moreThanOneLine(self): + src = textwrap.dedent( + """\ + # Some rando comment + + def show_something(): + print("Something") + """ + ) + expected = textwrap.dedent( + """\ + def show_something(): + print("Something") + + """ + ) + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_withHangingIndent(self): + src = textwrap.dedent( + """\ + x = 22 + y = 30 + z = -10 + result = x + y + z + + if result == 42: + print("The answer to life, the universe, and everything") + """ + ) + expected = textwrap.dedent( + """\ + x = 22 + y = 30 + z = -10 + result = x + y + z + if result == 42: + print("The answer to life, the universe, and everything") + + """ + ) + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_clearOutExtraneousNewlines(self): + src = textwrap.dedent( + """\ + value_x = 22 + + value_y = 30 + + value_z = -10 + + print(value_x + value_y + value_z) + + """ + ) + expected = textwrap.dedent( + """\ + value_x = 22 + value_y = 30 + value_z = -10 + print(value_x + value_y + value_z) + """ + ) + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_clearOutExtraLinesAndWhitespace(self): + src = textwrap.dedent( + """\ + if True: + x = 22 + + y = 30 + + z = -10 + + print(x + y + z) + + """ + ) + expected = textwrap.dedent( + """\ + if True: + x = 22 + y = 30 + z = -10 + + print(x + y + z) + """ + ) + result = 
normalizeSelection.normalize_lines(src) + assert result == expected + + def test_partialSingleLine(self): + src = " print('foo')" + expected = textwrap.dedent(src) + "\n" + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_multiLineWithIndent(self): + src = """\ + + if (x > 0 + and condition == True): + print('foo') + else: + + print('bar') + """ + + expected = textwrap.dedent( + """\ + if (x > 0 + and condition == True): + print('foo') + else: + print('bar') + + """ + ) + + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_multiLineWithComment(self): + src = textwrap.dedent( + """\ + + def show_something(): + # A comment + print("Something") + """ + ) + expected = textwrap.dedent( + """\ + def show_something(): + # A comment + print("Something") + + """ + ) + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_exception(self): + src = " if True:" + expected = src + "\n\n" + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_multilineException(self): + src = textwrap.dedent( + """\ + + def show_something(): + if True: + """ + ) + expected = src + "\n\n" + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_decorators(self): + src = textwrap.dedent( + """\ + def foo(func): + + def wrapper(): + print('before') + func() + print('after') + + return wrapper + + + @foo + def show_something(): + print("Something") + """ + ) + expected = textwrap.dedent( + """\ + def foo(func): + def wrapper(): + print('before') + func() + print('after') + return wrapper + + @foo + def show_something(): + print("Something") + + """ + ) + result = normalizeSelection.normalize_lines(src) + assert result == expected + + def test_fstring(self): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + name = "Ahri" + age = 10 + + print(f'My name is {name}') + """ + ) + + expected = 
textwrap.dedent( + """\ + name = "Ahri" + age = 10 + print(f'My name is {name}') + """ + ) + result = normalizeSelection.normalize_lines(src) + + assert result == expected + + def test_list_comp(self): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + names = ['Ahri', 'Bobby', 'Charlie'] + breed = ['Pomeranian', 'Welsh Corgi', 'Siberian Husky'] + dogs = [(name, breed) for name, breed in zip(names, breed)] + + print(dogs) + my_family_dog = 'Corgi' + """ + ) + + expected = textwrap.dedent( + """\ + names = ['Ahri', 'Bobby', 'Charlie'] + breed = ['Pomeranian', 'Welsh Corgi', 'Siberian Husky'] + dogs = [(name, breed) for name, breed in zip(names, breed)] + print(dogs) + my_family_dog = 'Corgi' + """ + ) + + result = normalizeSelection.normalize_lines(src) + + assert result == expected diff --git a/extensions/positron-python/pythonFiles/tests/test_shell_integration.py b/extensions/positron-python/pythonFiles/tests/test_shell_integration.py new file mode 100644 index 00000000000..896df416ece --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_shell_integration.py @@ -0,0 +1,59 @@ +import importlib +import sys +from unittest.mock import Mock +import pythonrc + + +def test_decoration_success(): + importlib.reload(pythonrc) + ps1 = pythonrc.ps1() + + ps1.hooks.failure_flag = False + result = str(ps1) + if sys.platform != "win32": + assert ( + result + == "\x1b]633;D;0\x07\x1b]633;A\x07>>> \x1b]633;B\x07\x1b]633;C\x07\x1b]633;E;None\x07" + ) + else: + pass + + +def test_decoration_failure(): + importlib.reload(pythonrc) + ps1 = pythonrc.ps1() + + ps1.hooks.failure_flag = True + result = str(ps1) + if sys.platform != "win32": + assert ( + result + == "\x1b]633;D;1\x07\x1b]633;A\x07>>> \x1b]633;B\x07\x1b]633;C\x07\x1b]633;E;None\x07" + ) + else: + pass + + +def test_displayhook_call(): + importlib.reload(pythonrc) + pythonrc.ps1() + mock_displayhook = Mock() + + hooks = pythonrc.repl_hooks() + hooks.original_displayhook = 
mock_displayhook + + hooks.my_displayhook("mock_value") + + mock_displayhook.assert_called_once_with("mock_value") + + +def test_excepthook_call(): + importlib.reload(pythonrc) + pythonrc.ps1() + mock_excepthook = Mock() + + hooks = pythonrc.repl_hooks() + hooks.original_excepthook = mock_excepthook + + hooks.my_excepthook("mock_type", "mock_value", "mock_traceback") + mock_excepthook.assert_called_once_with("mock_type", "mock_value", "mock_traceback") diff --git a/extensions/positron-python/pythonFiles/tests/test_smart_selection.py b/extensions/positron-python/pythonFiles/tests/test_smart_selection.py new file mode 100644 index 00000000000..b29bf30c538 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/test_smart_selection.py @@ -0,0 +1,413 @@ +import importlib +import textwrap + +import normalizeSelection + + +def test_part_dictionary(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + not_dictionary = 'hi' + my_dict = { + "key1": "value1", + "key2": "value2" + } + print('only send the dictionary') + """ + ) + + expected = textwrap.dedent( + """\ + my_dict = { + "key1": "value1", + "key2": "value2" + } + """ + ) + + result = normalizeSelection.traverse_file(src, 3, 3, False) + assert result["normalized_smart_result"] == expected + + +def test_nested_loop(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + for i in range(1, 6): + for j in range(1, 6): + for x in range(1, 5): + for y in range(1, 5): + for z in range(1,10): + print(i, j, x, y, z) + """ + ) + expected = textwrap.dedent( + """\ + for i in range(1, 6): + for j in range(1, 6): + for x in range(1, 5): + for y in range(1, 5): + for z in range(1,10): + print(i, j, x, y, z) + + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["normalized_smart_result"] == expected + + +def test_smart_shift_enter_multiple_statements(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + import textwrap + 
import ast + + print("Porsche") + print("Genesis") + + + print("Audi");print("BMW");print("Mercedes") + + print("dont print me") + + """ + ) + # Expected to printing statement line by line, + # for when multiple print statements are ran + # from the same line. + expected = textwrap.dedent( + """\ + print("Audi") + print("BMW") + print("Mercedes") + """ + ) + result = normalizeSelection.traverse_file(src, 8, 8, False) + assert result["normalized_smart_result"] == expected + + +def test_two_layer_dictionary(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + print("dont print me") + + two_layered_dictionary = { + 'inner_dict_one': { + 'Audi': 'Germany', + 'BMW': 'Germnay', + 'Genesis': 'Korea', + }, + 'inner_dict_two': { + 'Mercedes': 'Germany', + 'Porsche': 'Germany', + 'Lamborghini': 'Italy', + 'Ferrari': 'Italy', + 'Maserati': 'Italy' + } + } + """ + ) + expected = textwrap.dedent( + """\ + two_layered_dictionary = { + 'inner_dict_one': { + 'Audi': 'Germany', + 'BMW': 'Germnay', + 'Genesis': 'Korea', + }, + 'inner_dict_two': { + 'Mercedes': 'Germany', + 'Porsche': 'Germany', + 'Lamborghini': 'Italy', + 'Ferrari': 'Italy', + 'Maserati': 'Italy' + } + } + """ + ) + result = normalizeSelection.traverse_file(src, 6, 7, False) + + assert result["normalized_smart_result"] == expected + + +def test_run_whole_func(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + print("Decide which dog you will choose") + def my_dogs(): + print("Corgi") + print("Husky") + print("Corgi2") + print("Husky2") + print("no dogs") + """ + ) + + expected = textwrap.dedent( + """\ + def my_dogs(): + print("Corgi") + print("Husky") + print("Corgi2") + print("Husky2") + print("no dogs") + + """ + ) + result = normalizeSelection.traverse_file(src, 2, 2, False) + + assert result["normalized_smart_result"] == expected + + +def test_small_forloop(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + for i in range(1, 6): + print(i) 
+ print("Please also send this print statement") + """ + ) + expected = textwrap.dedent( + """\ + for i in range(1, 6): + print(i) + print("Please also send this print statement") + + """ + ) + + # Cover the whole for loop block with multiple inner statements + # Make sure to contain all of the print statements included. + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["normalized_smart_result"] == expected + + +def inner_for_loop_component(): + """ + Pressing shift+enter inside a for loop, + specifically on a viable expression + by itself, such as print(i) + should only return that exact expression + """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + for i in range(1, 6): + print(i) + print("Please also send this print statement") + """ + ) + result = normalizeSelection.traverse_file(src, 2, 2, False) + expected = textwrap.dedent( + """\ + print(i) + """ + ) + + assert result["normalized_smart_result"] == expected + + +def test_dict_comprehension(): + """ + Having the mouse cursor on the first line, + and pressing shift+enter should return the + whole dictionary comp, respecting user's code style. 
+ """ + + importlib.reload + src = textwrap.dedent( + """\ + my_dict_comp = {temp_mover: + temp_mover for temp_mover in range(1, 7)} + """ + ) + + expected = textwrap.dedent( + """\ + my_dict_comp = {temp_mover: + temp_mover for temp_mover in range(1, 7)} + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["normalized_smart_result"] == expected + + +def test_send_whole_generator(): + """ + Pressing shift+enter on the first line, which is the '(' + should be returning the whole generator expression instead of just the '(' + """ + + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + ( + my_first_var + for my_first_var in range(1, 10) + if my_first_var % 2 == 0 + ) + """ + ) + + expected = textwrap.dedent( + """\ + ( + my_first_var + for my_first_var in range(1, 10) + if my_first_var % 2 == 0 + ) + + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + + assert result["normalized_smart_result"] == expected + + +def test_multiline_lambda(): + """ + Shift+enter on part of the lambda expression + should return the whole lambda expression, + regardless of whether all the component of + lambda expression is on the same or not. 
+ """ + + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + my_lambda = lambda x: ( + x + 1 + ) + """ + ) + expected = textwrap.dedent( + """\ + my_lambda = lambda x: ( + x + 1 + ) + + """ + ) + + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["normalized_smart_result"] == expected + + +def test_send_whole_class(): + """ + Shift+enter on a class definition + should send the whole class definition + """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + class Stub(object): + def __init__(self): + self.calls = [] + + def add_call(self, name, args=None, kwargs=None): + self.calls.append((name, args, kwargs)) + print("We should be here after running whole class") + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + expected = textwrap.dedent( + """\ + class Stub(object): + def __init__(self): + self.calls = [] + def add_call(self, name, args=None, kwargs=None): + self.calls.append((name, args, kwargs)) + + """ + ) + assert result["normalized_smart_result"] == expected + + +def test_send_whole_if_statement(): + """ + Shift+enter on an if statement + should send the whole if statement + including statements inside and else. 
+ """ + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + if True: + print('send this') + else: + print('also send this') + + print('cursor here afterwards') + """ + ) + expected = textwrap.dedent( + """\ + if True: + print('send this') + else: + print('also send this') + + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["normalized_smart_result"] == expected + + +def test_send_try(): + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + try: + 1+1 + except: + print("error") + + print("Not running this") + """ + ) + expected = textwrap.dedent( + """\ + try: + 1+1 + except: + print("error") + + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["normalized_smart_result"] == expected + + +# --- Start Positron --- + + +def test_positron_comment(): + # If we call from a comment, use the next top-level node. + importlib.reload(normalizeSelection) + src = textwrap.dedent( + """\ + # a comment + 1 + 2 + """ + ) + expected = textwrap.dedent( + """\ + 1 + """ + ) + result = normalizeSelection.traverse_file(src, 1, 1, False) + assert result["normalized_smart_result"] == expected + + +# --- End Positron --- diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/C/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/C/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/C/test_Spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/C/test_Spam.py new file mode 100644 index 00000000000..3501b9e118e --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/C/test_Spam.py @@ -0,0 +1,3 @@ + +def test_okay(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/A/b/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/NormCase/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/README.md b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/README.md new file mode 100644 index 00000000000..e30e96142d0 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/README.md @@ -0,0 +1,156 @@ +## Directory Structure + +``` 
+pythonFiles/tests/testing_tools/adapter/.data/ + tests/ # test root + test_doctest.txt + test_pytest.py + test_unittest.py + test_mixed.py + spam.py # note: no "test_" prefix, but contains tests + test_foo.py + test_42.py + test_42-43.py # note the hyphen + testspam.py + v/ + __init__.py + spam.py + test_eggs.py + test_ham.py + test_spam.py + w/ + # no __init__.py + test_spam.py + test_spam_ex.py + x/y/z/ # each with a __init__.py + test_ham.py + a/ + __init__.py + test_spam.py + b/ + __init__.py + test_spam.py +``` + +## Tests (and Suites) + +basic: + +- `./test_foo.py::test_simple` +- `./test_pytest.py::test_simple` +- `./test_pytest.py::TestSpam::test_simple` +- `./test_pytest.py::TestSpam::TestHam::TestEggs::test_simple` +- `./test_pytest.py::TestEggs::test_simple` +- `./test_pytest.py::TestParam::test_simple` +- `./test_mixed.py::test_top_level` +- `./test_mixed.py::MyTests::test_simple` +- `./test_mixed.py::TestMySuite::test_simple` +- `./test_unittest.py::MyTests::test_simple` +- `./test_unittest.py::OtherTests::test_simple` +- `./x/y/z/test_ham.py::test_simple` +- `./x/y/z/a/test_spam.py::test_simple` +- `./x/y/z/b/test_spam.py::test_simple` + +failures: + +- `./test_pytest.py::test_failure` +- `./test_pytest.py::test_runtime_failed` +- `./test_pytest.py::test_raises` + +skipped: + +- `./test_mixed.py::test_skipped` +- `./test_mixed.py::MyTests::test_skipped` +- `./test_pytest.py::test_runtime_skipped` +- `./test_pytest.py::test_skipped` +- `./test_pytest.py::test_maybe_skipped` +- `./test_pytest.py::SpamTests::test_skipped` +- `./test_pytest.py::test_param_13_markers[???]` +- `./test_pytest.py::test_param_13_skipped[*]` +- `./test_unittest.py::MyTests::test_skipped` +- (`./test_unittest.py::MyTests::test_maybe_skipped`) +- (`./test_unittest.py::MyTests::test_maybe_not_skipped`) + +in namespace package: + +- `./w/test_spam.py::test_simple` +- `./w/test_spam_ex.py::test_simple` + +filename oddities: + +- `./test_42.py::test_simple` +- 
`./test_42-43.py::test_simple` +- (`./testspam.py::test_simple` not discovered by default) +- (`./spam.py::test_simple` not discovered) + +imports discovered: + +- `./v/test_eggs.py::test_simple` +- `./v/test_eggs.py::TestSimple::test_simple` +- `./v/test_ham.py::test_simple` +- `./v/test_ham.py::test_not_hard` +- `./v/test_spam.py::test_simple` +- `./v/test_spam.py::test_simpler` + +subtests: + +- `./test_pytest.py::test_dynamic_*` +- `./test_pytest.py::test_param_01[]` +- `./test_pytest.py::test_param_11[1]` +- `./test_pytest.py::test_param_13[*]` +- `./test_pytest.py::test_param_13_markers[*]` +- `./test_pytest.py::test_param_13_repeat[*]` +- `./test_pytest.py::test_param_13_skipped[*]` +- `./test_pytest.py::test_param_23_13[*]` +- `./test_pytest.py::test_param_23_raises[*]` +- `./test_pytest.py::test_param_33[*]` +- `./test_pytest.py::test_param_33_ids[*]` +- `./test_pytest.py::TestParam::test_param_13[*]` +- `./test_pytest.py::TestParamAll::test_param_13[*]` +- `./test_pytest.py::TestParamAll::test_spam_13[*]` +- `./test_pytest.py::test_fixture_param[*]` +- `./test_pytest.py::test_param_fixture[*]` +- `./test_pytest_param.py::test_param_13[*]` +- `./test_pytest_param.py::TestParamAll::test_param_13[*]` +- `./test_pytest_param.py::TestParamAll::test_spam_13[*]` +- (`./test_unittest.py::MyTests::test_with_subtests`) +- (`./test_unittest.py::MyTests::test_with_nested_subtests`) +- (`./test_unittest.py::MyTests::test_dynamic_*`) + +For more options for pytests's parametrize(), see +https://docs.pytest.org/en/latest/example/parametrize.html#paramexamples. 
+ +using fixtures: + +- `./test_pytest.py::test_fixture` +- `./test_pytest.py::test_fixture_param[*]` +- `./test_pytest.py::test_param_fixture[*]` +- `./test_pytest.py::test_param_mark_fixture[*]` + +other markers: + +- `./test_pytest.py::test_known_failure` +- `./test_pytest.py::test_param_markers[2]` +- `./test_pytest.py::test_warned` +- `./test_pytest.py::test_custom_marker` +- `./test_pytest.py::test_multiple_markers` +- (`./test_unittest.py::MyTests::test_known_failure`) + +others not discovered: + +- (`./test_pytest.py::TestSpam::TestHam::TestEggs::TestNoop1`) +- (`./test_pytest.py::TestSpam::TestNoop2`) +- (`./test_pytest.py::TestNoop3`) +- (`./test_pytest.py::MyTests::test_simple`) +- (`./test_unittest.py::MyTests::TestSub1`) +- (`./test_unittest.py::MyTests::TestSub2`) +- (`./test_unittest.py::NoTests`) + +doctests: + +- `./test_doctest.txt::test_doctest.txt` +- (`./test_doctest.py::test_doctest.py`) +- (`../mod.py::mod`) +- (`../mod.py::mod.square`) +- (`../mod.py::mod.Spam`) +- (`../mod.py::mod.spam.eggs`) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py new file mode 100644 index 00000000000..b8c49550389 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py @@ -0,0 +1,51 @@ +""" + +Examples: + +>>> square(1) +1 +>>> square(2) +4 +>>> square(3) +9 +>>> spam = Spam() +>>> spam.eggs() +42 +""" + + +def square(x): + """ + + Examples: + + >>> square(1) + 1 + >>> square(2) + 4 + >>> square(3) + 9 + """ + return x * x + + +class Spam(object): + """ + + Examples: + + >>> spam = Spam() + >>> spam.eggs() + 42 + """ + + def eggs(self): + """ + + Examples: + + >>> spam = Spam() + >>> spam.eggs() + 42 + """ + return 42 diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/__init__.py 
b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/spam.py new file mode 100644 index 00000000000..4c4134d7558 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/spam.py @@ -0,0 +1,3 @@ + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42-43.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42-43.py new file mode 100644 index 00000000000..4c4134d7558 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42-43.py @@ -0,0 +1,3 @@ + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42.py new file mode 100644 index 00000000000..4c4134d7558 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_42.py @@ -0,0 +1,3 @@ + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.py new file mode 100644 index 00000000000..27cccbdb77c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.py @@ -0,0 +1,6 @@ +""" +Doctests: + +>>> 1 == 1 +True +""" diff --git 
a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.txt b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.txt new file mode 100644 index 00000000000..4b51fde5667 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_doctest.txt @@ -0,0 +1,15 @@ + +assignment & lookup: + +>>> x = 3 +>>> x +3 + +deletion: + +>>> del x +>>> x +Traceback (most recent call last): + ... +NameError: name 'x' is not defined + diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_foo.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_foo.py new file mode 100644 index 00000000000..e752106f503 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_foo.py @@ -0,0 +1,4 @@ + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_mixed.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_mixed.py new file mode 100644 index 00000000000..e9c675647f1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_mixed.py @@ -0,0 +1,27 @@ +import pytest +import unittest + + +def test_top_level(): + assert True + + +@pytest.mark.skip +def test_skipped(): + assert False + + +class TestMySuite(object): + + def test_simple(self): + assert True + + +class MyTests(unittest.TestCase): + + def test_simple(self): + assert True + + @pytest.mark.skip + def test_skipped(self): + assert False diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest.py new file mode 100644 
index 00000000000..39d3ece9c0b --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest.py @@ -0,0 +1,227 @@ +# ... + +import pytest + + +def test_simple(): + assert True + + +def test_failure(): + assert False + + +def test_runtime_skipped(): + pytest.skip('???') + + +def test_runtime_failed(): + pytest.fail('???') + + +def test_raises(): + raise Exception + + +@pytest.mark.skip +def test_skipped(): + assert False + + +@pytest.mark.skipif(True) +def test_maybe_skipped(): + assert False + + +@pytest.mark.xfail +def test_known_failure(): + assert False + + +@pytest.mark.filterwarnings +def test_warned(): + assert False + + +@pytest.mark.spam +def test_custom_marker(): + assert False + + +@pytest.mark.filterwarnings +@pytest.mark.skip +@pytest.mark.xfail +@pytest.mark.skipif(True) +@pytest.mark.skip +@pytest.mark.spam +def test_multiple_markers(): + assert False + + +for i in range(3): + def func(): + assert True + globals()['test_dynamic_{}'.format(i + 1)] = func +del func + + +class TestSpam(object): + + def test_simple(): + assert True + + @pytest.mark.skip + def test_skipped(self): + assert False + + class TestHam(object): + + class TestEggs(object): + + def test_simple(): + assert True + + class TestNoop1(object): + pass + + class TestNoop2(object): + pass + + +class TestEggs(object): + + def test_simple(): + assert True + + +# legend for parameterized test names: +# "test_param_XY[_XY]*" +# X - # params +# Y - # cases +# [_XY]* - extra decorators + +@pytest.mark.parametrize('', [()]) +def test_param_01(): + assert True + + +@pytest.mark.parametrize('x', [(1,)]) +def test_param_11(x): + assert x == 1 + + +@pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) +def test_param_13(x): + assert x == 1 + + +@pytest.mark.parametrize('x', [(1,), (1,), (1,)]) +def test_param_13_repeat(x): + assert x == 1 + + +@pytest.mark.parametrize('x,y,z', [(1, 1, 1), (3, 4, 5), (0, 0, 0)]) +def test_param_33(x, y, z): + 
assert x*x + y*y == z*z + + +@pytest.mark.parametrize('x,y,z', [(1, 1, 1), (3, 4, 5), (0, 0, 0)], + ids=['v1', 'v2', 'v3']) +def test_param_33_ids(x, y, z): + assert x*x + y*y == z*z + + +@pytest.mark.parametrize('z', [(1,), (5,), (0,)]) +@pytest.mark.parametrize('x,y', [(1, 1), (3, 4), (0, 0)]) +def test_param_23_13(x, y, z): + assert x*x + y*y == z*z + + +@pytest.mark.parametrize('x', [ + (1,), + pytest.param(1.0, marks=[pytest.mark.skip, pytest.mark.spam], id='???'), + pytest.param(2, marks=[pytest.mark.xfail]), + ]) +def test_param_13_markers(x): + assert x == 1 + + +@pytest.mark.skip +@pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) +def test_param_13_skipped(x): + assert x == 1 + + +@pytest.mark.parametrize('x,catch', [(1, None), (1.0, None), (2, pytest.raises(Exception))]) +def test_param_23_raises(x, catch): + if x != 1: + with catch: + raise Exception + + +class TestParam(object): + + def test_simple(): + assert True + + @pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) + def test_param_13(self, x): + assert x == 1 + + +@pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) +class TestParamAll(object): + + def test_param_13(self, x): + assert x == 1 + + def test_spam_13(self, x): + assert x == 1 + + +@pytest.fixture +def spamfix(request): + yield 'spam' + + +@pytest.fixture(params=['spam', 'eggs']) +def paramfix(request): + return request.param + + +def test_fixture(spamfix): + assert spamfix == 'spam' + + +@pytest.mark.usefixtures('spamfix') +def test_mark_fixture(): + assert True + + +@pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) +def test_param_fixture(spamfix, x): + assert spamfix == 'spam' + assert x == 1 + + +@pytest.mark.parametrize('x', [ + (1,), + (1.0,), + pytest.param(1+0j, marks=[pytest.mark.usefixtures('spamfix')]), + ]) +def test_param_mark_fixture(x): + assert x == 1 + + +def test_fixture_param(paramfix): + assert paramfix == 'spam' + + +class TestNoop3(object): + pass + + +class MyTests(object): # does not match default 
name pattern + + def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest_param.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest_param.py new file mode 100644 index 00000000000..bd22d89f42b --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_pytest_param.py @@ -0,0 +1,18 @@ +import pytest + + +# module-level parameterization +pytestmark = pytest.mark.parametrize('x', [(1,), (1.0,), (1+0j,)]) + + +def test_param_13(x): + assert x == 1 + + +class TestParamAll(object): + + def test_param_13(self, x): + assert x == 1 + + def test_spam_13(self, x): + assert x == 1 diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_unittest.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_unittest.py new file mode 100644 index 00000000000..dd3e8253573 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/test_unittest.py @@ -0,0 +1,66 @@ +import unittest + + +class MyTests(unittest.TestCase): + + def test_simple(self): + self.assertTrue(True) + + @unittest.skip('???') + def test_skipped(self): + self.assertTrue(False) + + @unittest.skipIf(True, '???') + def test_maybe_skipped(self): + self.assertTrue(False) + + @unittest.skipUnless(False, '???') + def test_maybe_not_skipped(self): + self.assertTrue(False) + + def test_skipped_inside(self): + raise unittest.SkipTest('???') + + class TestSub1(object): + + def test_simple(self): + self.assertTrue(True) + + class TestSub2(unittest.TestCase): + + def test_simple(self): + self.assertTrue(True) + + def test_failure(self): + raise Exception + + @unittest.expectedFailure + def test_known_failure(self): + raise Exception + + def test_with_subtests(self): + for i in range(3): + with 
self.subtest(i): # This is invalid under Py2. + self.assertTrue(True) + + def test_with_nested_subtests(self): + for i in range(3): + with self.subtest(i): # This is invalid under Py2. + for j in range(3): + with self.subtest(i): # This is invalid under Py2. + self.assertTrue(True) + + for i in range(3): + def test_dynamic_(self, i=i): + self.assertEqual(True) + test_dynamic_.__name__ += str(i) + + +class OtherTests(unittest.TestCase): + + def test_simple(self): + self.assertTrue(True) + + +class NoTests(unittest.TestCase): + pass diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/testspam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/testspam.py new file mode 100644 index 00000000000..7ec91c783e2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/testspam.py @@ -0,0 +1,9 @@ +''' +... +... +... +''' + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/spam.py new file mode 100644 index 00000000000..18c92c09306 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/spam.py @@ -0,0 +1,9 @@ + +def test_simple(self): + assert True + + +class TestSimple(object): + + def test_simple(self): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_eggs.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_eggs.py new 
file mode 100644 index 00000000000..f3e7d951763 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_eggs.py @@ -0,0 +1 @@ +from .spam import * diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_ham.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_ham.py new file mode 100644 index 00000000000..6b6a01f87ec --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_ham.py @@ -0,0 +1,2 @@ +from .spam import test_simple +from .spam import test_simple as test_not_hard diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_spam.py new file mode 100644 index 00000000000..18cf56f9053 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/v/test_spam.py @@ -0,0 +1,5 @@ +from .spam import test_simple + + +def test_simpler(self): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam.py new file mode 100644 index 00000000000..6a0b60d1d5b --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam.py @@ -0,0 +1,5 @@ + + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam_ex.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam_ex.py new file mode 100644 index 00000000000..6a0b60d1d5b --- /dev/null +++ 
b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/w/test_spam_ex.py @@ -0,0 +1,5 @@ + + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/a/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/a/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/a/test_spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/a/test_spam.py new file mode 100644 index 00000000000..bdb7e4fec3a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/a/test_spam.py @@ -0,0 +1,12 @@ +""" +... +""" + + +# ... 
+ +ANSWER = 42 + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/b/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/b/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/b/test_spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/b/test_spam.py new file mode 100644 index 00000000000..4923c556c29 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/b/test_spam.py @@ -0,0 +1,8 @@ + + +# ?!? +CHORUS = 'spamspamspamspamspam...' + + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/test_ham.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/test_ham.py new file mode 100644 index 00000000000..4c4134d7558 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/complex/tests/x/y/z/test_ham.py @@ -0,0 +1,3 @@ + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/notests/tests/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/notests/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/simple/tests/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/simple/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/simple/tests/test_spam.py 
b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/simple/tests/test_spam.py new file mode 100644 index 00000000000..4c4134d7558 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/simple/tests/test_spam.py @@ -0,0 +1,3 @@ + +def test_simple(): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/syntax-error/tests/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/syntax-error/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/syntax-error/tests/test_spam.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/syntax-error/tests/test_spam.py new file mode 100644 index 00000000000..54d6400a346 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/.data/syntax-error/tests/test_spam.py @@ -0,0 +1,7 @@ + +def test_simple(): + assert True + + +# A syntax error: +: diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_cli.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_cli.py new file mode 100644 index 00000000000..6f590a31fa5 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_cli.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +from ....util import Stub, StubProxy +from testing_tools.adapter.errors import UnsupportedCommandError +from testing_tools.adapter.pytest._cli import add_subparser + + +class StubSubparsers(StubProxy): + def __init__(self, stub=None, name="subparsers"): + super(StubSubparsers, self).__init__(stub, name) + + def add_parser(self, name): + self.add_call("add_parser", None, {"name": name}) + return self.return_add_parser + + +class StubArgParser(StubProxy): + def __init__(self, stub=None): + super(StubArgParser, self).__init__(stub, "argparser") + + def add_argument(self, *args, **kwargs): + self.add_call("add_argument", args, kwargs) + + +class AddCLISubparserTests(unittest.TestCase): + def test_discover(self): + stub = Stub() + subparsers = StubSubparsers(stub) + parser = StubArgParser(stub) + subparsers.return_add_parser = parser + + add_subparser("discover", "pytest", subparsers) + + self.assertEqual( + stub.calls, + [ + ("subparsers.add_parser", None, {"name": "pytest"}), + ], + ) + + def test_unsupported_command(self): + subparsers = StubSubparsers(name=None) + subparsers.return_add_parser = None + + with self.assertRaises(UnsupportedCommandError): + add_subparser("run", "pytest", subparsers) + with self.assertRaises(UnsupportedCommandError): + add_subparser("debug", "pytest", subparsers) + with self.assertRaises(UnsupportedCommandError): + add_subparser("???", "pytest", subparsers) + self.assertEqual( + subparsers.calls, + [ + ("add_parser", None, {"name": "pytest"}), + ("add_parser", None, 
{"name": "pytest"}), + ("add_parser", None, {"name": "pytest"}), + ], + ) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py new file mode 100644 index 00000000000..83eeaa1f906 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py @@ -0,0 +1,1645 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import print_function, unicode_literals + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # type: ignore (for Pylance) + +import os +import sys +import tempfile +import unittest + +import _pytest.doctest +import pytest +from testing_tools.adapter import info +from testing_tools.adapter import util as adapter_util +from testing_tools.adapter.pytest import _discovery +from testing_tools.adapter.pytest import _pytest_item as pytest_item + +from .... 
import util + + +def unique(collection, key): + result = [] + keys = [] + for item in collection: + k = key(item) + if k in keys: + continue + result.append(item) + keys.append(k) + return result + + +class StubPyTest(util.StubProxy): + def __init__(self, stub=None): + super(StubPyTest, self).__init__(stub, "pytest") + self.return_main = 0 + + def main(self, args, plugins): + self.add_call("main", None, {"args": args, "plugins": plugins}) + return self.return_main + + +class StubPlugin(util.StubProxy): + _started = True + + def __init__(self, stub=None, tests=None): + super(StubPlugin, self).__init__(stub, "plugin") + if tests is None: + tests = StubDiscoveredTests(self.stub) + self._tests = tests + + def __getattr__(self, name): + if not name.startswith("pytest_"): + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubDiscoveredTests(util.StubProxy): + NOT_FOUND = object() + + def __init__(self, stub=None): + super(StubDiscoveredTests, self).__init__(stub, "discovered") + self.return_items = [] + self.return_parents = [] + + def __len__(self): + self.add_call("__len__", None, None) + return len(self.return_items) + + def __getitem__(self, index): + self.add_call("__getitem__", (index,), None) + return self.return_items[index] + + @property + def parents(self): + self.add_call("parents", None, None) + return self.return_parents + + def reset(self): + self.add_call("reset", None, None) + + def add_test(self, test, parents): + self.add_call("add_test", None, {"test": test, "parents": parents}) + + +class FakeFunc(object): + def __init__(self, name): + self.__name__ = name + + +class FakeMarker(object): + def __init__(self, name): + self.name = name + + +class StubPytestItem(util.StubProxy): + _debugging = False + _hasfunc = True + + def __init__(self, stub=None, **attrs): + super(StubPytestItem, self).__init__(stub, "pytest.Item") + if attrs.get("function") is None: + 
attrs.pop("function", None) + self._hasfunc = False + + attrs.setdefault("user_properties", []) + + slots = getattr(type(self), "__slots__", None) + if slots: + for name, value in attrs.items(): + if name in self.__slots__: + setattr(self, name, value) + else: + self.__dict__[name] = value + else: + self.__dict__.update(attrs) + + if "own_markers" not in attrs: + self.own_markers = () + + def __repr__(self): + return object.__repr__(self) + + def __getattr__(self, name): + if not self._debugging: + self.add_call(name + " (attr)", None, None) + if name == "function": + if not self._hasfunc: + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubSubtypedItem(StubPytestItem): + @classmethod + def from_args(cls, *args, **kwargs): + if not hasattr(cls, "from_parent"): + return cls(*args, **kwargs) + self = cls.from_parent(None, name=kwargs["name"], runner=None, dtest=None) + self.__init__(*args, **kwargs) + return self + + def __init__(self, *args, **kwargs): + super(StubSubtypedItem, self).__init__(*args, **kwargs) + if "nodeid" in self.__dict__: + self._nodeid = self.__dict__.pop("nodeid") + + @property + def location(self): + return self.__dict__.get("location") + + +class StubFunctionItem(StubSubtypedItem, pytest.Function): + @property + def function(self): + return self.__dict__.get("function") + + +def create_stub_function_item(*args, **kwargs): + return StubFunctionItem.from_args(*args, **kwargs) + + +class StubDoctestItem(StubSubtypedItem, _pytest.doctest.DoctestItem): + pass + + +def create_stub_doctest_item(*args, **kwargs): + return StubDoctestItem.from_args(*args, **kwargs) + + +class StubPytestSession(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestSession, self).__init__(stub, "pytest.Session") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or 
None) + + return func + + +class StubPytestConfig(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestConfig, self).__init__(stub, "pytest.Config") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +def generate_parse_item(pathsep): + if pathsep == "\\": + + def normcase(path): + path = path.lower() + return path.replace("/", "\\") + + else: + raise NotImplementedError + + ########## + def _fix_fileid(*args): + return adapter_util.fix_fileid( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _normalize_test_id(*args): + return pytest_item._normalize_test_id( + *args, + **dict( + # dependency injection + _fix_fileid=_fix_fileid, + _pathsep=pathsep, + ) + ) + + def _iter_nodes(*args): + return pytest_item._iter_nodes( + *args, + **dict( + # dependency injection + _normalize_test_id=_normalize_test_id, + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _parse_node_id(*args): + return pytest_item._parse_node_id( + *args, + **dict( + # dependency injection + _iter_nodes=_iter_nodes, + ) + ) + + ########## + def _split_fspath(*args): + return pytest_item._split_fspath( + *args, + **dict( + # dependency injection + _normcase=normcase, + ) + ) + + ########## + def _matches_relfile(*args): + return pytest_item._matches_relfile( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _is_legacy_wrapper(*args): + return pytest_item._is_legacy_wrapper( + *args, + **dict( + # dependency injection + _pathsep=pathsep, + ) + ) + + def _get_location(*args): + return pytest_item._get_location( + *args, + **dict( + # dependency injection + _matches_relfile=_matches_relfile, + _is_legacy_wrapper=_is_legacy_wrapper, + _pathsep=pathsep, + ) + ) + + ########## + def _parse_item(item): + return pytest_item.parse_item( + item, + **dict( + # 
dependency injection + _parse_node_id=_parse_node_id, + _split_fspath=_split_fspath, + _get_location=_get_location, + ) + ) + + return _parse_item + + +################################## +# tests + + +def fake_pytest_main(stub, use_fd, pytest_stdout): + def ret(args, plugins): + stub.add_call("pytest.main", None, {"args": args, "plugins": plugins}) + if use_fd: + os.write(sys.stdout.fileno(), pytest_stdout.encode()) + else: + print(pytest_stdout, end="") + return 0 + + return ret + + +class DiscoverTests(unittest.TestCase): + DEFAULT_ARGS = [ + "--collect-only", + ] + + def test_basic(self): + stub = util.Stub() + stubpytest = StubPyTest(stub) + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + parents, tests = _discovery.discover( + [], _pytest_main=stubpytest.main, _plugin=plugin + ) + + actual_calls = unique(stub.calls, lambda k: k[0]) + expected_calls = unique(calls, lambda k: k[0]) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(actual_calls, expected_calls) + + def test_failure(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 2 + plugin = StubPlugin(stub) + + with self.assertRaises(Exception): + _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin) + + self.assertEqual( + stub.calls, + [ + # There's only one call. 
+ ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ], + ) + + def test_no_tests_found(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 5 + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + parents, tests = _discovery.discover( + [], _pytest_main=pytest.main, _plugin=plugin + ) + + actual_calls = unique(stub.calls, lambda k: k[0]) + expected_calls = unique(calls, lambda k: k[0]) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(actual_calls, expected_calls) + + def test_found_with_collection_error(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 1 + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + parents, tests = _discovery.discover( + [], _pytest_main=pytest.main, _plugin=plugin + ) + + actual_calls = unique(stub.calls, lambda k: k[0]) + expected_calls = unique(calls, lambda k: k[0]) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(actual_calls, expected_calls) + + def test_stdio_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # to simulate stdio behavior in methods like os.dup, + # use actual files (rather than StringIO) + 
with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + + mock.seek(0) + captured = mock.read() + + actual_calls = unique(stub.calls, lambda k: k[0]) + expected_calls = unique(calls, lambda k: k[0]) + + self.assertEqual(captured, "") + self.assertEqual(actual_calls, expected_calls) + + def test_stdio_hidden_fd(self): + # simulate cases where stdout comes from the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # Replace with contextlib.redirect_stdout() once Python 2.7 support is dropped. + sys.stdout = StringIO() + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + captured = sys.stdout.read() + self.assertEqual(captured, "") + finally: + sys.stdout = sys.__stdout__ + + def test_stdio_not_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + buf = StringIO() + + sys.stdout = buf + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + captured = buf.getvalue() + + actual_calls = unique(stub.calls, lambda k: k[0]) + expected_calls = unique(calls, lambda k: k[0]) + + self.assertEqual(captured, pytest_stdout) + self.assertEqual(actual_calls, expected_calls) + + def test_stdio_not_hidden_fd(self): + # simulate cases where stdout comes from 
the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + stub.calls = [] + with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + finally: + mock.seek(0) + captured = sys.stdout.read() + sys.stdout = sys.__stdout__ + self.assertEqual(captured, pytest_stdout) + + +class CollectorTests(unittest.TestCase): + def test_modifyitems(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + config = StubPytestConfig(stub) + collector = _discovery.TestCollector(tests=discovered) + + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile1 = adapter_util.fix_path("./test_spam.py") + relfile2 = adapter_util.fix_path("x/y/z/test_eggs.py") + + collector.pytest_collection_modifyitems( + session, + config, + [ + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_one", + name="test_one", + originalname=None, + location=("test_spam.py", 12, "SpamTests.test_one"), + path=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_one"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_other", + name="test_other", + originalname=None, + location=("test_spam.py", 19, "SpamTests.test_other"), + path=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_other"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_all", + name="test_all", + originalname=None, + location=("test_spam.py", 144, "test_all"), + path=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_all"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + originalname="test_each", + 
location=("test_spam.py", 273, "test_each[10-10]"), + path=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_each"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_first", + name="test_first", + originalname=None, + location=(relfile2, 31, "All.BasicTests.test_first"), + path=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_first"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_each[1+2-3]", + name="test_each[1+2-3]", + originalname="test_each", + location=(relfile2, 62, "All.BasicTests.test_each[1+2-3]"), + path=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_each"), + own_markers=[ + FakeMarker(v) + for v in [ + # supported + "skip", + "skipif", + "xfail", + # duplicate + "skip", + # ignored (pytest-supported) + "parameterize", + "usefixtures", + "filterwarnings", + # ignored (custom) + "timeout", + ] + ], + ), + ], + ) + + self.maxDiff = None + expected = [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_one", + name="test_one", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_one", + sub=None, + ), + source="{}:{}".format(relfile1, 13), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_other", + name="test_other", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_other", + sub=None, + ), + source="{}:{}".format(relfile1, 
20), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_all", + name="test_all", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_all", + sub=None, + ), + source="{}:{}".format(relfile1, 145), + markers=None, + parentid="./test_spam.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::test_each", "test_each", "function"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_each[10-10]", + name="10-10", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile1, 274), + markers=None, + parentid="./test_spam.py::test_each", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + ("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_first", + name="test_first", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(relfile2), 32), + markers=None, + parentid="./x/y/z/test_eggs.py::All::BasicTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests::test_each", + "test_each", + "function", + ), + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + 
("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_each[1+2-3]", + name="1+2-3", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_each", + sub=["[1+2-3]"], + ), + source="{}:{}".format(adapter_util.fix_relpath(relfile2), 63), + markers=["expected-failure", "skip", "skip-if"], + parentid="./x/y/z/test_eggs.py::All::BasicTests::test_each", + ), + ), + ), + ] + self.assertEqual(stub.calls, expected) + + def test_finish(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + originalname=None, + location=(relfile, 12, "SpamTests.test_spam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( 
+ adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_doctest(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + doctestfile = adapter_util.fix_path("x/test_doctest.txt") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_doctest_item( + stub, + nodeid=doctestfile + "::test_doctest.txt", + name="test_doctest.txt", + location=(doctestfile, 0, "[doctest] test_doctest.txt"), + path=adapter_util.PATH_JOIN(testroot, doctestfile), + ), + # With --doctest-modules + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs", + name="test_eggs", + location=(relfile, 0, "[doctest] test_eggs"), + path=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + location=(relfile, 12, "[doctest] test_eggs.TestSpam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + location=(relfile, 27, "[doctest] test_eggs.TestSpam.TestEggs"), + path=adapter_util.PATH_JOIN(testroot, relfile), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/test_doctest.txt", "test_doctest.txt", "file"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/test_doctest.txt::test_doctest.txt", + name="test_doctest.txt", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(doctestfile), + func=None, + ), + source="{}:{}".format( + 
adapter_util.fix_relpath(doctestfile), 1 + ), + markers=[], + parentid="./x/test_doctest.txt", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs", + name="test_eggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(relfile), 1), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 28 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) + + def test_nested_brackets(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = 
adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam[a-[b]-c]", + name="test_spam[a-[b]-c]", + originalname="test_spam", + location=(relfile, 12, "SpamTests.test_spam[a-[b]-c]"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::test_spam", + "test_spam", + "function", + ), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam[a-[b]-c]", + name="a-[b]-c", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=["[a-[b]-c]"], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::test_spam", + ), + ), + ), + ], + ) + + def test_nested_suite(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + originalname=None, + location=(relfile, 12, "SpamTests.Ham.Eggs.test_spam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = 
_discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + "Eggs", + "suite", + ), + ("./x/y/z/test_eggs.py::SpamTests::Ham", "Ham", "suite"), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.Ham.Eggs.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + ), + ), + ), + ], + ) + + @pytest.mark.skipif(sys.platform != "win32", reason="Windows specific test.") + def test_windows(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = r"C:\A\B\C" + relfile = r"X\Y\Z\test_Eggs.py" + session.items = [ + # typical: + create_stub_function_item( + stub, + # pytest always uses "/" as the path separator in node IDs: + nodeid="X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + originalname=None, + # normal path separator (contrast with nodeid): + location=(relfile, 12, "SpamTests.test_spam"), + # path separator matches location: + path=testroot + "\\" + relfile, + function=FakeFunc("test_spam"), + ), + ] + tests = [ + # permutations of path separators + (r"X/test_a.py", "\\", "\\"), # typical + (r"X/test_b.py", "\\", "/"), + (r"X/test_c.py", "/", "\\"), + (r"X/test_d.py", "/", "/"), + (r"X\test_e.py", "\\", "\\"), + (r"X\test_f.py", "\\", "/"), + 
(r"X\test_g.py", "/", "\\"), + (r"X\test_h.py", "/", "/"), + ] + for fileid, locfile, fspath in tests: + if locfile == "/": + locfile = fileid.replace("\\", "/") + elif locfile == "\\": + locfile = fileid.replace("/", "\\") + if fspath == "/": + fspath = (testroot + "/" + fileid).replace("\\", "/") + elif fspath == "\\": + fspath = (testroot + "/" + fileid).replace("/", "\\") + session.items.append( + create_stub_function_item( + stub, + nodeid=fileid + "::test_spam", + name="test_spam", + originalname=None, + location=(locfile, 12, "test_spam"), + path=fspath, + function=FakeFunc("test_spam"), + ) + ) + collector = _discovery.TestCollector(tests=discovered) + if os.name != "nt": + collector.parse_item = generate_parse_item("\\") + + collector.pytest_collection_finish(session) + + self.maxDiff = None + expected = [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/Y/Z/test_Eggs.py::SpamTests", "SpamTests", "suite"), + (r"./X/Y/Z/test_Eggs.py", "test_Eggs.py", "file"), + (r"./X/Y/Z", "Z", "folder"), + (r"./X/Y", "Y", "folder"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, # not normalized + relfile=r".\X\Y\Z\test_Eggs.py", # not normalized + func="SpamTests.test_spam", + sub=None, + ), + source=r".\X\Y\Z\test_Eggs.py:13", # not normalized + markers=None, + parentid=r"./X/Y/Z/test_Eggs.py::SpamTests", + ), + ), + ), + # permutations + # (*all* the IDs use "/") + # (source path separator should match relfile, not location) + # /, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_a.py", "test_a.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_a.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_a.py", + 
func="test_spam", + sub=None, + ), + source=r".\X\test_a.py:13", + markers=None, + parentid=r"./X/test_a.py", + ), + ), + ), + # /, \, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_b.py", "test_b.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_b.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_b.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_b.py:13", + markers=None, + parentid=r"./X/test_b.py", + ), + ), + ), + # /, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_c.py", "test_c.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_c.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_c.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_c.py:13", + markers=None, + parentid=r"./X/test_c.py", + ), + ), + ), + # /, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_d.py", "test_d.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_d.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_d.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_d.py:13", + markers=None, + parentid=r"./X/test_d.py", + ), + ), + ), + # \, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_e.py", "test_e.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_e.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_e.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_e.py:13", + markers=None, + parentid=r"./X/test_e.py", + ), + ), + ), + # \, \, / + ( + "discovered.add_test", + None, + 
dict( + parents=[ + (r"./X/test_f.py", "test_f.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_f.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_f.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_f.py:13", + markers=None, + parentid=r"./X/test_f.py", + ), + ), + ), + # \, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_g.py", "test_g.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_g.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_g.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_g.py:13", + markers=None, + parentid=r"./X/test_g.py", + ), + ), + ), + # \, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_h.py", "test_h.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_h.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_h.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_h.py:13", + markers=None, + parentid=r"./X/test_h.py", + ), + ), + ), + ] + self.assertEqual(stub.calls, expected) + + def test_mysterious_parens(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::()::()::test_spam", + name="test_spam", + originalname=None, + location=(relfile, 12, "SpamTests.test_spam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + 
collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=[], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_mysterious_colons(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests:::()::test_spam", + name="test_spam", + originalname=None, + location=(relfile, 12, "SpamTests.test_spam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + 
root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=[], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_imported_test(self): + # pytest will even discover tests that were imported from + # another module! + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.ABS_PATH(adapter_util.fix_path("/a/b/c")) + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + srcfile = adapter_util.fix_path("x/y/z/_extern.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + originalname=None, + location=(srcfile, 12, "SpamTests.test_spam"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + create_stub_function_item( + stub, + nodeid=relfile + "::test_ham", + name="test_ham", + originalname=None, + location=(srcfile, 3, "test_ham"), + path=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(srcfile), 13 + ), + markers=None, + 
parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_ham", + name="test_ham", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="test_ham", + sub=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(srcfile), 4), + markers=None, + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test___main__.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test___main__.py new file mode 100644 index 00000000000..5ff0ec30c94 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test___main__.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import unittest + +from testing_tools.adapter.__main__ import ( + UnsupportedCommandError, + UnsupportedToolError, + main, + parse_args, +) + +from ...util import Stub, StubProxy + + +class StubTool(StubProxy): + def __init__(self, name, stub=None): + super(StubTool, self).__init__(stub, name) + self.return_discover = None + + def discover(self, args, **kwargs): + self.add_call("discover", (args,), kwargs) + if self.return_discover is None: + raise NotImplementedError + return self.return_discover + + +class StubReporter(StubProxy): + def __init__(self, stub=None): + super(StubReporter, self).__init__(stub, "reporter") + + def report(self, tests, parents, **kwargs): + self.add_call("report", (tests, parents), kwargs or None) + + +################################## +# tests + + +class ParseGeneralTests(unittest.TestCase): + def test_unsupported_command(self): + with self.assertRaises(SystemExit): + parse_args(["run", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["debug", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["???", "pytest"]) + + +class ParseDiscoverTests(unittest.TestCase): + def test_pytest_default(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual(toolargs, []) + + def test_pytest_full(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + # no adapter-specific options yet + "--", + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual( + toolargs, + [ + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ], + ) + + def test_pytest_opts(self): 
+ tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + "--simple", + "--no-hide-stdio", + "--pretty", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": True, "hidestdio": False, "simple": True}) + self.assertEqual(toolargs, []) + + def test_unsupported_tool(self): + with self.assertRaises(SystemExit): + parse_args(["discover", "unittest"]) + with self.assertRaises(SystemExit): + parse_args(["discover", "???"]) + + +class MainTests(unittest.TestCase): + # TODO: We could use an integration test for pytest.discover(). + + def test_discover(self): + stub = Stub() + tool = StubTool("spamspamspam", stub) + tests, parents = object(), object() + tool.return_discover = (parents, tests) + reporter = StubReporter(stub) + main( + tool.name, + "discover", + {"spam": "eggs"}, + [], + _tools={ + tool.name: { + "discover": tool.discover, + } + }, + _reporters={ + "discover": reporter.report, + }, + ) + + self.assertEqual( + tool.calls, + [ + ("spamspamspam.discover", ([],), {"spam": "eggs"}), + ("reporter.report", (tests, parents), {"spam": "eggs"}), + ], + ) + + def test_unsupported_tool(self): + with self.assertRaises(UnsupportedToolError): + main( + "unittest", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + with self.assertRaises(UnsupportedToolError): + main( + "???", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + + def test_unsupported_command(self): + tool = StubTool("pytest") + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "run", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "debug", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + 
"???", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + self.assertEqual(tool.calls, []) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_discovery.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_discovery.py new file mode 100644 index 00000000000..cf3b8fb3139 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_discovery.py @@ -0,0 +1,674 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, print_function + +import unittest + +from testing_tools.adapter.discovery import DiscoveredTests, fix_nodeid +from testing_tools.adapter.info import ParentInfo, SingleTestInfo, SingleTestPath +from testing_tools.adapter.util import fix_path, fix_relpath + + +def _fix_nodeid(nodeid): + nodeid = nodeid.replace("\\", "/") + if not nodeid.startswith("./"): + nodeid = "./" + nodeid + return nodeid + + +class DiscoveredTestsTests(unittest.TestCase): + def test_list(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_spam.py") + tests = [ + SingleTestInfo( + # missing "./": + id="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./": + parentid="test_spam.py::test_each", + ), + SingleTestInfo( + id="test_spam.py::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 62), + markers=None, + parentid="test_spam.py::All::BasicTests", + ), + ] + allparents = [ + [ + (fix_path("./test_spam.py::test_each"), "test_each", "function"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + 
], + [ + (fix_path("./test_spam.py::All::BasicTests"), "BasicTests", "suite"), + (fix_path("./test_spam.py::All"), "All", "suite"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in tests + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + size = len(discovered) + items = [discovered[0], discovered[1]] + snapshot = list(discovered) + + self.maxDiff = None + self.assertEqual(size, 2) + self.assertEqual(items, expected) + self.assertEqual(snapshot, expected) + + def test_reset(self): + testroot = fix_path("/a/b/c") + discovered = DiscoveredTests() + discovered.add_test( + SingleTestInfo( + id="./test_spam.py::test_each", + name="test_each", + path=SingleTestPath( + root=testroot, + relfile="test_spam.py", + func="test_each", + ), + source="test_spam.py:11", + markers=[], + parentid="./test_spam.py", + ), + [ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ) + + before = len(discovered), len(discovered.parents) + discovered.reset() + after = len(discovered), len(discovered.parents) + + self.assertEqual(before, (1, 2)) + self.assertEqual(after, (0, 0)) + + def test_parents(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("x/y/z/test_spam.py") + tests = [ + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./", using pathsep: + parentid=relfile + "::test_each", + ), + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + 
func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 61), + markers=None, + # missing "./", using pathsep: + parentid=relfile + "::All::BasicTests", + ), + ] + allparents = [ + # missing "./", using pathsep: + [ + (relfile + "::test_each", "test_each", "function"), + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + # missing "./", using pathsep: + [ + (relfile + "::All::BasicTests", "BasicTests", "suite"), + (relfile + "::All", "All", "suite"), + (relfile, "test_spam.py", "file"), + (fix_path("x/y/z"), "z", "folder"), + (fix_path("x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_spam.py", + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All", + kind="suite", + name="All", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All::BasicTests", + kind="suite", + name="BasicTests", + root=testroot, + parentid="./x/y/z/test_spam.py::All", + ), + ParentInfo( + id="./x/y/z/test_spam.py::test_each", + kind="function", + name="test_each", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ], + ) + + def test_add_test_simple(self): + testroot = 
fix_path("/a/b/c") + relfile = "test_spam.py" + test = SingleTestInfo( + # missing "./": + id=relfile + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + # missing "./": + relfile=relfile, + func="test_spam", + ), + # missing "./": + source="{}:{}".format(relfile, 11), + markers=[], + # missing "./": + parentid=relfile, + ) + expected = test._replace( + id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid) + ) + discovered = DiscoveredTests() + + before = list(discovered), discovered.parents + discovered.add_test( + test, + [ + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + ) + after = list(discovered), discovered.parents + + self.maxDiff = None + self.assertEqual(before, ([], [])) + self.assertEqual( + after, + ( + [expected], + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name=relfile, + root=testroot, + relpath=relfile, + parentid=".", + ), + ], + ), + ) + + def test_multiroot(self): + # the first root + testroot1 = fix_path("/a/b/c") + relfile1 = "test_spam.py" + alltests = [ + SingleTestInfo( + # missing "./": + id=relfile1 + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + # missing "./": + parentid=relfile1, + ), + ] + allparents = [ + # missing "./": + [ + (relfile1, "test_spam.py", "file"), + (".", testroot1, "folder"), + ], + ] + # the second root + testroot2 = fix_path("/x/y/z") + relfile2 = fix_path("w/test_eggs.py") + alltests.extend( + [ + SingleTestInfo( + id=relfile2 + "::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid=relfile2 + "::BasicTests", + ), + ] + ) + allparents.extend( + [ + # missing "./", using pathsep: + [ 
+ (relfile2 + "::BasicTests", "BasicTests", "suite"), + (relfile2, "test_eggs.py", "file"), + (fix_path("./w"), "w", "folder"), + (".", testroot2, "folder"), + ], + ] + ) + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + tests, + [ + # the first root + SingleTestInfo( + id="./test_spam.py::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + parentid="./test_spam.py", + ), + # the secondroot + SingleTestInfo( + id="./w/test_eggs.py::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid="./w/test_eggs.py::BasicTests", + ), + ], + ) + self.assertEqual( + parents, + [ + # the first root + ParentInfo( + id=".", + kind="folder", + name=testroot1, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name="test_spam.py", + root=testroot1, + relpath=fix_relpath(relfile1), + parentid=".", + ), + # the secondroot + ParentInfo( + id=".", + kind="folder", + name=testroot2, + ), + ParentInfo( + id="./w", + kind="folder", + name="w", + root=testroot2, + relpath=fix_path("./w"), + parentid=".", + ), + ParentInfo( + id="./w/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot2, + relpath=fix_relpath(relfile2), + parentid="./w", + ), + ParentInfo( + id="./w/test_eggs.py::BasicTests", + kind="suite", + name="BasicTests", + root=testroot2, + parentid="./w/test_eggs.py", + ), + ], + ) + + def test_doctest(self): + testroot = fix_path("/a/b/c") + doctestfile = fix_path("./x/test_doctest.txt") + relfile = fix_path("./x/y/z/test_eggs.py") + alltests = [ + SingleTestInfo( + id=doctestfile + 
"::test_doctest.txt", + name="test_doctest.txt", + path=SingleTestPath( + root=testroot, + relfile=doctestfile, + func=None, + ), + source="{}:{}".format(doctestfile, 0), + markers=[], + parentid=doctestfile, + ), + # With --doctest-modules + SingleTestInfo( + id=relfile + "::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 0), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 12), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 27), + markers=[], + parentid=relfile, + ), + ] + allparents = [ + [ + (doctestfile, "test_doctest.txt", "file"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + 
self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/test_doctest.txt", + kind="file", + name="test_doctest.txt", + root=testroot, + relpath=fix_path(doctestfile), + parentid="./x", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ], + ) + + def test_nested_suite_simple(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_eggs.py") + alltests = [ + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_spam", + ), + source="{}:{}".format(relfile, 10), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_eggs", + ), + source="{}:{}".format(relfile, 21), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + ] + allparents = [ + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + 
test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid=".", + ), + ParentInfo( + id="./test_eggs.py::TestOuter", + kind="suite", + name="TestOuter", + root=testroot, + parentid="./test_eggs.py", + ), + ParentInfo( + id="./test_eggs.py::TestOuter::TestInner", + kind="suite", + name="TestInner", + root=testroot, + parentid="./test_eggs.py::TestOuter", + ), + ], + ) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_functional.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_functional.py new file mode 100644 index 00000000000..a78d36a5fdc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_functional.py @@ -0,0 +1,1536 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, unicode_literals + +import json +import os +import os.path +import subprocess +import sys +import unittest + +from testing_tools.adapter.util import PATH_SEP, fix_path + +from ...__main__ import TESTING_TOOLS_ROOT + +# Pytest 3.7 and later uses pathlib/pathlib2 for path resolution. 
+try: + from pathlib import Path +except ImportError: + from pathlib2 import Path # type: ignore (for Pylance) + + +CWD = os.getcwd() +DATA_DIR = os.path.join(os.path.dirname(__file__), ".data") +SCRIPT = os.path.join(TESTING_TOOLS_ROOT, "run_adapter.py") + + +def resolve_testroot(name): + projroot = os.path.join(DATA_DIR, name) + testroot = os.path.join(projroot, "tests") + return str(Path(projroot).resolve()), str(Path(testroot).resolve()) + + +def run_adapter(cmd, tool, *cliargs): + try: + return _run_adapter(cmd, tool, *cliargs) + except subprocess.CalledProcessError as exc: + print(exc.output) + + +def _run_adapter(cmd, tool, *cliargs, **kwargs): + hidestdio = kwargs.pop("hidestdio", True) + assert not kwargs or tuple(kwargs) == ("stderr",) + kwds = kwargs + argv = [sys.executable, SCRIPT, cmd, tool, "--"] + list(cliargs) + if not hidestdio: + argv.insert(4, "--no-hide-stdio") + kwds["stderr"] = subprocess.STDOUT + argv.append("--cache-clear") + print( + "running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv)) + ) + output = subprocess.check_output(argv, universal_newlines=True, **kwds) + return output + + +def fix_test_order(tests): + if sys.version_info >= (3, 6): + return tests + fixed = [] + curfile = None + group = [] + for test in tests: + if (curfile or "???") not in test["id"]: + fixed.extend(sorted(group, key=lambda t: t["id"])) + group = [] + curfile = test["id"].partition(".py::")[0] + ".py" + group.append(test) + fixed.extend(sorted(group, key=lambda t: t["id"])) + return fixed + + +def fix_source(tests, testid, srcfile, lineno): + for test in tests: + if test["id"] == testid: + break + else: + raise KeyError("test {!r} not found".format(testid)) + if not srcfile: + srcfile = test["source"].rpartition(":")[0] + test["source"] = fix_path("{}:{}".format(srcfile, lineno)) + + +def sorted_object(obj): + if isinstance(obj, dict): + return sorted((key, sorted_object(obj[key])) for key in obj.keys()) + if isinstance(obj, list): + 
return sorted((sorted_object(x) for x in obj)) + else: + return obj + + +# Note that these tests are skipped if util.PATH_SEP is not os.path.sep. +# This is because the functional tests should reflect the actual +# operating environment. + + +class PytestTests(unittest.TestCase): + def setUp(self): + if PATH_SEP is not os.path.sep: + raise unittest.SkipTest("functional tests require unmodified env") + super(PytestTests, self).setUp() + + def complex(self, testroot): + results = COMPLEX.copy() + results["root"] = testroot + return [results] + + def test_discover_simple(self): + projroot, testroot = resolve_testroot("simple") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/test_spam.py"), + "parentid": "./tests", + }, + ], + "tests": [ + { + "id": "./tests/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_spam.py:2"), + "markers": [], + "parentid": "./tests/test_spam.py", + }, + ], + } + ], + ) + + def test_discover_complex_default(self): + projroot, testroot = resolve_testroot("complex") + expected = self.complex(projroot) + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + result[0]["tests"] = 
fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_complex_doctest(self): + projroot, _ = resolve_testroot("complex") + expected = self.complex(projroot) + # add in doctests from test suite + expected[0]["parents"].insert( + 3, + { + "id": "./tests/test_doctest.py", + "kind": "file", + "name": "test_doctest.py", + "relpath": fix_path("./tests/test_doctest.py"), + "parentid": "./tests", + }, + ) + expected[0]["tests"].insert( + 2, + { + "id": "./tests/test_doctest.py::tests.test_doctest", + "name": "tests.test_doctest", + "source": fix_path("./tests/test_doctest.py:1"), + "markers": [], + "parentid": "./tests/test_doctest.py", + }, + ) + # add in doctests from non-test module + expected[0]["parents"].insert( + 0, + { + "id": "./mod.py", + "kind": "file", + "name": "mod.py", + "relpath": fix_path("./mod.py"), + "parentid": ".", + }, + ) + expected[0]["tests"] = [ + { + "id": "./mod.py::mod", + "name": "mod", + "source": fix_path("./mod.py:1"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam", + "name": "mod.Spam", + "source": fix_path("./mod.py:33"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam.eggs", + "name": "mod.Spam.eggs", + "source": fix_path("./mod.py:43"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.square", + "name": "mod.square", + "source": fix_path("./mod.py:18"), + "markers": [], + "parentid": "./mod.py", + }, + ] + expected[0]["tests"] + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter( + "discover", "pytest", "--rootdir", 
projroot, "--doctest-modules", projroot + ) + result = json.loads(out) + result[0]["tests"] = fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_not_found(self): + projroot, testroot = resolve_testroot("notests") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual(result, []) + # TODO: Expect the following instead? + # self.assertEqual(result, [{ + # 'root': projroot, + # 'rootid': '.', + # 'parents': [], + # 'tests': [], + # }]) + + @unittest.skip("broken in CI") + def test_discover_bad_args(self): + projroot, testroot = resolve_testroot("simple") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--spam", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 4)", cm.exception.output) + + def test_discover_syntax_error(self): + projroot, testroot = resolve_testroot("syntax-error") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 2)", cm.exception.output) + + def test_discover_normcase(self): + projroot, testroot = resolve_testroot("NormCase") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertTrue(projroot.endswith("NormCase")) + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/A", + "kind": "folder", + "name": "A", + "relpath": fix_path("./tests/A"), + "parentid": "./tests", + }, + { + "id": "./tests/A/b", + "kind": "folder", + "name": "b", + "relpath": 
fix_path("./tests/A/b"), + "parentid": "./tests/A", + }, + { + "id": "./tests/A/b/C", + "kind": "folder", + "name": "C", + "relpath": fix_path("./tests/A/b/C"), + "parentid": "./tests/A/b", + }, + { + "id": "./tests/A/b/C/test_Spam.py", + "kind": "file", + "name": "test_Spam.py", + "relpath": fix_path("./tests/A/b/C/test_Spam.py"), + "parentid": "./tests/A/b/C", + }, + ], + "tests": [ + { + "id": "./tests/A/b/C/test_Spam.py::test_okay", + "name": "test_okay", + "source": fix_path("./tests/A/b/C/test_Spam.py:2"), + "markers": [], + "parentid": "./tests/A/b/C/test_Spam.py", + }, + ], + } + ], + ) + + +COMPLEX = { + "root": None, + "rootid": ".", + "parents": [ + # + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + # +++ + { + "id": "./tests/test_42-43.py", + "kind": "file", + "name": "test_42-43.py", + "relpath": fix_path("./tests/test_42-43.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_42.py", + "kind": "file", + "name": "test_42.py", + "relpath": fix_path("./tests/test_42.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_doctest.txt", + "kind": "file", + "name": "test_doctest.txt", + "relpath": fix_path("./tests/test_doctest.txt"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_foo.py", + "kind": "file", + "name": "test_foo.py", + "relpath": fix_path("./tests/test_foo.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_mixed.py", + "kind": "file", + "name": "test_mixed.py", + "relpath": fix_path("./tests/test_mixed.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_mixed.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite", + "kind": "suite", + "name": "TestMySuite", + "parentid": "./tests/test_mixed.py", + }, + # +++ + { + "id": "./tests/test_pytest.py", + "kind": "file", + "name": "test_pytest.py", + "relpath": 
fix_path("./tests/test_pytest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest.py::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam", + "kind": "suite", + "name": "TestParam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": "./tests/test_pytest.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestSpam", + "kind": "suite", + "name": "TestSpam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam", + "kind": "suite", + "name": "TestHam", + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py::TestSpam::TestHam", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param", + "kind": "function", + "name": "test_fixture_param", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_01", + "kind": "function", + "name": "test_param_01", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_11", + "kind": "function", + "name": "test_param_11", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13", + "kind": "function", + "name": 
"test_param_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers", + "kind": "function", + "name": "test_param_13_markers", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat", + "kind": "function", + "name": "test_param_13_repeat", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped", + "kind": "function", + "name": "test_param_13_skipped", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13", + "kind": "function", + "name": "test_param_23_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises", + "kind": "function", + "name": "test_param_23_raises", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33", + "kind": "function", + "name": "test_param_33", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids", + "kind": "function", + "name": "test_param_33_ids", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture", + "kind": "function", + "name": "test_param_fixture", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture", + "kind": "function", + "name": "test_param_mark_fixture", + "parentid": "./tests/test_pytest.py", + }, + # +++ + { + "id": "./tests/test_pytest_param.py", + "kind": "file", + "name": "test_pytest_param.py", + "relpath": fix_path("./tests/test_pytest_param.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest_param.py", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": 
"./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest_param.py", + }, + # +++ + { + "id": "./tests/test_unittest.py", + "kind": "file", + "name": "test_unittest.py", + "relpath": fix_path("./tests/test_unittest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_unittest.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_unittest.py", + }, + { + "id": "./tests/test_unittest.py::OtherTests", + "kind": "suite", + "name": "OtherTests", + "parentid": "./tests/test_unittest.py", + }, + ## + { + "id": "./tests/v", + "kind": "folder", + "name": "v", + "relpath": fix_path("./tests/v"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/v/test_eggs.py", + "kind": "file", + "name": "test_eggs.py", + "relpath": fix_path("./tests/v/test_eggs.py"), + "parentid": "./tests/v", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple", + "kind": "suite", + "name": "TestSimple", + "parentid": "./tests/v/test_eggs.py", + }, + ## +++ + { + "id": "./tests/v/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/v/test_ham.py"), + "parentid": "./tests/v", + }, + ## +++ + { + "id": "./tests/v/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/v/test_spam.py"), + "parentid": "./tests/v", + }, + ## + { + "id": "./tests/w", + "kind": "folder", + "name": "w", + "relpath": fix_path("./tests/w"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/w/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/w/test_spam.py"), + "parentid": "./tests/w", + }, + ## +++ + { + "id": "./tests/w/test_spam_ex.py", + "kind": 
"file", + "name": "test_spam_ex.py", + "relpath": fix_path("./tests/w/test_spam_ex.py"), + "parentid": "./tests/w", + }, + ## + { + "id": "./tests/x", + "kind": "folder", + "name": "x", + "relpath": fix_path("./tests/x"), + "parentid": "./tests", + }, + ### + { + "id": "./tests/x/y", + "kind": "folder", + "name": "y", + "relpath": fix_path("./tests/x/y"), + "parentid": "./tests/x", + }, + #### + { + "id": "./tests/x/y/z", + "kind": "folder", + "name": "z", + "relpath": fix_path("./tests/x/y/z"), + "parentid": "./tests/x/y", + }, + ##### + { + "id": "./tests/x/y/z/a", + "kind": "folder", + "name": "a", + "relpath": fix_path("./tests/x/y/z/a"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/a/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/a/test_spam.py"), + "parentid": "./tests/x/y/z/a", + }, + ##### + { + "id": "./tests/x/y/z/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./tests/x/y/z/b"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/b/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/b/test_spam.py"), + "parentid": "./tests/x/y/z/b", + }, + #### +++ + { + "id": "./tests/x/y/z/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/x/y/z/test_ham.py"), + "parentid": "./tests/x/y/z", + }, + ], + "tests": [ + ########## + { + "id": "./tests/test_42-43.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42-43.py:2"), + "markers": [], + "parentid": "./tests/test_42-43.py", + }, + ##### + { + "id": "./tests/test_42.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42.py:2"), + "markers": [], + "parentid": "./tests/test_42.py", + }, + ##### + { + "id": "./tests/test_doctest.txt::test_doctest.txt", + "name": "test_doctest.txt", + "source": fix_path("./tests/test_doctest.txt:1"), + "markers": [], + "parentid": 
"./tests/test_doctest.txt", + }, + ##### + { + "id": "./tests/test_foo.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_foo.py:3"), + "markers": [], + "parentid": "./tests/test_foo.py", + }, + ##### + { + "id": "./tests/test_mixed.py::test_top_level", + "name": "test_top_level", + "source": fix_path("./tests/test_mixed.py:5"), + "markers": [], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:9"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:16"), + "markers": [], + "parentid": "./tests/test_mixed.py::TestMySuite", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:22"), + "markers": [], + "parentid": "./tests/test_mixed.py::MyTests", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:25"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py::MyTests", + }, + ##### + { + "id": "./tests/test_pytest.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:6"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_failure", + "name": "test_failure", + "source": fix_path("./tests/test_pytest.py:10"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_skipped", + "name": "test_runtime_skipped", + "source": fix_path("./tests/test_pytest.py:14"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_failed", + "name": "test_runtime_failed", + "source": fix_path("./tests/test_pytest.py:18"), + "markers": [], + 
"parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_raises", + "name": "test_raises", + "source": fix_path("./tests/test_pytest.py:22"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:26"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_pytest.py:31"), + "markers": ["skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_pytest.py:36"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_warned", + "name": "test_warned", + "source": fix_path("./tests/test_pytest.py:41"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_custom_marker", + "name": "test_custom_marker", + "source": fix_path("./tests/test_pytest.py:46"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_multiple_markers", + "name": "test_multiple_markers", + "source": fix_path("./tests/test_pytest.py:51"), + "markers": ["expected-failure", "skip", "skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_1", + "name": "test_dynamic_1", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_2", + "name": "test_dynamic_2", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_3", + "name": "test_dynamic_3", + "source": 
fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:70"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:73"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:81"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + }, + { + "id": "./tests/test_pytest.py::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:93"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestEggs", + }, + { + "id": "./tests/test_pytest.py::test_param_01[]", + "name": "", + "source": fix_path("./tests/test_pytest.py:103"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_01", + }, + { + "id": "./tests/test_pytest.py::test_param_11[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:108"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_11", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + 
"id": "./tests/test_pytest.py::test_param_13_repeat[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_33[1-1-1]", + "name": "1-1-1", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[3-4-5]", + "name": "3-4-5", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[0-0-0]", + "name": "0-0-0", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v1]", + "name": "v1", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v2]", + "name": "v2", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v3]", + "name": "v3", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z0]", + "name": "1-1-z0", + "source": 
fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z1]", + "name": "1-1-z1", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z2]", + "name": "1-1-z2", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z0]", + "name": "3-4-z0", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z1]", + "name": "3-4-z1", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z2]", + "name": "3-4-z2", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z0]", + "name": "0-0-z0", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z1]", + "name": "0-0-z1", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z2]", + "name": "0-0-z2", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": [], + 
"parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[???]", + "name": "???", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[2]", + "name": "2", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1-None]", + "name": "1-None", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1.0-None]", + "name": "1.0-None", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[2-catch2]", + "name": "2-catch2", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_simple", + "name": "test_simple", + "source": 
fix_path("./tests/test_pytest.py:164"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": 
"./tests/test_pytest.py::TestParamAll::test_spam_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::test_fixture", + "name": "test_fixture", + "source": fix_path("./tests/test_pytest.py:192"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_mark_fixture", + "name": "test_mark_fixture", + "source": fix_path("./tests/test_pytest.py:196"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[(1+0j)]", + "name": "(1+0j)", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[spam]", + "name": 
"spam", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[eggs]", + "name": "eggs", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + ###### + { + "id": "./tests/test_pytest_param.py::test_param_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]", + "name": "x0", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": 
"./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]", + "name": "x1", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]", + "name": "x2", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + ###### + { + "id": "./tests/test_unittest.py::MyTests::test_dynamic_", + "name": "test_dynamic_", + "source": fix_path("./tests/test_unittest.py:54"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_failure", + "name": "test_failure", + "source": fix_path("./tests/test_unittest.py:34"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_unittest.py:37"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + "name": "test_maybe_not_skipped", + "source": fix_path("./tests/test_unittest.py:17"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_unittest.py:13"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:6"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped", + "name": "test_skipped", + "source": 
fix_path("./tests/test_unittest.py:9"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped_inside", + "name": "test_skipped_inside", + "source": fix_path("./tests/test_unittest.py:21"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_nested_subtests", + "name": "test_with_nested_subtests", + "source": fix_path("./tests/test_unittest.py:46"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_subtests", + "name": "test_with_subtests", + "source": fix_path("./tests/test_unittest.py:41"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::OtherTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:61"), + "markers": [], + "parentid": "./tests/test_unittest.py::OtherTests", + }, + ########### + { + "id": "./tests/v/test_eggs.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_eggs.py", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:8"), + "markers": [], + "parentid": "./tests/v/test_eggs.py::TestSimple", + }, + ###### + { + "id": "./tests/v/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + { + "id": "./tests/v/test_ham.py::test_not_hard", + "name": "test_not_hard", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + ###### + { + "id": "./tests/v/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, 
+ { + "id": "./tests/v/test_spam.py::test_simpler", + "name": "test_simpler", + "source": fix_path("./tests/v/test_spam.py:4"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, + ########### + { + "id": "./tests/w/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam.py", + }, + { + "id": "./tests/w/test_spam_ex.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam_ex.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam_ex.py", + }, + ########### + { + "id": "./tests/x/y/z/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/test_ham.py:2"), + "markers": [], + "parentid": "./tests/x/y/z/test_ham.py", + }, + ###### + { + "id": "./tests/x/y/z/a/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/a/test_spam.py:11"), + "markers": [], + "parentid": "./tests/x/y/z/a/test_spam.py", + }, + { + "id": "./tests/x/y/z/b/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/b/test_spam.py:7"), + "markers": [], + "parentid": "./tests/x/y/z/b/test_spam.py", + }, + ], +} diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_report.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_report.py new file mode 100644 index 00000000000..bb68c8a65e7 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_report.py @@ -0,0 +1,1179 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import json +import unittest + +from ...util import StubProxy +from testing_tools.adapter.util import fix_path, fix_relpath +from testing_tools.adapter.info import SingleTestInfo, SingleTestPath, ParentInfo +from testing_tools.adapter.report import report_discovered + + +class StubSender(StubProxy): + def send(self, outstr): + self.add_call("send", (json.loads(outstr),), None) + + +################################## +# tests + + +class ReportDiscoveredTests(unittest.TestCase): + def test_basic(self): + stub = StubSender() + testroot = fix_path("/a/b/c") + relfile = "test_spam.py" + relpath = fix_relpath(relfile) + tests = [ + SingleTestInfo( + id="test#1", + name="test_spam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="test_spam", + ), + source="{}:{}".format(relfile, 10), + markers=[], + parentid="file#1", + ), + ] + parents = [ + ParentInfo( + id="", + kind="folder", + name=testroot, + ), + ParentInfo( + id="file#1", + kind="file", + name=relfile, + root=testroot, + relpath=relpath, + parentid="", + ), + ] + expected = [ + { + "rootid": "", + "root": testroot, + "parents": [ + { + "id": "file#1", + "kind": "file", + "name": relfile, + "relpath": relpath, + "parentid": "", + }, + ], + "tests": [ + { + "id": "test#1", + "name": "test_spam", + "source": "{}:{}".format(relfile, 10), + "markers": [], + "parentid": "file#1", + } + ], + } + ] + + report_discovered(tests, parents, _send=stub.send) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("send", (expected,), None), + ], + ) + + def test_multiroot(self): + stub = StubSender() + # the first root + testroot1 = fix_path("/a/b/c") + relfileid1 = "./test_spam.py" + relpath1 = fix_path(relfileid1) + relfile1 = relpath1[2:] + tests = [ + SingleTestInfo( + id=relfileid1 + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=relfile1, + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + parentid=relfileid1, + ), + ] + 
parents = [ + ParentInfo( + id=".", + kind="folder", + name=testroot1, + ), + ParentInfo( + id=relfileid1, + kind="file", + name="test_spam.py", + root=testroot1, + relpath=relpath1, + parentid=".", + ), + ] + expected = [ + { + "rootid": ".", + "root": testroot1, + "parents": [ + { + "id": relfileid1, + "kind": "file", + "name": "test_spam.py", + "relpath": relpath1, + "parentid": ".", + }, + ], + "tests": [ + { + "id": relfileid1 + "::test_spam", + "name": "test_spam", + "source": "{}:{}".format(relfile1, 10), + "markers": [], + "parentid": relfileid1, + } + ], + }, + ] + # the second root + testroot2 = fix_path("/x/y/z") + relfileid2 = "./w/test_eggs.py" + relpath2 = fix_path(relfileid2) + relfile2 = relpath2[2:] + tests.extend( + [ + SingleTestInfo( + id=relfileid2 + "::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot2, + relfile=relfile2, + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid=relfileid2 + "::BasicTests", + ), + ] + ) + parents.extend( + [ + ParentInfo( + id=".", + kind="folder", + name=testroot2, + ), + ParentInfo( + id="./w", + kind="folder", + name="w", + root=testroot2, + relpath=fix_path("./w"), + parentid=".", + ), + ParentInfo( + id=relfileid2, + kind="file", + name="test_eggs.py", + root=testroot2, + relpath=relpath2, + parentid="./w", + ), + ParentInfo( + id=relfileid2 + "::BasicTests", + kind="suite", + name="BasicTests", + root=testroot2, + parentid=relfileid2, + ), + ] + ) + expected.extend( + [ + { + "rootid": ".", + "root": testroot2, + "parents": [ + { + "id": "./w", + "kind": "folder", + "name": "w", + "relpath": fix_path("./w"), + "parentid": ".", + }, + { + "id": relfileid2, + "kind": "file", + "name": "test_eggs.py", + "relpath": relpath2, + "parentid": "./w", + }, + { + "id": relfileid2 + "::BasicTests", + "kind": "suite", + "name": "BasicTests", + "parentid": relfileid2, + }, + ], + "tests": [ + { + "id": relfileid2 + 
"::BasicTests::test_first", + "name": "test_first", + "source": "{}:{}".format(relfile2, 61), + "markers": [], + "parentid": relfileid2 + "::BasicTests", + } + ], + }, + ] + ) + + report_discovered(tests, parents, _send=stub.send) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("send", (expected,), None), + ], + ) + + def test_complex(self): + """ + /a/b/c/ + test_ham.py + MySuite + test_x1 + test_x2 + /a/b/e/f/g/ + w/ + test_ham.py + test_ham1 + HamTests + test_uh_oh + test_whoa + MoreHam + test_yay + sub1 + sub2 + sub3 + test_eggs.py + SpamTests + test_okay + x/ + y/ + a/ + test_spam.py + SpamTests + test_okay + b/ + test_spam.py + SpamTests + test_okay + test_spam.py + SpamTests + test_okay + """ + stub = StubSender() + testroot = fix_path("/a/b/c") + relfileid1 = "./test_ham.py" + relfileid2 = "./test_spam.py" + relfileid3 = "./w/test_ham.py" + relfileid4 = "./w/test_eggs.py" + relfileid5 = "./x/y/a/test_spam.py" + relfileid6 = "./x/y/b/test_spam.py" + tests = [ + SingleTestInfo( + id=relfileid1 + "::MySuite::test_x1", + name="test_x1", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid1), + func="MySuite.test_x1", + ), + source="{}:{}".format(fix_path(relfileid1), 10), + markers=None, + parentid=relfileid1 + "::MySuite", + ), + SingleTestInfo( + id=relfileid1 + "::MySuite::test_x2", + name="test_x2", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid1), + func="MySuite.test_x2", + ), + source="{}:{}".format(fix_path(relfileid1), 21), + markers=None, + parentid=relfileid1 + "::MySuite", + ), + SingleTestInfo( + id=relfileid2 + "::SpamTests::test_okay", + name="test_okay", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid2), + func="SpamTests.test_okay", + ), + source="{}:{}".format(fix_path(relfileid2), 17), + markers=None, + parentid=relfileid2 + "::SpamTests", + ), + SingleTestInfo( + id=relfileid3 + "::test_ham1", + name="test_ham1", + path=SingleTestPath( + root=testroot, + 
relfile=fix_path(relfileid3), + func="test_ham1", + ), + source="{}:{}".format(fix_path(relfileid3), 8), + markers=None, + parentid=relfileid3, + ), + SingleTestInfo( + id=relfileid3 + "::HamTests::test_uh_oh", + name="test_uh_oh", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid3), + func="HamTests.test_uh_oh", + ), + source="{}:{}".format(fix_path(relfileid3), 19), + markers=["expected-failure"], + parentid=relfileid3 + "::HamTests", + ), + SingleTestInfo( + id=relfileid3 + "::HamTests::test_whoa", + name="test_whoa", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid3), + func="HamTests.test_whoa", + ), + source="{}:{}".format(fix_path(relfileid3), 35), + markers=None, + parentid=relfileid3 + "::HamTests", + ), + SingleTestInfo( + id=relfileid3 + "::MoreHam::test_yay[1-2]", + name="test_yay[1-2]", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid3), + func="MoreHam.test_yay", + sub=["[1-2]"], + ), + source="{}:{}".format(fix_path(relfileid3), 57), + markers=None, + parentid=relfileid3 + "::MoreHam::test_yay", + ), + SingleTestInfo( + id=relfileid3 + "::MoreHam::test_yay[1-2][3-4]", + name="test_yay[1-2][3-4]", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid3), + func="MoreHam.test_yay", + sub=["[1-2]", "[3=4]"], + ), + source="{}:{}".format(fix_path(relfileid3), 72), + markers=None, + parentid=relfileid3 + "::MoreHam::test_yay[1-2]", + ), + SingleTestInfo( + id=relfileid4 + "::SpamTests::test_okay", + name="test_okay", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid4), + func="SpamTests.test_okay", + ), + source="{}:{}".format(fix_path(relfileid4), 15), + markers=None, + parentid=relfileid4 + "::SpamTests", + ), + SingleTestInfo( + id=relfileid5 + "::SpamTests::test_okay", + name="test_okay", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid5), + func="SpamTests.test_okay", + ), + source="{}:{}".format(fix_path(relfileid5), 12), + markers=None, + 
parentid=relfileid5 + "::SpamTests", + ), + SingleTestInfo( + id=relfileid6 + "::SpamTests::test_okay", + name="test_okay", + path=SingleTestPath( + root=testroot, + relfile=fix_path(relfileid6), + func="SpamTests.test_okay", + ), + source="{}:{}".format(fix_path(relfileid6), 27), + markers=None, + parentid=relfileid6 + "::SpamTests", + ), + ] + parents = [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id=relfileid1, + kind="file", + name="test_ham.py", + root=testroot, + relpath=fix_path(relfileid1), + parentid=".", + ), + ParentInfo( + id=relfileid1 + "::MySuite", + kind="suite", + name="MySuite", + root=testroot, + parentid=relfileid1, + ), + ParentInfo( + id=relfileid2, + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_path(relfileid2), + parentid=".", + ), + ParentInfo( + id=relfileid2 + "::SpamTests", + kind="suite", + name="SpamTests", + root=testroot, + parentid=relfileid2, + ), + ParentInfo( + id="./w", + kind="folder", + name="w", + root=testroot, + relpath=fix_path("./w"), + parentid=".", + ), + ParentInfo( + id=relfileid3, + kind="file", + name="test_ham.py", + root=testroot, + relpath=fix_path(relfileid3), + parentid="./w", + ), + ParentInfo( + id=relfileid3 + "::HamTests", + kind="suite", + name="HamTests", + root=testroot, + parentid=relfileid3, + ), + ParentInfo( + id=relfileid3 + "::MoreHam", + kind="suite", + name="MoreHam", + root=testroot, + parentid=relfileid3, + ), + ParentInfo( + id=relfileid3 + "::MoreHam::test_yay", + kind="function", + name="test_yay", + root=testroot, + parentid=relfileid3 + "::MoreHam", + ), + ParentInfo( + id=relfileid3 + "::MoreHam::test_yay[1-2]", + kind="subtest", + name="test_yay[1-2]", + root=testroot, + parentid=relfileid3 + "::MoreHam::test_yay", + ), + ParentInfo( + id=relfileid4, + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_path(relfileid4), + parentid="./w", + ), + ParentInfo( + id=relfileid4 + "::SpamTests", + kind="suite", + 
name="SpamTests", + root=testroot, + parentid=relfileid4, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/a", + kind="folder", + name="a", + root=testroot, + relpath=fix_path("./x/y/a"), + parentid="./x/y", + ), + ParentInfo( + id=relfileid5, + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_path(relfileid5), + parentid="./x/y/a", + ), + ParentInfo( + id=relfileid5 + "::SpamTests", + kind="suite", + name="SpamTests", + root=testroot, + parentid=relfileid5, + ), + ParentInfo( + id="./x/y/b", + kind="folder", + name="b", + root=testroot, + relpath=fix_path("./x/y/b"), + parentid="./x/y", + ), + ParentInfo( + id=relfileid6, + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_path(relfileid6), + parentid="./x/y/b", + ), + ParentInfo( + id=relfileid6 + "::SpamTests", + kind="suite", + name="SpamTests", + root=testroot, + parentid=relfileid6, + ), + ] + expected = [ + { + "rootid": ".", + "root": testroot, + "parents": [ + { + "id": relfileid1, + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path(relfileid1), + "parentid": ".", + }, + { + "id": relfileid1 + "::MySuite", + "kind": "suite", + "name": "MySuite", + "parentid": relfileid1, + }, + { + "id": relfileid2, + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path(relfileid2), + "parentid": ".", + }, + { + "id": relfileid2 + "::SpamTests", + "kind": "suite", + "name": "SpamTests", + "parentid": relfileid2, + }, + { + "id": "./w", + "kind": "folder", + "name": "w", + "relpath": fix_path("./w"), + "parentid": ".", + }, + { + "id": relfileid3, + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path(relfileid3), + "parentid": "./w", + }, + { + "id": relfileid3 + "::HamTests", + "kind": "suite", + "name": "HamTests", + "parentid": 
relfileid3, + }, + { + "id": relfileid3 + "::MoreHam", + "kind": "suite", + "name": "MoreHam", + "parentid": relfileid3, + }, + { + "id": relfileid3 + "::MoreHam::test_yay", + "kind": "function", + "name": "test_yay", + "parentid": relfileid3 + "::MoreHam", + }, + { + "id": relfileid3 + "::MoreHam::test_yay[1-2]", + "kind": "subtest", + "name": "test_yay[1-2]", + "parentid": relfileid3 + "::MoreHam::test_yay", + }, + { + "id": relfileid4, + "kind": "file", + "name": "test_eggs.py", + "relpath": fix_path(relfileid4), + "parentid": "./w", + }, + { + "id": relfileid4 + "::SpamTests", + "kind": "suite", + "name": "SpamTests", + "parentid": relfileid4, + }, + { + "id": "./x", + "kind": "folder", + "name": "x", + "relpath": fix_path("./x"), + "parentid": ".", + }, + { + "id": "./x/y", + "kind": "folder", + "name": "y", + "relpath": fix_path("./x/y"), + "parentid": "./x", + }, + { + "id": "./x/y/a", + "kind": "folder", + "name": "a", + "relpath": fix_path("./x/y/a"), + "parentid": "./x/y", + }, + { + "id": relfileid5, + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path(relfileid5), + "parentid": "./x/y/a", + }, + { + "id": relfileid5 + "::SpamTests", + "kind": "suite", + "name": "SpamTests", + "parentid": relfileid5, + }, + { + "id": "./x/y/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./x/y/b"), + "parentid": "./x/y", + }, + { + "id": relfileid6, + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path(relfileid6), + "parentid": "./x/y/b", + }, + { + "id": relfileid6 + "::SpamTests", + "kind": "suite", + "name": "SpamTests", + "parentid": relfileid6, + }, + ], + "tests": [ + { + "id": relfileid1 + "::MySuite::test_x1", + "name": "test_x1", + "source": "{}:{}".format(fix_path(relfileid1), 10), + "markers": [], + "parentid": relfileid1 + "::MySuite", + }, + { + "id": relfileid1 + "::MySuite::test_x2", + "name": "test_x2", + "source": "{}:{}".format(fix_path(relfileid1), 21), + "markers": [], + "parentid": relfileid1 + "::MySuite", + 
}, + { + "id": relfileid2 + "::SpamTests::test_okay", + "name": "test_okay", + "source": "{}:{}".format(fix_path(relfileid2), 17), + "markers": [], + "parentid": relfileid2 + "::SpamTests", + }, + { + "id": relfileid3 + "::test_ham1", + "name": "test_ham1", + "source": "{}:{}".format(fix_path(relfileid3), 8), + "markers": [], + "parentid": relfileid3, + }, + { + "id": relfileid3 + "::HamTests::test_uh_oh", + "name": "test_uh_oh", + "source": "{}:{}".format(fix_path(relfileid3), 19), + "markers": ["expected-failure"], + "parentid": relfileid3 + "::HamTests", + }, + { + "id": relfileid3 + "::HamTests::test_whoa", + "name": "test_whoa", + "source": "{}:{}".format(fix_path(relfileid3), 35), + "markers": [], + "parentid": relfileid3 + "::HamTests", + }, + { + "id": relfileid3 + "::MoreHam::test_yay[1-2]", + "name": "test_yay[1-2]", + "source": "{}:{}".format(fix_path(relfileid3), 57), + "markers": [], + "parentid": relfileid3 + "::MoreHam::test_yay", + }, + { + "id": relfileid3 + "::MoreHam::test_yay[1-2][3-4]", + "name": "test_yay[1-2][3-4]", + "source": "{}:{}".format(fix_path(relfileid3), 72), + "markers": [], + "parentid": relfileid3 + "::MoreHam::test_yay[1-2]", + }, + { + "id": relfileid4 + "::SpamTests::test_okay", + "name": "test_okay", + "source": "{}:{}".format(fix_path(relfileid4), 15), + "markers": [], + "parentid": relfileid4 + "::SpamTests", + }, + { + "id": relfileid5 + "::SpamTests::test_okay", + "name": "test_okay", + "source": "{}:{}".format(fix_path(relfileid5), 12), + "markers": [], + "parentid": relfileid5 + "::SpamTests", + }, + { + "id": relfileid6 + "::SpamTests::test_okay", + "name": "test_okay", + "source": "{}:{}".format(fix_path(relfileid6), 27), + "markers": [], + "parentid": relfileid6 + "::SpamTests", + }, + ], + } + ] + + report_discovered(tests, parents, _send=stub.send) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("send", (expected,), None), + ], + ) + + def test_simple_basic(self): + stub = StubSender() + testroot = 
fix_path("/a/b/c") + relfile = fix_path("x/y/z/test_spam.py") + tests = [ + SingleTestInfo( + id="test#1", + name="test_spam_1", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="MySuite.test_spam_1", + sub=None, + ), + source="{}:{}".format(relfile, 10), + markers=None, + parentid="suite#1", + ), + ] + parents = None + expected = [ + { + "id": "test#1", + "name": "test_spam_1", + "testroot": testroot, + "relfile": relfile, + "lineno": 10, + "testfunc": "MySuite.test_spam_1", + "subtest": None, + "markers": [], + } + ] + + report_discovered(tests, parents, simple=True, _send=stub.send) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("send", (expected,), None), + ], + ) + + def test_simple_complex(self): + """ + /a/b/c/ + test_ham.py + MySuite + test_x1 + test_x2 + /a/b/e/f/g/ + w/ + test_ham.py + test_ham1 + HamTests + test_uh_oh + test_whoa + MoreHam + test_yay + sub1 + sub2 + sub3 + test_eggs.py + SpamTests + test_okay + x/ + y/ + a/ + test_spam.py + SpamTests + test_okay + b/ + test_spam.py + SpamTests + test_okay + test_spam.py + SpamTests + test_okay + """ + stub = StubSender() + testroot1 = fix_path("/a/b/c") + relfile1 = fix_path("./test_ham.py") + testroot2 = fix_path("/a/b/e/f/g") + relfile2 = fix_path("./test_spam.py") + relfile3 = fix_path("w/test_ham.py") + relfile4 = fix_path("w/test_eggs.py") + relfile5 = fix_path("x/y/a/test_spam.py") + relfile6 = fix_path("x/y/b/test_spam.py") + tests = [ + # under first root folder + SingleTestInfo( + id="test#1", + name="test_x1", + path=SingleTestPath( + root=testroot1, + relfile=relfile1, + func="MySuite.test_x1", + sub=None, + ), + source="{}:{}".format(relfile1, 10), + markers=None, + parentid="suite#1", + ), + SingleTestInfo( + id="test#2", + name="test_x2", + path=SingleTestPath( + root=testroot1, + relfile=relfile1, + func="MySuite.test_x2", + sub=None, + ), + source="{}:{}".format(relfile1, 21), + markers=None, + parentid="suite#1", + ), + # under second root folder + 
SingleTestInfo( + id="test#3", + name="test_okay", + path=SingleTestPath( + root=testroot2, + relfile=relfile2, + func="SpamTests.test_okay", + sub=None, + ), + source="{}:{}".format(relfile2, 17), + markers=None, + parentid="suite#2", + ), + SingleTestInfo( + id="test#4", + name="test_ham1", + path=SingleTestPath( + root=testroot2, + relfile=relfile3, + func="test_ham1", + sub=None, + ), + source="{}:{}".format(relfile3, 8), + markers=None, + parentid="file#3", + ), + SingleTestInfo( + id="test#5", + name="test_uh_oh", + path=SingleTestPath( + root=testroot2, + relfile=relfile3, + func="HamTests.test_uh_oh", + sub=None, + ), + source="{}:{}".format(relfile3, 19), + markers=["expected-failure"], + parentid="suite#3", + ), + SingleTestInfo( + id="test#6", + name="test_whoa", + path=SingleTestPath( + root=testroot2, + relfile=relfile3, + func="HamTests.test_whoa", + sub=None, + ), + source="{}:{}".format(relfile3, 35), + markers=None, + parentid="suite#3", + ), + SingleTestInfo( + id="test#7", + name="test_yay (sub1)", + path=SingleTestPath( + root=testroot2, + relfile=relfile3, + func="MoreHam.test_yay", + sub=["sub1"], + ), + source="{}:{}".format(relfile3, 57), + markers=None, + parentid="suite#4", + ), + SingleTestInfo( + id="test#8", + name="test_yay (sub2) (sub3)", + path=SingleTestPath( + root=testroot2, + relfile=relfile3, + func="MoreHam.test_yay", + sub=["sub2", "sub3"], + ), + source="{}:{}".format(relfile3, 72), + markers=None, + parentid="suite#3", + ), + SingleTestInfo( + id="test#9", + name="test_okay", + path=SingleTestPath( + root=testroot2, + relfile=relfile4, + func="SpamTests.test_okay", + sub=None, + ), + source="{}:{}".format(relfile4, 15), + markers=None, + parentid="suite#5", + ), + SingleTestInfo( + id="test#10", + name="test_okay", + path=SingleTestPath( + root=testroot2, + relfile=relfile5, + func="SpamTests.test_okay", + sub=None, + ), + source="{}:{}".format(relfile5, 12), + markers=None, + parentid="suite#6", + ), + SingleTestInfo( + 
id="test#11", + name="test_okay", + path=SingleTestPath( + root=testroot2, + relfile=relfile6, + func="SpamTests.test_okay", + sub=None, + ), + source="{}:{}".format(relfile6, 27), + markers=None, + parentid="suite#7", + ), + ] + expected = [ + { + "id": "test#1", + "name": "test_x1", + "testroot": testroot1, + "relfile": relfile1, + "lineno": 10, + "testfunc": "MySuite.test_x1", + "subtest": None, + "markers": [], + }, + { + "id": "test#2", + "name": "test_x2", + "testroot": testroot1, + "relfile": relfile1, + "lineno": 21, + "testfunc": "MySuite.test_x2", + "subtest": None, + "markers": [], + }, + { + "id": "test#3", + "name": "test_okay", + "testroot": testroot2, + "relfile": relfile2, + "lineno": 17, + "testfunc": "SpamTests.test_okay", + "subtest": None, + "markers": [], + }, + { + "id": "test#4", + "name": "test_ham1", + "testroot": testroot2, + "relfile": relfile3, + "lineno": 8, + "testfunc": "test_ham1", + "subtest": None, + "markers": [], + }, + { + "id": "test#5", + "name": "test_uh_oh", + "testroot": testroot2, + "relfile": relfile3, + "lineno": 19, + "testfunc": "HamTests.test_uh_oh", + "subtest": None, + "markers": ["expected-failure"], + }, + { + "id": "test#6", + "name": "test_whoa", + "testroot": testroot2, + "relfile": relfile3, + "lineno": 35, + "testfunc": "HamTests.test_whoa", + "subtest": None, + "markers": [], + }, + { + "id": "test#7", + "name": "test_yay (sub1)", + "testroot": testroot2, + "relfile": relfile3, + "lineno": 57, + "testfunc": "MoreHam.test_yay", + "subtest": ["sub1"], + "markers": [], + }, + { + "id": "test#8", + "name": "test_yay (sub2) (sub3)", + "testroot": testroot2, + "relfile": relfile3, + "lineno": 72, + "testfunc": "MoreHam.test_yay", + "subtest": ["sub2", "sub3"], + "markers": [], + }, + { + "id": "test#9", + "name": "test_okay", + "testroot": testroot2, + "relfile": relfile4, + "lineno": 15, + "testfunc": "SpamTests.test_okay", + "subtest": None, + "markers": [], + }, + { + "id": "test#10", + "name": "test_okay", + 
"testroot": testroot2, + "relfile": relfile5, + "lineno": 12, + "testfunc": "SpamTests.test_okay", + "subtest": None, + "markers": [], + }, + { + "id": "test#11", + "name": "test_okay", + "testroot": testroot2, + "relfile": relfile6, + "lineno": 27, + "testfunc": "SpamTests.test_okay", + "subtest": None, + "markers": [], + }, + ] + parents = None + + report_discovered(tests, parents, simple=True, _send=stub.send) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("send", (expected,), None), + ], + ) diff --git a/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_util.py b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_util.py new file mode 100644 index 00000000000..822ba2ed1b2 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/testing_tools/adapter/test_util.py @@ -0,0 +1,330 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, print_function + +import ntpath +import os +import os.path +import posixpath +import shlex +import sys +import unittest + +import pytest + +# Pytest 3.7 and later uses pathlib/pathlib2 for path resolution. +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path # type: ignore (for Pylance) + +from testing_tools.adapter.util import ( + fix_path, + fix_relpath, + fix_fileid, + shlex_unsplit, +) + + +@unittest.skipIf(sys.version_info < (3,), "Python 2 does not have subTest") +class FilePathTests(unittest.TestCase): + def test_isolated_imports(self): + import testing_tools.adapter + from testing_tools.adapter import util + from . 
import test_functional + + ignored = { + str(Path(os.path.abspath(__file__)).resolve()), + str(Path(os.path.abspath(util.__file__)).resolve()), + str(Path(os.path.abspath(test_functional.__file__)).resolve()), + } + adapter = os.path.abspath(os.path.dirname(testing_tools.adapter.__file__)) + tests = os.path.join( + os.path.abspath(os.path.dirname(os.path.dirname(testing_tools.__file__))), + "tests", + "testing_tools", + "adapter", + ) + found = [] + for root in [adapter, tests]: + for dirname, _, files in os.walk(root): + if ".data" in dirname: + continue + for basename in files: + if not basename.endswith(".py"): + continue + filename = os.path.join(dirname, basename) + if filename in ignored: + continue + with open(filename) as srcfile: + for line in srcfile: + if line.strip() == "import os.path": + found.append(filename) + break + + if found: + self.fail( + os.linesep.join( + [ + "", + "Please only use path-related API from testing_tools.adapter.util.", + 'Found use of "os.path" in the following files:', + ] + + [" " + file for file in found] + ) + ) + + def test_fix_path(self): + tests = [ + ("./spam.py", r".\spam.py"), + ("./some-dir", r".\some-dir"), + ("./some-dir/", ".\\some-dir\\"), + ("./some-dir/eggs", r".\some-dir\eggs"), + ("./some-dir/eggs/spam.py", r".\some-dir\eggs\spam.py"), + ("X/y/Z/a.B.c.PY", r"X\y\Z\a.B.c.PY"), + ("/", "\\"), + ("/spam", r"\spam"), + ("C:/spam", r"C:\spam"), + ] + for path, expected in tests: + pathsep = ntpath.sep + with self.subTest(r"fixed for \: {!r}".format(path)): + fixed = fix_path(path, _pathsep=pathsep) + self.assertEqual(fixed, expected) + + pathsep = posixpath.sep + with self.subTest("unchanged for /: {!r}".format(path)): + unchanged = fix_path(path, _pathsep=pathsep) + self.assertEqual(unchanged, path) + + # no path -> "." 
+ for path in ["", None]: + for pathsep in [ntpath.sep, posixpath.sep]: + with self.subTest(r"fixed for {}: {!r}".format(pathsep, path)): + fixed = fix_path(path, _pathsep=pathsep) + self.assertEqual(fixed, ".") + + # no-op paths + paths = [path for _, path in tests] + paths.extend( + [ + ".", + "..", + "some-dir", + "spam.py", + ] + ) + for path in paths: + for pathsep in [ntpath.sep, posixpath.sep]: + with self.subTest(r"unchanged for {}: {!r}".format(pathsep, path)): + unchanged = fix_path(path, _pathsep=pathsep) + self.assertEqual(unchanged, path) + + def test_fix_relpath(self): + tests = [ + ("spam.py", posixpath, "./spam.py"), + ("eggs/spam.py", posixpath, "./eggs/spam.py"), + ("eggs/spam/", posixpath, "./eggs/spam/"), + (r"\spam.py", posixpath, r"./\spam.py"), + ("spam.py", ntpath, r".\spam.py"), + (r"eggs\spam.py", ntpath, r".\eggs\spam.py"), + ("eggs\\spam\\", ntpath, ".\\eggs\\spam\\"), + ("/spam.py", ntpath, r"\spam.py"), # Note the fixed "/". + # absolute + ("/", posixpath, "/"), + ("/spam.py", posixpath, "/spam.py"), + ("\\", ntpath, "\\"), + (r"\spam.py", ntpath, r"\spam.py"), + (r"C:\spam.py", ntpath, r"C:\spam.py"), + # no-op + ("./spam.py", posixpath, "./spam.py"), + (r".\spam.py", ntpath, r".\spam.py"), + ] + # no-op + for path in [".", ".."]: + tests.extend( + [ + (path, posixpath, path), + (path, ntpath, path), + ] + ) + for path, _os_path, expected in tests: + with self.subTest((path, _os_path.sep)): + fixed = fix_relpath( + path, + _fix_path=(lambda p: fix_path(p, _pathsep=_os_path.sep)), + _path_isabs=_os_path.isabs, + _pathsep=_os_path.sep, + ) + self.assertEqual(fixed, expected) + + def test_fix_fileid(self): + common = [ + ("spam.py", "./spam.py"), + ("eggs/spam.py", "./eggs/spam.py"), + ("eggs/spam/", "./eggs/spam/"), + # absolute (no-op) + ("/", "/"), + ("//", "//"), + ("/spam.py", "/spam.py"), + # no-op + (None, None), + ("", ""), + (".", "."), + ("./spam.py", "./spam.py"), + ] + tests = [(p, posixpath, e) for p, e in common] + 
tests.extend( + (p, posixpath, e) + for p, e in [ + (r"\spam.py", r"./\spam.py"), + ] + ) + tests.extend((p, ntpath, e) for p, e in common) + tests.extend( + (p, ntpath, e) + for p, e in [ + (r"eggs\spam.py", "./eggs/spam.py"), + ("eggs\\spam\\", "./eggs/spam/"), + (r".\spam.py", r"./spam.py"), + # absolute + (r"\spam.py", "/spam.py"), + (r"C:\spam.py", "C:/spam.py"), + ("\\", "/"), + ("\\\\", "//"), + ("C:\\\\", "C://"), + ("C:/", "C:/"), + ("C://", "C://"), + ("C:/spam.py", "C:/spam.py"), + ] + ) + for fileid, _os_path, expected in tests: + pathsep = _os_path.sep + with self.subTest(r"for {}: {!r}".format(pathsep, fileid)): + fixed = fix_fileid( + fileid, + _path_isabs=_os_path.isabs, + _normcase=_os_path.normcase, + _pathsep=pathsep, + ) + self.assertEqual(fixed, expected) + + # with rootdir + common = [ + ("spam.py", "/eggs", "./spam.py"), + ("spam.py", r"\eggs", "./spam.py"), + # absolute + ("/spam.py", "/", "./spam.py"), + ("/eggs/spam.py", "/eggs", "./spam.py"), + ("/eggs/spam.py", "/eggs/", "./spam.py"), + # no-op + ("/spam.py", "/eggs", "/spam.py"), + ("/spam.py", "/eggs/", "/spam.py"), + # root-only (no-op) + ("/", "/", "/"), + ("/", "/spam", "/"), + ("//", "/", "//"), + ("//", "//", "//"), + ("//", "//spam", "//"), + ] + tests = [(p, r, posixpath, e) for p, r, e in common] + tests = [(p, r, ntpath, e) for p, r, e in common] + tests.extend( + (p, r, ntpath, e) + for p, r, e in [ + ("spam.py", r"\eggs", "./spam.py"), + # absolute + (r"\spam.py", "\\", r"./spam.py"), + (r"C:\spam.py", "C:\\", r"./spam.py"), + (r"\eggs\spam.py", r"\eggs", r"./spam.py"), + (r"\eggs\spam.py", "\\eggs\\", r"./spam.py"), + # normcase + (r"C:\spam.py", "c:\\", r"./spam.py"), + (r"\Eggs\Spam.py", "\\eggs", r"./Spam.py"), + (r"\eggs\spam.py", "\\Eggs", r"./spam.py"), + (r"\eggs\Spam.py", "\\Eggs", r"./Spam.py"), + # no-op + (r"\spam.py", r"\eggs", r"/spam.py"), + (r"C:\spam.py", r"C:\eggs", r"C:/spam.py"), + # TODO: Should these be supported. 
+ (r"C:\spam.py", "\\", r"C:/spam.py"), + (r"\spam.py", "C:\\", r"/spam.py"), + # root-only + ("\\", "\\", "/"), + ("\\\\", "\\", "//"), + ("C:\\", "C:\\eggs", "C:/"), + ("C:\\", "C:\\", "C:/"), + (r"C:\spam.py", "D:\\", r"C:/spam.py"), + ] + ) + for fileid, rootdir, _os_path, expected in tests: + pathsep = _os_path.sep + with self.subTest( + r"for {} (with rootdir {!r}): {!r}".format(pathsep, rootdir, fileid) + ): + fixed = fix_fileid( + fileid, + rootdir, + _path_isabs=_os_path.isabs, + _normcase=_os_path.normcase, + _pathsep=pathsep, + ) + self.assertEqual(fixed, expected) + + +class ShlexUnsplitTests(unittest.TestCase): + def test_no_args(self): + argv = [] + joined = shlex_unsplit(argv) + + self.assertEqual(joined, "") + self.assertEqual(shlex.split(joined), argv) + + def test_one_arg(self): + argv = ["spam"] + joined = shlex_unsplit(argv) + + self.assertEqual(joined, "spam") + self.assertEqual(shlex.split(joined), argv) + + def test_multiple_args(self): + argv = [ + "-x", + "X", + "-xyz", + "spam", + "eggs", + ] + joined = shlex_unsplit(argv) + + self.assertEqual(joined, "-x X -xyz spam eggs") + self.assertEqual(shlex.split(joined), argv) + + def test_whitespace(self): + argv = [ + "-x", + "X Y Z", + "spam spam\tspam", + "eggs", + ] + joined = shlex_unsplit(argv) + + self.assertEqual(joined, "-x 'X Y Z' 'spam spam\tspam' eggs") + self.assertEqual(shlex.split(joined), argv) + + def test_quotation_marks(self): + argv = [ + "-x", + "''", + 'spam"spam"spam', + "ham'ham'ham", + "eggs", + ] + joined = shlex_unsplit(argv) + + self.assertEqual( + joined, + "-x ''\"'\"''\"'\"'' 'spam\"spam\"spam' 'ham'\"'\"'ham'\"'\"'ham' eggs", + ) + self.assertEqual(shlex.split(joined), argv) diff --git a/extensions/positron-python/pythonFiles/tests/tree_comparison_helper.py b/extensions/positron-python/pythonFiles/tests/tree_comparison_helper.py new file mode 100644 index 00000000000..edf6aa8ff86 --- /dev/null +++ 
b/extensions/positron-python/pythonFiles/tests/tree_comparison_helper.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +def is_same_tree(tree1, tree2) -> bool: + """Helper function to test if two test trees are the same. + + `is_same_tree` starts by comparing the root attributes, and then checks if all children are the same. + """ + # Compare the root. + if any(tree1[key] != tree2[key] for key in ["path", "name", "type_"]): + return False + + # Compare child test nodes if they exist, otherwise compare test items. + if "children" in tree1 and "children" in tree2: + # sort children by path before comparing since order doesn't matter of children + children1 = sorted(tree1["children"], key=lambda x: x["path"]) + children2 = sorted(tree2["children"], key=lambda x: x["path"]) + + # Compare test nodes. + if len(children1) != len(children2): + return False + else: + return all(is_same_tree(*pair) for pair in zip(children1, children2)) + elif "id_" in tree1 and "id_" in tree2: + # Compare test items. + return all(tree1[key] == tree2[key] for key in ["id_", "lineno"]) + + return False diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_empty.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_empty.py new file mode 100644 index 00000000000..9af5071303c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_empty.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryEmpty(unittest.TestCase): + """Test class for the test_empty_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and no test tree + if unittest discovery was performed successfully but no tests were found. 
+ """ + + def something(self) -> bool: + return True diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py new file mode 100644 index 00000000000..031b6f6c9d6 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +import something_else # type: ignore # noqa: F401 + + +class DiscoveryErrorOne(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py new file mode 100644 index 00000000000..5d6d54f886a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryErrorTwo(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_simple.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_simple.py new file mode 100644 index 00000000000..1859436d5b5 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/discovery_simple.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoverySimple(unittest.TestCase): + """Test class for the test_simple_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree + if unittest discovery was performed successfully. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_fail_simple.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_fail_simple.py new file mode 100644 index 00000000000..e329c3fd700 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_fail_simple.py @@ -0,0 +1,21 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +# Test class for the test_fail_simple test. +# The test_failed_tests function should return a dictionary with a "success" status +# and the two tests with their outcome as "failed". + +class RunFailSimple(unittest.TestCase): + """Test class for the test_fail_simple test. + + The test_failed_tests function should return a dictionary with a "success" status + and the two tests with their outcome as "failed". 
+ """ + + def test_one_fail(self) -> None: + self.assertGreater(2, 3) + + def test_two_fail(self) -> None: + self.assertNotEqual(1, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_subtest.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_subtest.py new file mode 100644 index 00000000000..b913b877370 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_subtest.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +# Test class for the test_subtest_run test. +# The test_failed_tests function should return a dictionary that has a "success" status +# and the "result" value is a dict with 6 entries, one for each subtest. + + +class NumbersTest(unittest.TestCase): + def test_even(self): + """ + Test that numbers between 0 and 5 are all even. + """ + for i in range(0, 6): + with self.subTest(i=i): + self.assertEqual(i % 2, 0) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_two_classes.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_two_classes.py new file mode 100644 index 00000000000..60b26706ad4 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/test_two_classes.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +# Test class which runs for the test_multiple_ids_run test with the two class parameters. +# Both test functions will be returned in a dictionary with a "success" status, +# and the two tests with their outcome as "success". 
+ + +class ClassOne(unittest.TestCase): + + def test_one(self) -> None: + self.assertGreater(2, 1) + +class ClassTwo(unittest.TestCase): + + def test_two(self) -> None: + self.assertGreater(2, 1) + diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/pattern_a_test.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/pattern_a_test.py new file mode 100644 index 00000000000..4f3f77e1056 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/pattern_a_test.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import unittest + +# Test class for the two file pattern test. It is pattern *test.py. +# The test_ids_multiple_runs function should return a dictionary with a "success" status, +# and the two tests with their outcome as "success". + + + +class DiscoveryA(unittest.TestCase): + """Test class for the two file pattern test. It is pattern *test.py + + The test_ids_multiple_runs function should return a dictionary with a "success" status, + and the two tests with their outcome as "success". + """ + + def test_one_a(self) -> None: + self.assertGreater(2, 1) + + def test_two_a(self) -> None: + self.assertNotEqual(2, 1) \ No newline at end of file diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/test_pattern_b.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/test_pattern_b.py new file mode 100644 index 00000000000..a912699383c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/two_patterns/test_pattern_b.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import unittest + +# Test class for the two file pattern test. This file is pattern test*.py. 
+# The test_ids_multiple_runs function should return a dictionary with a "success" status, +# and the two tests with their outcome as "success". + +class DiscoveryB(unittest.TestCase): + + def test_one_b(self) -> None: + self.assertGreater(2, 1) + + def test_two_b(self) -> None: + self.assertNotEqual(2, 1) \ No newline at end of file diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_add.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_add.py new file mode 100644 index 00000000000..2e616077ec4 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_add.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import unittest + +# Test class which runs for the test_multiple_ids_run test with the two test +# files in the same folder. The cwd is set to the parent folder. This should return +# a dictionary with a "success" status and the two tests with their outcome as "success". + +def add(a, b): + return a + b + + +class TestAddFunction(unittest.TestCase): + + def test_add_positive_numbers(self): + result = add(2, 3) + self.assertEqual(result, 5) + + + def test_add_negative_numbers(self): + result = add(-2, -3) + self.assertEqual(result, -5) \ No newline at end of file diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_subtract.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_subtract.py new file mode 100644 index 00000000000..4028e25825d --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_folder/test_subtract.py @@ -0,0 +1,21 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import unittest + +# Test class which runs for the test_multiple_ids_run test with the two test +# files in the same folder. The cwd is set to the parent folder. This should return +# a dictionary with a "success" status and the two tests with their outcome as "success". + +def subtract(a, b): + return a - b + + +class TestSubtractFunction(unittest.TestCase): + def test_subtract_positive_numbers(self): + result = subtract(5, 3) + self.assertEqual(result, 2) + + + def test_subtract_negative_numbers(self): + result = subtract(-2, -3) + self.assertEqual(result, 1) \ No newline at end of file diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_file.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_file.py new file mode 100644 index 00000000000..927a56bc920 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_file.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from unittest import SkipTest + +raise SkipTest("This is unittest.SkipTest calling") + + +def test_example(): + assert 1 == 1 diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_function.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_function.py new file mode 100644 index 00000000000..59e66e9a1d4 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/unittest_skip/unittest_skip_function.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import unittest + + +def add(x, y): + return x + y + + +class SimpleTest(unittest.TestCase): + @unittest.skip("demonstrating skipping") + def testadd1(self): + self.assertEquals(add(4, 5), 9) + + +if __name__ == "__main__": + unittest.main() diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/__init__.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/__init__.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/test_inner_folder/__init__.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/test_inner_folder/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/test_inner_folder/test_utils_complex_tree.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/test_inner_folder/test_utils_complex_tree.py new file mode 100644 index 00000000000..8f57fb880ff --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_complex_tree/test_outer_folder/test_inner_folder/test_utils_complex_tree.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import unittest + + +class TreeOne(unittest.TestCase): + def test_one(self): + assert True diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py new file mode 100644 index 00000000000..90fdfc89a27 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest +from functools import wraps + + +def my_decorator(f): + @wraps(f) + def wrapper(*args, **kwds): + print("Calling decorated function") + return f(*args, **kwds) + + return wrapper + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_decorated_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + @my_decorator + def test_one(self) -> None: + self.assertGreater(2, 1) + + @my_decorator + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py new file mode 100644 index 00000000000..84f7fefc4eb --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseTwoFileOne(unittest.TestCase): + """Test class for the test_nested_test_cases test. + + get_test_case should return tests from the test suites in this folder. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/__init__.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/file_two.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/file_two.py new file mode 100644 index 00000000000..235a104016a --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/folder/file_two.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseTwoFileTwo(unittest.TestCase): + """Test class for the test_nested_test_cases test. + + get_test_case should return tests from the test suites in this folder. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py new file mode 100644 index 00000000000..fb3ae7eb790 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import unittest + + +class CaseOne(unittest.TestCase): + """Test class for the test_simple_test_cases test. + + get_test_case should return tests from the test suite. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py new file mode 100644 index 00000000000..6db51a4fd80 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_simple_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/__init__.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/conftest.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/conftest.py new file mode 100644 index 00000000000..19af85d1e09 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/conftest.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import sys + +# Ignore the contents of this folder for Python 2 tests. 
+if sys.version_info[0] < 3: + collect_ignore_glob = ["*.py"] diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/expected_discovery_test_output.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/expected_discovery_test_output.py new file mode 100644 index 00000000000..1007a8f42df --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/expected_discovery_test_output.py @@ -0,0 +1,155 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +from unittestadapter.pvsc_utils import TestNodeTypeEnum +import pathlib + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" + + +skip_unittest_folder_discovery_output = { + "path": os.fspath(TEST_DATA_PATH / "unittest_skip"), + "name": "unittest_skip", + "type_": TestNodeTypeEnum.folder, + "children": [ + { + "path": os.fspath( + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py" + ), + "name": "unittest_skip_file.py", + "type_": TestNodeTypeEnum.file, + "children": [], + "id_": os.fspath( + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_file.py" + ), + }, + { + "path": os.fspath( + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py" + ), + "name": "unittest_skip_function.py", + "type_": TestNodeTypeEnum.file, + "children": [ + { + "path": os.fspath( + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py" + ), + "name": "SimpleTest", + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "testadd1", + "path": os.fspath( + TEST_DATA_PATH + / "unittest_skip" + / "unittest_skip_function.py" + ), + "lineno": "13", + "type_": TestNodeTypeEnum.test, + "id_": os.fspath( + TEST_DATA_PATH + / "unittest_skip" + / "unittest_skip_function.py" + ) + + "\\SimpleTest\\testadd1", + "runID": "unittest_skip_function.SimpleTest.testadd1", + } + ], + "id_": os.fspath( + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py" + ) + + "\\SimpleTest", + } + ], + "id_": os.fspath( 
+ TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py" + ), + }, + ], + "id_": os.fspath(TEST_DATA_PATH / "unittest_skip"), +} + +complex_tree_file_path = os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, + "utils_complex_tree", + "test_outer_folder", + "test_inner_folder", + "test_utils_complex_tree.py", + ) +) +complex_tree_expected_output = { + "name": "utils_complex_tree", + "type_": TestNodeTypeEnum.folder, + "path": os.fsdecode(pathlib.PurePath(TEST_DATA_PATH, "utils_complex_tree")), + "children": [ + { + "name": "test_outer_folder", + "type_": TestNodeTypeEnum.folder, + "path": os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder" + ) + ), + "children": [ + { + "name": "test_inner_folder", + "type_": TestNodeTypeEnum.folder, + "path": os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, + "utils_complex_tree", + "test_outer_folder", + "test_inner_folder", + ) + ), + "children": [ + { + "name": "test_utils_complex_tree.py", + "type_": TestNodeTypeEnum.file, + "path": complex_tree_file_path, + "children": [ + { + "name": "TreeOne", + "type_": TestNodeTypeEnum.class_, + "path": complex_tree_file_path, + "children": [ + { + "name": "test_one", + "type_": TestNodeTypeEnum.test, + "path": complex_tree_file_path, + "lineno": "7", + "id_": complex_tree_file_path + + "\\" + + "TreeOne" + + "\\" + + "test_one", + "runID": "utils_complex_tree.test_outer_folder.test_inner_folder.test_utils_complex_tree.TreeOne.test_one", + }, + ], + "id_": complex_tree_file_path + "\\" + "TreeOne", + } + ], + "id_": complex_tree_file_path, + } + ], + "id_": os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, + "utils_complex_tree", + "test_outer_folder", + "test_inner_folder", + ) + ), + }, + ], + "id_": os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, "utils_complex_tree", "test_outer_folder" + ) + ), + } + ], + "id_": os.fsdecode(pathlib.PurePath(TEST_DATA_PATH, "utils_complex_tree")), +} diff --git 
a/extensions/positron-python/pythonFiles/tests/unittestadapter/test_discovery.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_discovery.py new file mode 100644 index 00000000000..a68774d3f2d --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_discovery.py @@ -0,0 +1,327 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import pathlib +import sys +from typing import List + +import pytest +from unittestadapter.discovery import discover_tests +from unittestadapter.pvsc_utils import TestNodeTypeEnum, parse_unittest_args + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.append(os.fspath(script_dir)) + + +from . import expected_discovery_test_output +from tests.tree_comparison_helper import is_same_tree + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" + + +@pytest.mark.parametrize( + "args, expected", + [ + ( + ["-s", "something", "-p", "other*", "-t", "else"], + ("something", "other*", "else", 1, None, None), + ), + ( + [ + "--start-directory", + "foo", + "--pattern", + "bar*", + "--top-level-directory", + "baz", + ], + ("foo", "bar*", "baz", 1, None, None), + ), + ( + ["--foo", "something"], + (".", "test*.py", None, 1, None, None), + ), + ( + ["--foo", "something", "-v"], + (".", "test*.py", None, 2, None, None), + ), + ( + ["--foo", "something", "-f"], + (".", "test*.py", None, 1, True, None), + ), + ( + ["--foo", "something", "--verbose", "-f"], + (".", "test*.py", None, 2, True, None), + ), + ( + ["--foo", "something", "-q", "--failfast"], + (".", "test*.py", None, 0, True, None), + ), + ( + ["--foo", "something", "--quiet"], + (".", "test*.py", None, 0, None, None), + ), + ( + ["--foo", "something", "--quiet", "--locals"], + (".", "test*.py", None, 0, None, True), + ), + ], +) +def test_parse_unittest_args(args: List[str], expected: List[str]) -> None: + """The parse_unittest_args function should return values for the 
start_dir, pattern, and top_level_dir arguments + when passed as command-line options, and ignore unrecognized arguments. + """ + actual = parse_unittest_args(args) + + assert actual == expected + + +def test_simple_discovery() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and a test tree + if unittest discovery was performed successfully. + """ + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "discovery_simple*" + file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py")) + + expected = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": ".data", + "children": [ + { + "name": "discovery_simple.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "DiscoverySimple", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "14", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "17", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_two", + }, + ], + "id_": file_path + "\\" + "DiscoverySimple", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert is_same_tree(actual.get("tests"), expected) + assert "error" not in actual + + +def test_simple_discovery_with_top_dir_calculated() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and a test tree + if unittest discovery was performed successfully. + """ + start_dir = "." 
+ pattern = "discovery_simple*" + file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py")) + + expected = { + "path": os.fsdecode(pathlib.PurePath(TEST_DATA_PATH)), + "type_": TestNodeTypeEnum.folder, + "name": ".data", + "children": [ + { + "name": "discovery_simple.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "DiscoverySimple", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "14", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "17", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_two", + }, + ], + "id_": file_path + "\\" + "DiscoverySimple", + } + ], + "id_": file_path, + } + ], + "id_": os.fsdecode(pathlib.PurePath(TEST_DATA_PATH)), + } + + uuid = "some-uuid" + # Define the CWD to be the root of the test data folder. + os.chdir(os.fsdecode(pathlib.PurePath(TEST_DATA_PATH))) + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert is_same_tree(actual.get("tests"), expected) + assert "error" not in actual + + +def test_empty_discovery() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and no test tree + if unittest discovery was performed successfully but no tests were found. 
+ """ + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "discovery_empty*" + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert "tests" in actual + assert "error" not in actual + + +def test_error_discovery() -> None: + """The discover_tests function should return a dictionary with an "error" status, a uuid, the discovered tests, and a list of errors + if unittest discovery failed at some point. + """ + # Discover tests in .data/discovery_error/. + start_path = pathlib.PurePath(TEST_DATA_PATH / "discovery_error") + start_dir = os.fsdecode(start_path) + pattern = "file*" + + file_path = os.fsdecode(start_path / "file_two.py") + + expected = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": "discovery_error", + "children": [ + { + "name": "file_two.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "DiscoveryErrorTwo", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "14", + "id_": file_path + + "\\" + + "DiscoveryErrorTwo" + + "\\" + + "test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "17", + "id_": file_path + + "\\" + + "DiscoveryErrorTwo" + + "\\" + + "test_two", + }, + ], + "id_": file_path + "\\" + "DiscoveryErrorTwo", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "error" + assert is_same_tree(expected, actual.get("tests")) + assert len(actual.get("error", [])) == 1 + + +def test_unit_skip() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and test tree. 
+ if unittest discovery was performed and found a test in one file marked as skipped and another file marked as skipped. + """ + start_dir = os.fsdecode(TEST_DATA_PATH / "unittest_skip") + pattern = "unittest_*" + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert "tests" in actual + assert is_same_tree( + actual.get("tests"), + expected_discovery_test_output.skip_unittest_folder_discovery_output, + ) + assert "error" not in actual + + +def test_complex_tree() -> None: + """This test specifically tests when different start_dir and top_level_dir are provided.""" + start_dir = os.fsdecode( + pathlib.PurePath( + TEST_DATA_PATH, + "utils_complex_tree", + "test_outer_folder", + "test_inner_folder", + ) + ) + pattern = "test_*.py" + top_level_dir = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH, "utils_complex_tree")) + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, top_level_dir, uuid) + assert actual["status"] == "success" + assert "error" not in actual + assert is_same_tree( + actual.get("tests"), + expected_discovery_test_output.complex_tree_expected_output, + ) diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/test_execution.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_execution.py new file mode 100644 index 00000000000..7d11c656b57 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_execution.py @@ -0,0 +1,275 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import os +import pathlib +import sys + +import pytest + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.insert(0, os.fspath(script_dir / "lib" / "python")) + +from unittestadapter.execution import run_tests + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" + + +def test_no_ids_run() -> None: + """This test runs on an empty array of test_ids, therefore it should return + an empty dict for the result. + """ + start_dir: str = os.fspath(TEST_DATA_PATH) + testids = [] + pattern = "discovery_simple*" + actual = run_tests(start_dir, testids, pattern, None, "fake-uuid", 1, None) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + if actual["result"] is not None: + assert len(actual["result"]) == 0 + else: + raise AssertionError("actual['result'] is None") + + +def test_single_ids_run() -> None: + """This test runs on a single test_id, therefore it should return + a dict with a single key-value pair for the result. + + This single test passes so the outcome should be 'success'. + """ + id = "discovery_simple.DiscoverySimple.test_one" + actual = run_tests( + os.fspath(TEST_DATA_PATH), + [id], + "discovery_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == 1 + assert id in result + id_result = result[id] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == "success" + + +def test_subtest_run() -> None: + """This test runs on a the test_subtest which has a single method, test_even, + that uses unittest subtest. + + The actual result of run should return a dict payload with 6 entry for the 6 subtests. 
+ """ + id = "test_subtest.NumbersTest.test_even" + actual = run_tests( + os.fspath(TEST_DATA_PATH), + [id], + "test_subtest.py", + None, + "fake-uuid", + 1, + None, + ) + subtests_ids = [ + "test_subtest.NumbersTest.test_even (i=0)", + "test_subtest.NumbersTest.test_even (i=1)", + "test_subtest.NumbersTest.test_even (i=2)", + "test_subtest.NumbersTest.test_even (i=3)", + "test_subtest.NumbersTest.test_even (i=4)", + "test_subtest.NumbersTest.test_even (i=5)", + ] + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == 6 + for id in subtests_ids: + assert id in result + + +@pytest.mark.parametrize( + "test_ids, pattern, cwd, expected_outcome", + [ + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + ], + "test_add.py", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + "test_subtract.TestSubtractFunction.test_subtract_negative_numbers", + "test_subtract.TestSubtractFunction.test_subtract_positive_numbers", + ], + "test*", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "pattern_a_test.DiscoveryA.test_one_a", + "pattern_a_test.DiscoveryA.test_two_a", + ], + "*test", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "test_pattern_b.DiscoveryB.test_one_b", + "test_pattern_b.DiscoveryB.test_two_b", + ], + "test_*", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "file_one.CaseTwoFileOne.test_one", + "file_one.CaseTwoFileOne.test_two", + "folder.file_two.CaseTwoFileTwo.test_one", + "folder.file_two.CaseTwoFileTwo.test_two", + ], + "*", + os.fspath(TEST_DATA_PATH / "utils_nested_cases"), + 
"success", + ), + ( + [ + "test_two_classes.ClassOne.test_one", + "test_two_classes.ClassTwo.test_two", + ], + "test_two_classes.py", + os.fspath(TEST_DATA_PATH), + "success", + ), + ], +) +def test_multiple_ids_run(test_ids, pattern, cwd, expected_outcome) -> None: + """ + The following are all successful tests of different formats. + + # 1. Two tests with the `pattern` specified as a file + # 2. Two test files in the same folder called `unittest_folder` + # 3. A folder with two different test file patterns, this test gathers pattern `*test` + # 4. A folder with two different test file patterns, this test gathers pattern `test_*` + # 5. A nested structure where a test file is on the same level as a folder containing a test file + # 6. Test file with two test classes + + All tests should have the outcome of `success`. + """ + actual = run_tests(cwd, test_ids, pattern, None, "fake-uuid", 1, None) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == cwd + assert actual["result"] is not None + result = actual["result"] + assert len(result) == len(test_ids) + for test_id in test_ids: + assert test_id in result + id_result = result[test_id] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == expected_outcome + assert True + + +def test_failed_tests(): + """This test runs on a single file `test_fail` with two tests that fail.""" + test_ids = [ + "test_fail_simple.RunFailSimple.test_one_fail", + "test_fail_simple.RunFailSimple.test_two_fail", + ] + actual = run_tests( + os.fspath(TEST_DATA_PATH), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == len(test_ids) + for 
test_id in test_ids:
+        assert test_id in result
+        id_result = result[test_id]
+        assert id_result is not None
+        assert "outcome" in id_result
+        assert id_result["outcome"] == "failure"
+        assert "message" in id_result and "traceback" in id_result
+        assert "2 not greater than 3" in str(id_result["message"]) or "1 == 1" in str(
+            id_result["traceback"]
+        )
+        assert True
+
+
+def test_unknown_id():
+    """This test runs on an unknown test_id, therefore it should return
+    an error as the outcome as it attempts to find the given test.
+    """
+    test_ids = ["unknown_id"]
+    actual = run_tests(
+        os.fspath(TEST_DATA_PATH),
+        test_ids,
+        "test_fail_simple*",
+        None,
+        "fake-uuid",
+        1,
+        None,
+    )
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert actual["result"] is not None
+    result = actual["result"]
+    assert len(result) == len(test_ids)
+    assert "unittest.loader._FailedTest.unknown_id" in result
+    id_result = result["unittest.loader._FailedTest.unknown_id"]
+    assert id_result is not None
+    assert "outcome" in id_result
+    assert id_result["outcome"] == "error"
+    assert "message" in id_result and "traceback" in id_result
+
+
+def test_incorrect_path():
+    """This test runs on a non-existent path, therefore it should return
+    an error as the outcome as it attempts to find the given folder.
+ """ + test_ids = ["unknown_id"] + actual = run_tests( + os.fspath(TEST_DATA_PATH / "unknown_folder"), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status", "error")) + assert actual["status"] == "error" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH / "unknown_folder") diff --git a/extensions/positron-python/pythonFiles/tests/unittestadapter/test_utils.py b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_utils.py new file mode 100644 index 00000000000..d5f6fbbe9f1 --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/unittestadapter/test_utils.py @@ -0,0 +1,308 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import pathlib +import sys +import unittest + +import pytest + +from unittestadapter.pvsc_utils import ( + TestNode, + TestNodeTypeEnum, + build_test_tree, + get_child_node, + get_test_case, +) + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.append(os.fspath(script_dir)) + +from tests.tree_comparison_helper import is_same_tree + + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" + + +@pytest.mark.parametrize( + "directory, pattern, expected", + [ + ( + ".", + "utils_simple_cases*", + [ + "utils_simple_cases.CaseOne.test_one", + "utils_simple_cases.CaseOne.test_two", + ], + ), + ( + "utils_nested_cases", + "file*", + [ + "file_one.CaseTwoFileOne.test_one", + "file_one.CaseTwoFileOne.test_two", + "folder.file_two.CaseTwoFileTwo.test_one", + "folder.file_two.CaseTwoFileTwo.test_two", + ], + ), + ], +) +def test_simple_test_cases(directory, pattern, expected) -> None: + """The get_test_case fuction should return tests from all test suites.""" + + actual = [] + + # Discover tests in .data/. 
+ start_dir = os.fsdecode(TEST_DATA_PATH / directory) + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + + # Iterate on get_test_case and save the test id. + for test in get_test_case(suite): + actual.append(test.id()) + + assert expected == actual + + +def test_get_existing_child_node() -> None: + """The get_child_node fuction should return the child node of a test tree if it exists.""" + + tree: TestNode = { + "name": "root", + "path": "foo", + "type_": TestNodeTypeEnum.folder, + "children": [ + { + "name": "childOne", + "path": "child/one", + "type_": TestNodeTypeEnum.folder, + "children": [ + { + "name": "nestedOne", + "path": "nested/one", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "nested/one", + }, + { + "name": "nestedTwo", + "path": "nested/two", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "nested/two", + }, + ], + "id_": "child/one", + }, + { + "name": "childTwo", + "path": "child/two", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "child/two", + }, + ], + "id_": "foo", + } + + get_child_node("childTwo", "child/two", TestNodeTypeEnum.folder, tree) + tree_copy = tree.copy() + + # Check that the tree didn't get mutated by get_child_node. 
+ assert is_same_tree(tree, tree_copy) + + +def test_no_existing_child_node() -> None: + """The get_child_node fuction should add a child node to a test tree and return it if it does not exist.""" + + tree: TestNode = { + "name": "root", + "path": "foo", + "type_": TestNodeTypeEnum.folder, + "children": [ + { + "name": "childOne", + "path": "child/one", + "type_": TestNodeTypeEnum.folder, + "children": [ + { + "name": "nestedOne", + "path": "nested/one", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "nested/one", + }, + { + "name": "nestedTwo", + "path": "nested/two", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "nested/two", + }, + ], + "id_": "child/one", + }, + { + "name": "childTwo", + "path": "child/two", + "type_": TestNodeTypeEnum.folder, + "children": [], + "id_": "child/two", + }, + ], + "id_": "foo", + } + + # Make a separate copy of tree["children"]. + tree_before = tree.copy() + tree_before["children"] = tree["children"][:] + + get_child_node("childThree", "child/three", TestNodeTypeEnum.folder, tree) + + tree_after = tree.copy() + tree_after["children"] = tree_after["children"][:-1] + + # Check that all pre-existing items in the tree didn't get mutated by get_child_node. + assert is_same_tree(tree_before, tree_after) + + # Check for the added node. + last_child = tree["children"][-1] + assert last_child["name"] == "childThree" + + +def test_build_simple_tree() -> None: + """The build_test_tree function should build and return a test tree from discovered test suites, + and an empty list of errors if there are none in the discovered data. + """ + + # Discovery tests in utils_simple_tree.py. 
+ start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "utils_simple_tree*" + file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH, "utils_simple_tree.py")) + + expected: TestNode = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": ".data", + "children": [ + { + "name": "utils_simple_tree.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "TreeOne", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "13", + "id_": file_path + "\\" + "TreeOne" + "\\" + "test_one", + "runID": "utils_simple_tree.TreeOne.test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "16", + "id_": file_path + "\\" + "TreeOne" + "\\" + "test_two", + "runID": "utils_simple_tree.TreeOne.test_two", + }, + ], + "id_": file_path + "\\" + "TreeOne", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + tests, errors = build_test_tree(suite, start_dir) + + assert is_same_tree(expected, tests) + assert not errors + + +def test_build_decorated_tree() -> None: + """The build_test_tree function should build and return a test tree from discovered test suites, + with correct line numbers for decorated test, + and an empty list of errors if there are none in the discovered data. + """ + + # Discovery tests in utils_decorated_tree.py. 
+ start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "utils_decorated_tree*" + file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH, "utils_decorated_tree.py")) + + expected: TestNode = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": ".data", + "children": [ + { + "name": "utils_decorated_tree.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "TreeOne", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "24", + "id_": file_path + "\\" + "TreeOne" + "\\" + "test_one", + "runID": "utils_decorated_tree.TreeOne.test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "28", + "id_": file_path + "\\" + "TreeOne" + "\\" + "test_two", + "runID": "utils_decorated_tree.TreeOne.test_two", + }, + ], + "id_": file_path + "\\" + "TreeOne", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + tests, errors = build_test_tree(suite, start_dir) + + assert is_same_tree(expected, tests) + assert not errors + + +def test_build_empty_tree() -> None: + """The build_test_tree function should return None if there are no discovered test suites, + and an empty list of errors if there are none in the discovered data.""" + + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "does_not_exist*" + + expected = None + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + tests, errors = build_test_tree(suite, start_dir) + + assert expected == tests + assert not errors diff --git a/extensions/positron-python/pythonFiles/tests/util.py b/extensions/positron-python/pythonFiles/tests/util.py new file mode 100644 index 00000000000..45c3536145c --- /dev/null +++ b/extensions/positron-python/pythonFiles/tests/util.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. + + +class Stub(object): + def __init__(self): + self.calls = [] + + def add_call(self, name, args=None, kwargs=None): + self.calls.append((name, args, kwargs)) + + +class StubProxy(object): + def __init__(self, stub=None, name=None): + self.name = name + self.stub = stub if stub is not None else Stub() + + @property + def calls(self): + return self.stub.calls + + def add_call(self, funcname, *args, **kwargs): + callname = funcname + if self.name: + callname = "{}.{}".format(self.name, funcname) + return self.stub.add_call(callname, *args, **kwargs) diff --git a/extensions/positron-python/pythonFiles/unittestadapter/__init__.py b/extensions/positron-python/pythonFiles/unittestadapter/__init__.py new file mode 100644 index 00000000000..5b7f7a925cc --- /dev/null +++ b/extensions/positron-python/pythonFiles/unittestadapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/extensions/positron-python/pythonFiles/unittestadapter/discovery.py b/extensions/positron-python/pythonFiles/unittestadapter/discovery.py new file mode 100644 index 00000000000..db06004e02c --- /dev/null +++ b/extensions/positron-python/pythonFiles/unittestadapter/discovery.py @@ -0,0 +1,171 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import json +import os +import pathlib +import sys +import traceback +import unittest +from typing import List, Optional, Union + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.append(os.fspath(script_dir)) +sys.path.insert(0, os.fspath(script_dir / "lib" / "python")) + +from testing_tools import socket_manager +from typing_extensions import Literal, NotRequired, TypedDict + +# If I use from utils then there will be an import error in test_discovery.py. 
+from unittestadapter.pvsc_utils import ( + TestNode, + build_test_tree, + parse_unittest_args, +) + +DEFAULT_PORT = 45454 + + +class PayloadDict(TypedDict): + cwd: str + status: Literal["success", "error"] + tests: Optional[TestNode] + error: NotRequired[List[str]] + + +class EOTPayloadDict(TypedDict): + """A dictionary that is used to send a end of transmission post request to the server.""" + + command_type: Union[Literal["discovery"], Literal["execution"]] + eot: bool + + +def discover_tests( + start_dir: str, + pattern: str, + top_level_dir: Optional[str], + uuid: Optional[str], +) -> PayloadDict: + """Returns a dictionary containing details of the discovered tests. + + The returned dict has the following keys: + + - cwd: Absolute path to the test start directory; + - uuid: UUID sent by the caller of the Python script, that needs to be sent back as an integrity check; + - status: Test discovery status, can be "success" or "error"; + - tests: Discoverered tests if any, not present otherwise. Note that the status can be "error" but the payload can still contain tests; + - error: Discovery error if any, not present otherwise. + + Payload format for a successful discovery: + { + "status": "success", + "cwd": , + "tests": + } + + Payload format for a successful discovery with no tests: + { + "status": "success", + "cwd": , + } + + Payload format when there are errors: + { + "cwd": + "": [list of errors] + "status": "error", + } + """ + cwd = os.path.abspath(start_dir) + if "/" in start_dir: # is a subdir + parent_dir = os.path.dirname(start_dir) + sys.path.insert(0, parent_dir) + else: + sys.path.insert(0, cwd) + payload: PayloadDict = {"cwd": cwd, "status": "success", "tests": None} + tests = None + error: List[str] = [] + + try: + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern, top_level_dir) + + # If the top level directory is not provided, then use the start directory. 
+ if top_level_dir is None: + top_level_dir = start_dir + + # Get abspath of top level directory for build_test_tree. + top_level_dir = os.path.abspath(top_level_dir) + + tests, error = build_test_tree( + suite, top_level_dir + ) # test tree built successfully here. + + except Exception: + error.append(traceback.format_exc()) + + # Still include the tests in the payload even if there are errors so that the TS + # side can determine if it is from run or discovery. + payload["tests"] = tests if tests is not None else None + + if len(error): + payload["status"] = "error" + payload["error"] = error + + return payload + + +def post_response( + payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str +) -> None: + # Build the request data (it has to be a POST request or the Node side will not process it), and send it. + addr = ("localhost", port) + data = json.dumps(payload) + request = f"""Content-Length: {len(data)} +Content-Type: application/json +Request-uuid: {uuid} + +{data}""" + try: + with socket_manager.SocketManager(addr) as s: + if s.socket is not None: + s.socket.sendall(request.encode("utf-8")) + except Exception as e: + print(f"Error sending response: {e}") + print(f"Request data: {request}") + + +if __name__ == "__main__": + # Get unittest discovery arguments. + argv = sys.argv[1:] + index = argv.index("--udiscovery") + + ( + start_dir, + pattern, + top_level_dir, + _verbosity, + _failfast, + _locals, + ) = parse_unittest_args(argv[index + 1 :]) + + testPort = int(os.environ.get("TEST_PORT", DEFAULT_PORT)) + testUuid = os.environ.get("TEST_UUID") + if testPort is DEFAULT_PORT: + print( + "Error[vscode-unittest]: TEST_PORT is not set.", + " TEST_UUID = ", + testUuid, + ) + if testUuid is not None: + # Perform test discovery. + payload = discover_tests(start_dir, pattern, top_level_dir, testUuid) + # Post this discovery payload. + post_response(payload, testPort, testUuid) + # Post EOT token. 
class TestOutcomeEnum(str, enum.Enum):
    """Outcome labels reported to the TypeScript side for each test event.

    Inherits from ``str`` so members serialize directly via ``json.dumps``.
    """

    error = "error"
    failure = "failure"
    success = "success"
    skipped = "skipped"
    expected_failure = "expected-failure"
    unexpected_success = "unexpected-success"
    subtest_success = "subtest-success"
    subtest_failure = "subtest-failure"
unittest.TestCase): + super(UnittestTestResult, self).startTest(test) + + def addError( + self, + test: unittest.TestCase, + err: ErrorType, + ): + super(UnittestTestResult, self).addError(test, err) + self.formatResult(test, TestOutcomeEnum.error, err) + + def addFailure( + self, + test: unittest.TestCase, + err: ErrorType, + ): + super(UnittestTestResult, self).addFailure(test, err) + self.formatResult(test, TestOutcomeEnum.failure, err) + + def addSuccess(self, test: unittest.TestCase): + super(UnittestTestResult, self).addSuccess(test) + self.formatResult(test, TestOutcomeEnum.success) + + def addSkip(self, test: unittest.TestCase, reason: str): + super(UnittestTestResult, self).addSkip(test, reason) + self.formatResult(test, TestOutcomeEnum.skipped) + + def addExpectedFailure(self, test: unittest.TestCase, err: ErrorType): + super(UnittestTestResult, self).addExpectedFailure(test, err) + self.formatResult(test, TestOutcomeEnum.expected_failure, err) + + def addUnexpectedSuccess(self, test: unittest.TestCase): + super(UnittestTestResult, self).addUnexpectedSuccess(test) + self.formatResult(test, TestOutcomeEnum.unexpected_success) + + def addSubTest( + self, + test: unittest.TestCase, + subtest: unittest.TestCase, + err: Union[ErrorType, None], + ): + super(UnittestTestResult, self).addSubTest(test, subtest, err) + self.formatResult( + test, + TestOutcomeEnum.subtest_failure if err else TestOutcomeEnum.subtest_success, + err, + subtest, + ) + + def formatResult( + self, + test: unittest.TestCase, + outcome: str, + error: Union[ErrorType, None] = None, + subtest: Union[unittest.TestCase, None] = None, + ): + tb = None + + message = "" + # error is a tuple of the form returned by sys.exc_info(): (type, value, traceback). 
class TestExecutionStatus(str, enum.Enum):
    """Overall status of a test execution run.

    Inherits from ``str`` so the value is JSON-serializable as-is.
    """

    error = "error"
    success = "success"
def run_tests(
    start_dir: str,
    test_ids: List[str],
    pattern: str,
    top_level_dir: Optional[str],
    uuid: Optional[str],
    verbosity: int,
    failfast: Optional[bool],
    locals: Optional[bool] = None,
) -> PayloadDict:
    """Run the given unittest test ids and return a summary payload.

    Individual test results are streamed to the server as they finish (see
    UnittestTestResult.formatResult); the returned payload carries the
    aggregate result for the caller.
    """
    cwd = os.path.abspath(start_dir)
    status = TestExecutionStatus.error
    error = None
    payload: PayloadDict = {"cwd": cwd, "status": status, "result": None}

    try:
        # When pointed at a single file, discover within its directory using
        # the file name as the pattern.
        start_dir = cwd
        if cwd.endswith(".py"):
            start_dir = os.path.dirname(cwd)
            pattern = os.path.basename(cwd)

        # Run discovery first so the relevant test modules are imported
        # before the requested ids are resolved below.
        discovery_loader = unittest.TestLoader()
        args = {  # noqa: F841
            "start_dir": start_dir,
            "pattern": pattern,
            "top_level_dir": top_level_dir,
        }
        suite = discovery_loader.discover(start_dir, pattern, top_level_dir)  # noqa: F841

        # Fill in runner defaults for any unset options.
        if failfast is None:
            failfast = False
        if locals is None:
            locals = False
        if verbosity is None:
            verbosity = 1
        runner = unittest.TextTestRunner(
            resultclass=UnittestTestResult,
            tb_locals=locals,
            failfast=failfast,
            verbosity=verbosity,
        )

        # Build a tailored suite limited to exactly the requested test ids.
        selection_loader = unittest.TestLoader()
        selected_suite: unittest.TestSuite = selection_loader.loadTestsFromNames(
            test_ids
        )
        run_result: UnittestTestResult = runner.run(selected_suite)  # type: ignore

        payload["result"] = run_result.formatted

    except Exception:
        status = TestExecutionStatus.error
        error = traceback.format_exc()

    if error is not None:
        payload["error"] = error
    else:
        status = TestExecutionStatus.success

    payload["status"] = status

    return payload
def post_response(
    payload: Union[PayloadDict, EOTPayloadDict], port: int, uuid: str
) -> None:
    """Send a payload to the Node test server over the cached module socket.

    The request has to look like a POST (headers + JSON body) or the Node
    side will not process it.  The socket is created lazily on first use and
    reused for the rest of the run.
    """
    addr = ("localhost", port)
    global __socket
    if __socket is None:
        try:
            __socket = socket_manager.SocketManager(addr)
            __socket.connect()
        except Exception as error:
            # BUG FIX: message previously said "[vscode-pytest]" — this is
            # the unittest adapter; tag matches the file's other messages.
            print(f"Plugin error connection error[vscode-unittest]: {error}")
            __socket = None
    data = json.dumps(payload)
    request = f"""Content-Length: {len(data)}
Content-Type: application/json
Request-uuid: {uuid}

{data}"""
    try:
        if __socket is not None and __socket.socket is not None:
            __socket.socket.sendall(request.encode("utf-8"))
    except Exception as ex:
        print(f"Error sending response: {ex}")
        print(f"Request data: {request}")
if __name__ == "__main__":
    # Get unittest test execution arguments (everything after --udiscovery).
    argv = sys.argv[1:]
    index = argv.index("--udiscovery")

    (
        start_dir,
        pattern,
        top_level_dir,
        verbosity,
        failfast,
        locals,
    ) = parse_unittest_args(argv[index + 1 :])

    run_test_ids_port = os.environ.get("RUN_TEST_IDS_PORT")
    run_test_ids_port_int = (
        int(run_test_ids_port) if run_test_ids_port is not None else 0
    )
    if run_test_ids_port_int == 0:
        print("Error[vscode-unittest]: RUN_TEST_IDS_PORT env var is not set.")
    # Read the JSON list of test ids to run from the dedicated socket.
    test_ids_from_buffer = []
    try:
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_socket.connect(("localhost", run_test_ids_port_int))
        buffer = b""

        while True:
            # Receive the data from the client.
            data = client_socket.recv(1024 * 1024)
            if not data:
                break

            # Append the received data to the buffer.
            buffer += data

            try:
                # Try to parse the buffer as JSON.
                test_ids_from_buffer = process_json_util.process_rpc_json(
                    buffer.decode("utf-8")
                )
                # Clear the buffer as complete JSON object is received.
                buffer = b""
                break
            except json.JSONDecodeError:
                # Complete JSON object not yet received; keep reading.
                continue
    except socket.error as e:
        print(f"Error: Could not connect to runTestIdsPort: {e}")
        print("Error: Could not connect to runTestIdsPort")

    testPort = int(os.environ.get("TEST_PORT", DEFAULT_PORT))
    testUuid = os.environ.get("TEST_UUID")
    # BUG FIX: compare by value — "is" on ints is an identity check that only
    # works by accident of CPython object reuse.
    if testPort == DEFAULT_PORT:
        print(
            "Error[vscode-unittest]: TEST_PORT is not set.",
            " TEST_UUID = ",
            testUuid,
        )
    if testUuid is None:
        print(
            "Error[vscode-unittest]: TEST_UUID is not set.",
            " TEST_PORT = ",
            testPort,
        )
        testUuid = "unknown"
    if test_ids_from_buffer:
        # Perform test execution; per-test results are streamed from within
        # run_tests, so the returned payload is not posted here.
        payload = run_tests(
            start_dir,
            test_ids_from_buffer,
            pattern,
            top_level_dir,
            testUuid,
            verbosity,
            failfast,
            locals,
        )
    else:
        cwd = os.path.abspath(start_dir)
        status = TestExecutionStatus.error
        payload: PayloadDict = {
            "cwd": cwd,
            "status": status,
            "error": "No test ids received from buffer",
            "result": None,
        }
    # Always post the end-of-transmission token.
    eot_payload: EOTPayloadDict = {"command_type": "execution", "eot": True}
    if testUuid is None:
        print("Error sending response, uuid unknown to python server.")
        post_response(eot_payload, testPort, "unknown")
    else:
        post_response(eot_payload, testPort, testUuid)
# Inherit from str so members are JSON serializable.
class TestNodeTypeEnum(str, enum.Enum):
    """Kind of node in the discovered test tree."""

    class_ = "class"  # trailing underscore: "class" is a Python keyword
    file = "file"
    folder = "folder"
    test = "test"
+ + +def get_test_case(suite): + """Iterate through a unittest test suite and return all test cases.""" + for test in suite: + if isinstance(test, unittest.TestCase): + yield test + else: + for test_case in get_test_case(test): + yield test_case + + +def get_source_line(obj) -> str: + """Get the line number of a test case start line.""" + try: + sourcelines, lineno = inspect.getsourcelines(obj) + except Exception: + try: + # tornado-specific, see https://github.com/microsoft/vscode-python/issues/17285. + sourcelines, lineno = inspect.getsourcelines(obj.orig_method) + except Exception: + return "*" + + # Return the line number of the first line of the test case definition. + for i, v in enumerate(sourcelines): + if v.strip().startswith(("def", "async def")): + return str(lineno + i) + + return "*" + + +# Helper functions for test tree building. + + +def build_test_node(path: str, name: str, type_: TestNodeTypeEnum) -> TestNode: + """Build a test node with no children. A test node can be a folder, a file or a class.""" + ## figure out if we are folder, file, or class + id_gen = path + if type_ == TestNodeTypeEnum.folder or type_ == TestNodeTypeEnum.file: + id_gen = path + else: + # means we have to build test node for class + id_gen = path + "\\" + name + + return {"path": path, "name": name, "type_": type_, "children": [], "id_": id_gen} + + +def get_child_node( + name: str, path: str, type_: TestNodeTypeEnum, root: TestNode +) -> TestNode: + """Find a child node in a test tree given its name, type and path. If the node doesn't exist, create it. 
def get_child_node(
    name: str, path: str, type_: "TestNodeTypeEnum", root: "TestNode"
) -> "TestNode":
    """Find a child node in a test tree given its name, type and path.

    If the node doesn't exist, create it.  Path is required to distinguish
    between nodes with the same name and type.
    """
    try:
        result = next(
            node
            for node in root["children"]
            if node["name"] == name and node["type_"] == type_ and node["path"] == path
        )
    except StopIteration:
        result = build_test_node(path, name, type_)
        root["children"].append(result)

    return result  # type:ignore


def build_test_tree(
    suite: unittest.TestSuite, top_level_directory: str
) -> "Tuple[Union[TestNode, None], List[str]]":
    """Build a test tree from a unittest test suite.

    Returns the tree and any errors found by unittest.  If no tests were
    discovered, returns `None` and a list of errors (if any).  The tree is a
    nested folder -> file -> class -> test structure; each node carries a
    unique "id_" and leaf test nodes carry "lineno" and "runID" (the
    unittest id used to execute the test).
    """
    error = []
    directory_path = pathlib.PurePath(top_level_directory)
    root = build_test_node(
        top_level_directory, directory_path.name, TestNodeTypeEnum.folder
    )

    for test_case in get_test_case(suite):
        test_id = test_case.id()
        if test_id.startswith("unittest.loader._FailedTest"):
            error.append(str(test_case._exception))  # type: ignore
        elif test_id.startswith("unittest.loader.ModuleSkipped"):
            components = test_id.split(".")
            class_name = f"{components[-1]}.py"
            # Find/build a file node for the skipped module.
            file_path = os.fsdecode(os.path.join(directory_path, class_name))
            current_node = get_child_node(
                class_name, file_path, TestNodeTypeEnum.file, root
            )
        else:
            # Static test path components: filename, class name, function name.
            components = test_id.split(".")
            *folders, filename, class_name, function_name = components
            # BUG FIX: the file node must be named after the module the test
            # came from; the f-string previously contained no placeholder.
            py_filename = f"{filename}.py"

            current_node = root

            # Find/build nodes for the intermediate folders in the test path.
            for folder in folders:
                current_node = get_child_node(
                    folder,
                    os.fsdecode(pathlib.PurePath(current_node["path"], folder)),
                    TestNodeTypeEnum.folder,
                    current_node,
                )

            # Find/build file node.
            path_components = [top_level_directory] + folders + [py_filename]
            file_path = os.fsdecode(pathlib.PurePath("/".join(path_components)))
            current_node = get_child_node(
                py_filename, file_path, TestNodeTypeEnum.file, current_node
            )

            # Find/build class node.
            current_node = get_child_node(
                class_name, file_path, TestNodeTypeEnum.class_, current_node
            )

            # Get test line number for editor gutter placement.
            test_method = getattr(test_case, test_case._testMethodName)
            lineno = get_source_line(test_method)

            # Add the leaf test node; id_ concatenates class and test names.
            test_node: TestItem = {
                "name": function_name,
                "path": file_path,
                "lineno": lineno,
                "type_": TestNodeTypeEnum.test,
                "id_": file_path + "\\" + class_name + "\\" + function_name,
                "runID": test_id,
            }
            current_node["children"].append(test_node)

    if not root["children"]:
        root = None

    return root, error
def parse_unittest_args(
    args: List[str],
) -> Tuple[str, str, Union[str, None], int, Union[bool, None], Union[bool, None]]:
    """Parse command-line arguments forwarded to unittest for discovery.

    Recognized flags mirror unittest's own CLI: -s/--start-directory
    (default "."), -p/--pattern (default "test*.py"),
    -t/--top-level-directory (default None), -f/--failfast, -v/--verbose,
    -q/--quiet and --locals.  Unknown arguments are silently ignored.

    Returns a tuple of (start_directory, pattern, top_level_directory,
    verbosity, failfast, locals).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--start-directory", "-s", default=".")
    parser.add_argument("--pattern", "-p", default="test*.py")
    parser.add_argument("--top-level-directory", "-t", default=None)
    parser.add_argument("--failfast", "-f", action="store_true", default=None)
    parser.add_argument("--verbose", "-v", action="store_true", default=None)
    parser.add_argument("-q", "--quiet", action="store_true", default=None)
    parser.add_argument("--locals", action="store_true", default=None)

    known_args, _ = parser.parse_known_args(args)

    # --quiet wins over --verbose; the default verbosity is 1.
    verbosity: int = 0 if known_args.quiet else (2 if known_args.verbose else 1)

    return (
        known_args.start_directory,
        known_args.pattern,
        known_args.top_level_directory,
        verbosity,
        known_args.failfast,
        known_args.locals,
    )
You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS +# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY +# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# +# See the Apache Version 2.0 License for specific language governing +# permissions and limitations under the License. + +__author__ = "Microsoft Corporation " +__version__ = "3.0.0.0" + +import json +import os +import signal +import socket +import sys +import traceback +import unittest + +try: + import thread +except: + import _thread as thread + + +class _TestOutput(object): + """file like object which redirects output to the repl window.""" + + errors = "strict" + + def __init__(self, old_out, is_stdout): + self.is_stdout = is_stdout + self.old_out = old_out + if sys.version >= "3." and hasattr(old_out, "buffer"): + self.buffer = _TestOutputBuffer(old_out.buffer, is_stdout) + + def flush(self): + if self.old_out: + self.old_out.flush() + + def writelines(self, lines): + for line in lines: + self.write(line) + + @property + def encoding(self): + return "utf8" + + def write(self, value): + _channel.send_event("stdout" if self.is_stdout else "stderr", content=value) + if self.old_out: + self.old_out.write(value) + # flush immediately, else things go wonky and out of order + self.flush() + + def isatty(self): + return True + + def next(self): + pass + + @property + def name(self): + if self.is_stdout: + return "" + else: + return "" + + def __getattr__(self, name): + return getattr(self.old_out, name) + + +class _TestOutputBuffer(object): + def __init__(self, old_buffer, is_stdout): + self.buffer = old_buffer + self.is_stdout = is_stdout + + def write(self, data): + _channel.send_event("stdout" if self.is_stdout else "stderr", content=data) + self.buffer.write(data) + + def flush(self): + 
self.buffer.flush() + + def truncate(self, pos=None): + return self.buffer.truncate(pos) + + def tell(self): + return self.buffer.tell() + + def seek(self, pos, whence=0): + return self.buffer.seek(pos, whence) + + +class _IpcChannel(object): + def __init__(self, socket, callback): + self.socket = socket + self.seq = 0 + self.callback = callback + self.lock = thread.allocate_lock() + self._closed = False + # start the testing reader thread loop + self.test_thread_id = thread.start_new_thread(self.readSocket, ()) + + def close(self): + self._closed = True + + def readSocket(self): + try: + data = self.socket.recv(1024) + self.callback() + except OSError: + if not self._closed: + raise + + def receive(self): + pass + + def send_event(self, name, **args): + with self.lock: + body = {"type": "event", "seq": self.seq, "event": name, "body": args} + self.seq += 1 + content = json.dumps(body).encode("utf8") + headers = ("Content-Length: %d\n\n" % (len(content),)).encode("utf8") + self.socket.send(headers) + self.socket.send(content) + + +_channel = None + + +class VsTestResult(unittest.TextTestResult): + def startTest(self, test): + super(VsTestResult, self).startTest(test) + if _channel is not None: + _channel.send_event(name="start", test=test.id()) + + def addError(self, test, err): + super(VsTestResult, self).addError(test, err) + self.sendResult(test, "error", err) + + def addFailure(self, test, err): + super(VsTestResult, self).addFailure(test, err) + self.sendResult(test, "failed", err) + + def addSuccess(self, test): + super(VsTestResult, self).addSuccess(test) + self.sendResult(test, "passed") + + def addSkip(self, test, reason): + super(VsTestResult, self).addSkip(test, reason) + self.sendResult(test, "skipped") + + def addExpectedFailure(self, test, err): + super(VsTestResult, self).addExpectedFailure(test, err) + self.sendResult(test, "failed-expected", err) + + def addUnexpectedSuccess(self, test): + super(VsTestResult, self).addUnexpectedSuccess(test) + 
def stopTests():
    """Ask the current process to stop testing by signalling itself.

    Prefers SIGUSR1 (handled by signal_handler when registered) and falls
    back to SIGTERM when SIGUSR1 is unavailable or fails; best effort only.
    """
    try:
        os.kill(os.getpid(), signal.SIGUSR1)
    except:  # noqa: E722 -- deliberately broad, mirrors best-effort intent
        try:
            os.kill(os.getpid(), signal.SIGTERM)
        except:  # noqa: E722
            pass


class ExitCommand(Exception):
    """Raised by signal_handler to unwind out of the test run."""

    pass


def signal_handler(signal, frame):
    """Signal handler that converts the received signal into an ExitCommand."""
    raise ExitCommand()