diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e14f211..02ea4bd 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -9,6 +9,7 @@ jobs:
     name: tests-python${{ matrix.python-version }}-${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
+      fail-fast: false
       matrix:
         python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
         os: ["macOS-latest", "ubuntu-latest", "windows-latest"]
@@ -35,9 +36,43 @@ jobs:
       - name: Install and run tests macOS
        run: |
          tox -epy --notest
-          .tox/py/bin/pip install gnureadline subunit2sql
+          .tox/py/bin/pip install gnureadline
          tox -epy
        if: runner.os == 'macOS'
+  pytest:
+    name: test-pytest-${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: ["macOS-latest", "ubuntu-latest", "windows-latest"]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.11"
+      - name: Pip cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-3.11-pip-tests-${{ hashFiles('setup.py','requirements-dev.txt','constraints.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-3.11-pip-tests-
+            ${{ runner.os }}-3.11-pip-
+            ${{ runner.os }}-3.11
+      - name: Install Deps
+        run: python -m pip install -U 'tox<4' setuptools virtualenv wheel
+      - name: Install and Run Tests
+        run: tox -e py -- --pytest
+        if: runner.os != 'macOS'
+      - name: Install and run tests macOS
+        run: |
+          tox -epy --notest
+          .tox/py/bin/pip install gnureadline
+          tox -epy -- --pytest
+        if: runner.os == 'macOS'
+
   lint:
     name: pep8
     runs-on: ubuntu-latest
diff --git a/doc/source/MANUAL.rst b/doc/source/MANUAL.rst
index fae7c1a..9213f4e 100644
--- a/doc/source/MANUAL.rst
+++ b/doc/source/MANUAL.rst
@@ -68,6 +68,7 @@ A full example config file is::
   test_path=./project/tests
   top_dir=./
   group_regex=([^\.]*\.)*
+  runner=pytest

 The ``group_regex`` option is used to specify is used to provide a scheduler
@@ -77,7 +78,10 @@
 You can also specify the ``parallel_class=True``
 instead of group_regex to group tests in the stestr scheduler together by
 class. Since this is a common use case this enables that without needing to
 memorize the complicated regex for ``group_regex`` to do
-this.
+this. The ``runner`` option specifies the test runner to use. By default a
+runner based on Python's standard library ``unittest`` module is used.
+However, if you'd prefer ``pytest`` as your runner, you can set
+``runner=pytest`` in the config file.

 There is also an option to specify all the options in the config file via the
 CLI. This way you can run stestr directly without having to write a config file
@@ -137,6 +141,8 @@
 providing configs in TOML format, the configuration directives **must** be
 located in a ``[tool.stestr]`` section, and the filename **must** have a
 ``.toml`` extension.
+
+

 Running tests
 -------------
@@ -166,6 +172,35 @@ Additionally you can specify a specific class or method within that file using
 will skip discovery and directly call the test runner on the test method in the
 specified test class.

+.. note::
+
+    If you're using ``--pytest`` or have configured ``pytest`` as the runner,
+    the ``--no-discover``/``-n`` option passes the id directly to ``pytest``,
+    so the id you pass must be in a format that pytest accepts.
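+
+    For example, the pytest runner accepts a path-style id such as
+    ``tests/test_passing.py::test_pass`` directly, while the default
+    ``unittest`` runner expects the equivalent dotted form
+    ``tests.test_passing.test_pass`` (the paths here are purely
+    illustrative)::
+
+        stestr run --pytest -n tests/test_passing.py::test_pass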
+
+Test runners
+''''''''''''
+
+By default ``stestr`` runs tests with a runner built on the Python standard
+library ``unittest`` module's runner. stestr includes a test runner that
+emits the subunit protocol it relies on internally to handle live results
+from parallel workers. However, there is an alternative runner available
+that leverages ``pytest``, a popular test runner and testing library
+alternative to the standard library's ``unittest`` module. The ``stestr``
+project bundles a ``pytest`` plugin that adds real-time subunit output to
+pytest. As a test suite author, the ``pytest`` plugin enables you to write
+your test suite using pytest's test library instead of ``unittest``. There
+are two ways to specify your test runner. The first is the ``--pytest`` flag
+on ``stestr run``, which tells stestr to use ``pytest`` as the runner for
+that run instead of ``unittest``; this is useful for A/B comparisons between
+the test runners and for general experimentation with different runners. The
+other option is to set the ``runner`` field in your project's config file to
+either ``pytest`` or ``unittest`` (although ``unittest`` is always the
+default, so you should never need to set it explicitly). This is the more
+natural fit, because a test suite written using pytest won't be compatible
+with the unittest-based runner.
+
 Running with pdb
 ''''''''''''''''

diff --git a/requirements.txt b/requirements.txt
index 8e64059..edc625e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,3 +10,4 @@ PyYAML>=3.10.0 # MIT
 voluptuous>=0.8.9 # BSD License
 tomlkit>=0.11.6 # MIT
 extras>=1.0.0
+pytest>=2.3 # MIT
diff --git a/setup.cfg b/setup.cfg
index db766a7..d7930c8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -46,6 +46,8 @@ stestr.cm =
     history_list = stestr.commands.history:HistoryList
     history_show = stestr.commands.history:HistoryShow
     history_remove = stestr.commands.history:HistoryRemove
+pytest11 =
+    stestr_subunit = stestr.pytest_subunit

 [extras]
 sql =
diff --git a/stestr/commands/run.py b/stestr/commands/run.py
index 8c02b24..4d2e2a5 100644
--- a/stestr/commands/run.py
+++ b/stestr/commands/run.py
@@ -244,6 +244,12 @@ def get_parser(self, prog_name):
             help="If set, show non-text attachments. This is "
             "generally only useful for debug purposes.",
         )
+        parser.add_argument(
+            "--pytest",
+            action="store_true",
+            dest="pytest",
+            help="If set, use pytest as the test runner",
+        )
         return parser

     def take_action(self, parsed_args):
@@ -335,6 +341,7 @@ def take_action(self, parsed_args):
             all_attachments=all_attachments,
             show_binary_attachments=args.show_binary_attachments,
             pdb=args.pdb,
+            pytest=args.pytest,
         )

         # Always output slowest test info if requested, regardless of other
@@ -396,6 +403,7 @@ def run_command(
     all_attachments=False,
     show_binary_attachments=True,
     pdb=False,
+    pytest=False,
 ):
     """Function to execute the run command
@@ -460,6 +468,8 @@ def run_command(
     :param str pdb: Takes in a single test_id to bypasses test discover and
         just execute the test specified without launching any additional
         processes. A file name may be used in place of a test name.
+    :param bool pytest: Set to true to use pytest as the test runner instead
+        of the stestr stdlib based unittest runner
     :return return_code: The exit code for the command. 0 for success and > 0
         for failures.
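
A minimal sketch of driving the new keyword from Python, assuming stestr with
this patch is installed and a .stestr.conf exists in the working directory
(run_command is the function whose signature is extended above; every other
argument keeps its default):

    from stestr.commands import run_command

    # Run the suite once using pytest as the runner rather than the
    # stdlib unittest-based subunit runner; returns 0 on success, > 0 on
    # failure, mirroring the documented return_code above.
    return_code = run_command(pytest=True)
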
@@ -519,13 +529,15 @@ def run_command(
             stdout.write(msg)
             return 2

+    conf = config_file.TestrConf.load_from_file(config)
     if no_discover:
         ids = no_discover
-        if "::" in ids:
-            ids = ids.replace("::", ".")
-        if ids.find("/") != -1:
-            root = ids.replace(".py", "")
-            ids = root.replace("/", ".")
+        if not pytest and conf.runner != "pytest":
+            if "::" in ids:
+                ids = ids.replace("::", ".")
+            if ids.find("/") != -1:
+                root = ids.replace(".py", "")
+                ids = root.replace("/", ".")
         stestr_python = sys.executable
         if os.environ.get("PYTHON"):
             python_bin = os.environ.get("PYTHON")
@@ -535,7 +547,10 @@ def run_command(
             raise RuntimeError(
                 "The Python interpreter was not found and " "PYTHON is not set"
             )
-        run_cmd = python_bin + " -m stestr.subunit_runner.run " + ids
+        if pytest or conf.runner == "pytest":
+            run_cmd = python_bin + " -m pytest --subunit " + ids
+        else:
+            run_cmd = python_bin + " -m stestr.subunit_runner.run " + ids

         def run_tests():
             run_proc = [
@@ -629,7 +644,6 @@ def run_tests():
     # that are both failing and listed.
     ids = list_ids.intersection(ids)

-    conf = config_file.TestrConf.load_from_file(config)
     if not analyze_isolation:
         cmd = conf.get_run_command(
             ids,
@@ -645,6 +659,7 @@ def run_tests():
             top_dir=top_dir,
             test_path=test_path,
             randomize=random,
+            pytest=pytest,
         )
         if isolated:
             result = 0
@@ -669,6 +684,7 @@ def run_tests():
                 randomize=random,
                 test_path=test_path,
                 top_dir=top_dir,
+                pytest=pytest,
             )

             run_result = _run_tests(
@@ -724,6 +740,7 @@ def run_tests():
                 randomize=random,
                 test_path=test_path,
                 top_dir=top_dir,
+                pytest=pytest,
             )
             if not _run_tests(cmd, until_failure):
                 # If the test was filtered, it won't have been run.
diff --git a/stestr/config_file.py b/stestr/config_file.py
index 6a66503..5d21617 100644
--- a/stestr/config_file.py
+++ b/stestr/config_file.py
@@ -39,6 +39,7 @@ class TestrConf:
     top_dir = None
     parallel_class = False
     group_regex = None
+    runner = None

     def __init__(self, config_file, section="DEFAULT"):
         self.config_file = str(config_file)
@@ -59,6 +60,7 @@ def _load_from_configparser(self):
         self.group_regex = parser.get(
             self.section, "group_regex", fallback=self.group_regex
         )
+        self.runner = parser.get(self.section, "runner", fallback=self.runner)

     def _load_from_toml(self):
         with open(self.config_file) as f:
@@ -68,6 +70,7 @@ def _load_from_toml(self):
         self.top_dir = root.get("top_dir", self.top_dir)
         self.parallel_class = root.get("parallel_class", self.parallel_class)
         self.group_regex = root.get("group_regex", self.group_regex)
+        self.runner = root.get("runner", self.runner)

     @classmethod
     def load_from_file(cls, config):
@@ -113,6 +116,7 @@ def get_run_command(
         exclude_regex=None,
         randomize=False,
         parallel_class=None,
+        pytest=False,
     ):
         """Get a test_processor.TestProcessorFixture for this config file

@@ -158,6 +162,8 @@ def get_run_command(
             stestr scheduler by class. If both this and the corresponding
             config file option which includes `group-regex` are set, this
             value will be used.
+        :param bool pytest: Set to true to use pytest as the test runner
+            instead of the stestr stdlib based unittest runner
         :returns: a TestProcessorFixture object for the specified config file
             and any arguments passed into this function
         """
@@ -198,12 +204,48 @@ def get_run_command(
         if os.path.exists('"%s"' % python):
             python = '"%s"' % python

-        command = (
-            '%s -m stestr.subunit_runner.run discover -t "%s" "%s" '
-            "$LISTOPT $IDOPTION" % (python, top_dir, test_path)
-        )
-        listopt = "--list"
-        idoption = "--load-list $IDFILE"
+        if not pytest and self.runner is not None:
+            if self.runner == "pytest":
+                pytest = True
+            elif self.runner == "unittest":
+                pytest = False
+            else:
+                raise RuntimeError(
+                    f"Specified runner argument value: {self.runner} in "
+                    "config file is not valid. Only pytest or unittest can be "
+                    "specified in the config file."
+                )
+        if pytest:
+            if sys.platform == "win32":
+                command = (
+                    '%s -m pytest -s --subunit --rootdir="%s" "%s" '
+                    "$LISTOPT $IDOPTION"
+                    % (
+                        python,
+                        top_dir,
+                        test_path,
+                    )
+                )
+
+            else:
+                command = (
+                    '%s -m pytest --subunit --rootdir="%s" "%s" '
+                    "$LISTOPT $IDOPTION"
+                    % (
+                        python,
+                        top_dir,
+                        test_path,
+                    )
+                )
+            listopt = "--co"
+            idoption = "--load-list $IDFILE"
+        else:
+            command = (
+                '%s -m stestr.subunit_runner.run discover -t "%s" "%s" '
+                "$LISTOPT $IDOPTION" % (python, top_dir, test_path)
+            )
+            listopt = "--list"
+            idoption = "--load-list $IDFILE"
         # If the command contains $IDOPTION read that command from config
         # Use a group regex if one is defined
         if parallel_class or self.parallel_class:
diff --git a/stestr/pytest_subunit.py b/stestr/pytest_subunit.py
new file mode 100644
index 0000000..2888108
--- /dev/null
+++ b/stestr/pytest_subunit.py
@@ -0,0 +1,219 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This file was forked from:
+# https://github.com/jogo/pytest-subunit/blob/f5da98f3bee2ffc8d898ced92034f11bcf8e35fe/pytest_subunit.py
+# which itself was a fork from the now archived:
+# pytest-subunit: https://github.com/lukaszo/pytest-subunit
+
+from __future__ import annotations
+
+from typing import Optional
+
+import datetime
+from io import StringIO
+import pathlib
+
+from _pytest._io import TerminalWriter
+from _pytest.terminal import TerminalReporter
+import pytest
+from subunit import StreamResultToBytes
+
+
+def to_path(testid: str) -> pathlib.Path:
+    delim = "::"
+    if delim in testid:
+        path = testid.split(delim)[0]
+    else:
+        path = testid
+    return pathlib.Path(path).resolve()
+
+
+# hook
+def pytest_ignore_collect(collection_path, config) -> Optional[bool]:
+    # TODO(jogo): If a path is specified, use the same short circuit logic
+    # Only collect files in the list
+    if config.option.subunit_load_list:
+        # TODO(jogo): memoize me
+        with open(config.option.subunit_load_list) as f:
+            testids = f.readlines()
+        filenames = [to_path(line.strip()) for line in testids]
+        for filename in filenames:
+            if str(filename).startswith(str(collection_path)):
+                # Don't ignore
+                return None
+        # Ignore everything else by default
+        return True
+    return None
+
+
+# hook
+def pytest_collection_modifyitems(session, config, items):
+    if config.option.subunit:
+        terminal_reporter = config.pluginmanager.getplugin("terminalreporter")
+        terminal_reporter.tests_count += len(items)
+    if config.option.subunit_load_list:
+        with open(config.option.subunit_load_list) as f:
+            to_run = f.readlines()
+        to_run = [line.strip() for line in to_run]
+        # print(to_run)
+        # print([item.nodeid for item in items])
+        filtered = [item for item in items if item.nodeid in to_run]
+        items[:] = filtered
+
+
+# hook
+def pytest_deselected(items):
+    """Update tests_count to not include deselected tests"""
+    if len(items) > 0:
+        pluginmanager = items[0].config.pluginmanager
+        terminal_reporter = pluginmanager.getplugin("terminalreporter")
+        if (
+            hasattr(terminal_reporter, "tests_count")
+            and terminal_reporter.tests_count > 0
+        ):
+            terminal_reporter.tests_count -= len(items)
+
+
+# hook
+def pytest_addoption(parser):
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group._addoption(
+        "--subunit",
+        action="store_true",
+        dest="subunit",
+        default=False,
+        help=("enable pytest-subunit"),
+    )
+    group._addoption(
+        "--load-list",
+        dest="subunit_load_list",
+        default=False,
+        help=("Path to file with list of tests to run"),
+    )
+
+
+@pytest.mark.trylast
+def pytest_configure(config):
+    if config.option.subunit:
+        # Get the standard terminal reporter plugin and replace it with ours
+        standard_reporter = config.pluginmanager.getplugin("terminalreporter")
+        subunit_reporter = SubunitTerminalReporter(standard_reporter)
+        config.pluginmanager.unregister(standard_reporter)
+        config.pluginmanager.register(subunit_reporter, "terminalreporter")
+
+
+class SubunitTerminalReporter(TerminalReporter):
+    def __init__(self, reporter):
+        TerminalReporter.__init__(self, reporter.config)
+        self.tests_count = 0
+        self.reports = []
+        self.skipped = []
+        self.failed = []
+        self.result = StreamResultToBytes(self._tw._file)
+
+    @property
+    def no_summary(self):
+        return True
+
+    def _status(self, report: pytest.TestReport, status: str):
+        # task id
+        test_id = report.nodeid
+
+        # get time
+        now = datetime.datetime.now(datetime.timezone.utc)
+
+        # capture output
+        buffer = StringIO()
+        writer = TerminalWriter(file=buffer)
+        report.toterminal(writer)
+        buffer.seek(0)
+        out_bytes = buffer.read().encode("utf-8")
+
+        # send status
+        self.result.status(
+            test_id=test_id,
+            test_status=status,
+            timestamp=now,
+            file_name=report.fspath,
+            file_bytes=out_bytes,
+            mime_type="text/plain; charset=utf8",
+        )
+
+    def pytest_collectreport(self, report):
+        pass
+
+    def pytest_collection_finish(self, session):
+        if self.config.option.collectonly:
+            self._printcollecteditems(session.items)
+
+    def pytest_collection(self):
+        # Prevent showing the `collecting` message
+        pass
+
+    def report_collect(self, final=False):
+        # Prevent showing the `collecting` message
+        pass
+
+    def pytest_sessionstart(self, session):
+        # Set self._session
+        # https://github.com/pytest-dev/pytest/blob/58cf20edf08d84c5baf08f0566cc9bccbc4ec7fd/src/_pytest/terminal.py#L692
+        self._session = session
+
+    def pytest_runtest_logstart(self, nodeid, location):
+        pass
+
+    def pytest_sessionfinish(self, session, exitstatus):
+        # always exit with exitcode 0
+        session.exitstatus = 0
+
+    def pytest_runtest_logreport(self, report: pytest.TestReport):
+        self.reports.append(report)
+        test_id = report.nodeid
+        if report.when in ["setup", "session"]:
+            self._status(report, "exists")
+            if report.outcome == "passed":
+                self._status(report, "inprogress")
+            if report.outcome == "failed":
+                self._status(report, "fail")
+            elif report.outcome == "skipped":
+                self._status(report, "skip")
+        elif report.when in ["call"]:
+            if hasattr(report, "wasxfail"):
+                if report.skipped:
+                    self._status(report, "xfail")
+                elif report.outcome == "passed":
+                    self._status(report, "uxsuccess")
+                    # record it so teardown doesn't also emit a success
+                    self.failed.append(test_id)
+            elif report.outcome == "failed":
+                self._status(report, "fail")
+                self.failed.append(test_id)
+            elif report.outcome == "skipped":
+                self._status(report, "skip")
+                self.skipped.append(test_id)
+        elif report.when in ["teardown"]:
+            if test_id not in self.skipped and test_id not in self.failed:
+                if report.outcome == "passed":
+                    self._status(report, "success")
+                elif report.outcome == "failed":
+                    self._status(report, "fail")
+        else:
+            raise Exception(str(report))
+
+    def _printcollecteditems(self, items):
+        for item in items:
+            test_id = item.nodeid
+            self.result.status(test_id=test_id, test_status="exists")
+
+    def _determine_show_progress_info(self):
+        # Never show progress bar
+        return False
diff --git a/stestr/tests/pytest_mode_files/__init__.py b/stestr/tests/pytest_mode_files/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/stestr/tests/pytest_mode_files/failing-tests b/stestr/tests/pytest_mode_files/failing-tests
new file mode 100644
index 0000000..fa59696
--- /dev/null
+++ b/stestr/tests/pytest_mode_files/failing-tests
@@ -0,0 +1,22 @@
+import pytest
+
+def inc(x):
+    return x + 1
+
+
+def test_answer():
+    assert inc(3) == 5
+
+
+def test_pass():
+    assert False
+
+
+def test_pass_list():
+    test_list = ['test', 'a', 'b']
+    assert "fail" in test_list
+
+
+@pytest.mark.xfail
+def test_unexpected_pass():
+    assert True
diff --git a/stestr/tests/pytest_mode_files/passing-tests b/stestr/tests/pytest_mode_files/passing-tests
new file mode 100644
index 0000000..ba30b7e
--- /dev/null
+++ b/stestr/tests/pytest_mode_files/passing-tests
@@ -0,0 +1,18 @@
+import pytest
+
+def inc(x):
+    return x + 1
+
+def test_answer():
+    assert inc(4) == 5
+
+def test_pass():
+    assert True
+
+def test_pass_list():
+    test_list = ['test', 'a', 'b']
+    assert "test" in test_list
+
+@pytest.mark.xfail
+def test_xfail():
+    assert 0 == 1
diff --git a/stestr/tests/pytest_mode_files/pytest_stestr_conf b/stestr/tests/pytest_mode_files/pytest_stestr_conf
new file mode 100644
index 0000000..4f69032
--- /dev/null
+++ b/stestr/tests/pytest_mode_files/pytest_stestr_conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./tests
+runner=pytest
diff --git a/stestr/tests/pytest_mode_files/setup.cfg b/stestr/tests/pytest_mode_files/setup.cfg
new file mode 100644
index 0000000..f6f9f73
--- /dev/null
+++ b/stestr/tests/pytest_mode_files/setup.cfg
@@ -0,0 +1,20 @@
+[metadata]
+name = tempest_unit_tests
+version = 1
+summary = Fake Project for testing wrapper scripts
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    Intended Audience :: Developers
+    License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
diff --git a/stestr/tests/test_config_file.py b/stestr/tests/test_config_file.py
index a1a1e9d..c90d682 100644
--- a/stestr/tests/test_config_file.py
+++ b/stestr/tests/test_config_file.py
@@ -26,6 +26,7 @@ class TestTestrConf(base.TestCase):
     def setUp(self, mock_ConfigParser):
         super().setUp()
         self._testr_conf = config_file.TestrConf(mock.sentinel.config_file)
+        self._testr_conf.runner = "unittest"

     @mock.patch.object(config_file.util, "get_repo_open")
     @mock.patch.object(config_file.test_processor, "TestProcessorFixture")
@@ -198,5 +199,6 @@ def test_toml_load(self, mock_toml):
         with open(file_path, "w"):
             pass
         self._testr_conf = config_file.TestrConf(file_path)
+        self._testr_conf.runner = "unittest"
         self._check_get_run_command()
         mock_toml.return_value.__getitem__.assert_called_once_with("tool")
diff --git a/stestr/tests/test_return_codes.py b/stestr/tests/test_return_codes.py
index 99c1e3c..b388aee 100644
--- a/stestr/tests/test_return_codes.py
+++ b/stestr/tests/test_return_codes.py
@@ -524,3 +524,90 @@ def test_all_configs_missing(self):
         os.remove(self.pyproject_toml)
         output, _ = self.assertRunExit("stestr run passing", 1)
         self.assertIn(b"No config file found", output)
+
+
+class TestPytestReturnCodes(TestReturnCodes):
+    def setUp(self):
+        super().setUp()
+        os.chdir(self.repo_root)
+        # Setup test dirs
+        self.directory = tempfile.mkdtemp(prefix="stestr-pytest-unit")
+        self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
+        self.test_dir = os.path.join(self.directory, "tests")
+        os.mkdir(self.test_dir)
+        # Setup Test files
+        self.repo_root = os.path.abspath(os.curdir)
+        self.testr_conf_file = os.path.join(self.directory, ".stestr.conf")
+        self.setup_cfg_file = os.path.join(self.directory, "setup.cfg")
+        self.passing_file = os.path.join(self.test_dir, "test_passing.py")
+        self.failing_file = os.path.join(self.test_dir, "test_failing.py")
+        self.init_file = os.path.join(self.test_dir, "__init__.py")
+        self.setup_py = os.path.join(self.directory, "setup.py")
+        self.user_config = os.path.join(self.directory, "stestr.yaml")
+        shutil.copy(
+            "stestr/tests/pytest_mode_files/pytest_stestr_conf", self.testr_conf_file
+        )
+        shutil.copy("stestr/tests/pytest_mode_files/passing-tests", self.passing_file)
+        shutil.copy("stestr/tests/pytest_mode_files/failing-tests", self.failing_file)
+        shutil.copy("setup.py", self.setup_py)
+        shutil.copy("stestr/tests/pytest_mode_files/setup.cfg", self.setup_cfg_file)
shutil.copy("stestr/tests/pytest_mode_files/__init__.py", self.init_file) + shutil.copy("stestr/tests/files/stestr.yaml", self.user_config) + + self.stdout = io.StringIO() + self.stderr = io.StringIO() + # Change directory, run wrapper and check result + self.addCleanup(os.chdir, self.repo_root) + os.chdir(self.directory) + subprocess.call("stestr init", shell=True) + + def test_history_show_passing(self): + self.assertRunExit("stestr run passing", 0) + self.assertRunExit("stestr run", 1) + self.assertRunExit("stestr run passing", 0) + output, _ = self.assertRunExit("stestr history show 0", 0) + lines = [x.rstrip() for x in output.decode("utf8").split("\n")] + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 0", lines) + self.assertIn(" - Expected Fail: 1", lines) + + def test_history_show_failing(self): + self.assertRunExit("stestr run passing", 0) + self.assertRunExit("stestr run", 1) + self.assertRunExit("stestr run passing", 0) + output, _ = self.assertRunExit("stestr history show 1", 1) + lines = [x.rstrip() for x in output.decode("utf8").split("\n")] + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 3", lines) + self.assertIn(" - Expected Fail: 1", lines) + self.assertIn(" - Unexpected Success: 1", lines) + + def test_run_no_discover_pytest_path(self): + passing_string = "tests/test_passing.py::test_pass_list" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 0) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 1", lines) + self.assertIn(" - Failed: 0", lines) + + def test_run_no_discover_pytest_path_failing(self): + passing_string = "tests/test_failing.py::test_pass_list" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 1) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 0", lines) + self.assertIn(" - Failed: 1", lines) + + def test_run_no_discover_file_path(self): + passing_string = "tests/test_passing.py" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 0) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 0", lines) + self.assertIn(" - Expected Fail: 1", lines) + + def test_run_no_discover_file_path_failing(self): + passing_string = "tests/test_failing.py" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 1) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 0", lines) + self.assertIn(" - Failed: 3", lines) + self.assertIn(" - Unexpected Success: 1", lines)