Skip to content

Commit

Permalink
Add support for Python 3.11 (gh-142)
Browse files — browse the repository at this point in the history
  • Loading branch information
hugovk authored Nov 4, 2022
1 parent f0a9111 commit 0318d44
Show file tree
Hide file tree
Showing 14 changed files with 20 additions and 29 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,16 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python: ['3.10']
python: ['3.11']
include:
- os: ubuntu-latest
python: '3.6'
- os: ubuntu-latest
python: '3.7'
- os: ubuntu-latest
python: '3.8'
- os: ubuntu-latest
python: '3.9'
- os: ubuntu-latest
python: '3.10'

steps:
- uses: actions/checkout@v2
Expand Down
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ Command to install pyperf on Python 3::

python3 -m pip install pyperf

pyperf requires Python 3.6 or newer.
pyperf requires Python 3.7 or newer.

Python 2.7 users can use pyperf 1.7.1, which is the last version compatible with
Python 2.7.
Expand Down
4 changes: 2 additions & 2 deletions pyperf/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -580,7 +580,7 @@ def cmd_hist(args):
checks=checks):
print(line)

if not(is_last or ignored):
if not (is_last or ignored):
print()

for suite, ignored in ignored:
Expand Down Expand Up @@ -691,7 +691,7 @@ def cmd_convert(args):
for benchmark in suite:
benchmark._remove_all_metadata()

compact = not(args.indent)
compact = not args.indent
if args.output_filename:
suite.dump(args.output_filename, compact=compact)
else:
Expand Down
4 changes: 2 additions & 2 deletions pyperf/_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ class Run(object):

def __init__(self, values, warmups=None,
metadata=None, collect_metadata=True):
if any(not(isinstance(value, NUMBER_TYPES) and value > 0)
if any(not (isinstance(value, NUMBER_TYPES) and value > 0)
for value in values):
raise ValueError("values must be a sequence of number > 0.0")

Expand Down Expand Up @@ -425,7 +425,7 @@ def median_abs_dev(self):
return value

def percentile(self, p):
if not(0 <= p <= 100):
if not (0 <= p <= 100):
raise ValueError("p must be in the range [0; 100]")
return percentile(self.get_values(), p / 100.0)

Expand Down
4 changes: 2 additions & 2 deletions pyperf/_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ def format_stats(bench, lines):
else:
text = "%s (average)" % total_loops

if not(isinstance(inner_loops, int) and inner_loops == 1):
if not (isinstance(inner_loops, int) and inner_loops == 1):
if isinstance(loops, int):
loops = format_number(loops, 'outer-loop')
else:
Expand Down Expand Up @@ -324,7 +324,7 @@ def format_limit(mean, value):
iqr = q3 - q1
outlier_min = (q1 - 1.5 * iqr)
outlier_max = (q3 + 1.5 * iqr)
noutlier = sum(not(outlier_min <= value <= outlier_max)
noutlier = sum(not (outlier_min <= value <= outlier_max)
for value in values)
bounds = bench.format_values((outlier_min, outlier_max))
lines.append('Number of outlier (out of %s..%s): %s'
Expand Down
4 changes: 2 additions & 2 deletions pyperf/_compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -395,7 +395,7 @@ def compare_suites_list(self, all_results):
for result in results:
lines.extend(result.format(self.verbose))

if not(significant or self.verbose):
if not (significant or self.verbose):
not_significant.append(results.name)
continue

Expand Down Expand Up @@ -472,7 +472,7 @@ def compare(self):
]
self.compare_suites(all_results)
print()
display_title(f"All benchmarks:")
display_title("All benchmarks:")
self.compare_suites(self.all_results)

if not self.quiet:
Expand Down
2 changes: 1 addition & 1 deletion pyperf/_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def spawn_worker(self, calibrate_loops, calibrate_warmups):
# bInheritHandles=True. For pass_handles, see
# http://bugs.python.org/issue19764
kw['close_fds'] = False
elif sys.version_info >= (3, 2):
else:
kw['pass_fds'] = [wpipe.fd]

proc = subprocess.Popen(cmd, env=env, **kw)
Expand Down
3 changes: 1 addition & 2 deletions pyperf/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,7 @@ def median_abs_dev(values):


def percentile(values, p):
if not isinstance(p, float) or not(0.0 <= p <= 1.0):
if not isinstance(p, float) or not (0.0 <= p <= 1.0):
raise ValueError("p must be a float in the range [0.0; 1.0]")

values = sorted(values)
Expand Down Expand Up @@ -427,4 +427,3 @@ def merge_profile_stats(profiler, dst):
dst_stats.dump_stats(dst)
else:
profiler.dump_stats(dst)

4 changes: 2 additions & 2 deletions pyperf/_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def test_calibrate_warmups(self, nwarmup, unit):
iqr = q3 - q1
outlier_max = (q3 + 1.5 * iqr)
# only check maximum, not minimum
outlier = not(first_value <= outlier_max)
outlier = not (first_value <= outlier_max)

mean1 = statistics.mean(sample1)
mean2 = statistics.mean(sample2)
Expand Down Expand Up @@ -185,7 +185,7 @@ def test_calibrate_warmups(self, nwarmup, unit):

if outlier:
return False
if not(-0.5 <= mean_diff <= 0.10):
if not (-0.5 <= mean_diff <= 0.10):
return False
if abs(mad_diff) > 0.10:
return False
Expand Down
5 changes: 1 addition & 4 deletions pyperf/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,7 @@
def _capture_stream(name):
old_stream = getattr(sys, name)
try:
if sys.version_info >= (3,):
stream = io.StringIO()
else:
stream = io.BytesIO()
stream = io.StringIO()
setattr(sys, name, stream)
yield stream
finally:
Expand Down
1 change: 0 additions & 1 deletion pyperf/tests/test_perf_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,6 @@ def test_compare_to_rest_table(self):
self.assertEqual(stdout.rstrip(),
expected)


def test_compare_to_md_table(self):
ref_result = self.create_bench((1.0,),
metadata={'name': 'telco'})
Expand Down
1 change: 1 addition & 0 deletions pyperf/tests/test_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ class ClockInfo:
self.assertEqual(bench.get_nrun(), 1)

return Result(runner, bench, stdout)

def test_worker(self):
result = self.exec_runner('--worker', '-l1', '-w1')
self.assertRegex(result.stdout,
Expand Down
2 changes: 0 additions & 2 deletions setup.cfg

This file was deleted.

7 changes: 2 additions & 5 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#
# Release a new version:
#
# - go to the Github release tab: https://github.com/psf/pyperf/releases
# - go to the GitHub release tab: https://github.com/psf/pyperf/releases
# - click "Draft a new release" and fill the contents
# - finally click the "Publish release" button! Done!
# - monitor the publish status: https://github.com/psf/pyperf/actions/workflows/publish.yml
Expand All @@ -39,7 +39,7 @@


# put most of the code inside main() to be able to import setup.py in
# test_tools.py, to ensure that VERSION is the same than
# test_tools.py, to ensure that VERSION is the same as
# pyperf.__version__.
def main():
from setuptools import setup
Expand All @@ -61,9 +61,6 @@ def main():
'install_requires': [],
# don't use environment markers in install_requires, but use weird
# syntax of extras_require, to support setuptools 18
'extras_require': {
":python_version < '3.4'": ["statistics"],
},
'entry_points': {
'console_scripts': ['pyperf=pyperf.__main__:main']
}
Expand Down

0 comments on commit 0318d44

Please sign in to comment.