
Commit

fix failing tests
baptistecolle committed Oct 15, 2024
1 parent dbbed90 commit 3cd0519
Showing 4 changed files with 29 additions and 11 deletions.
11 changes: 11 additions & 0 deletions Makefile
@@ -90,6 +90,17 @@ run_trt_container:
--workdir /optimum-benchmark \
huggingface/optimum-nvidia:latest

run_cloud_cuda_container:
docker run \
-it \
--rm \
--pid host \
--gpus all \
--shm-size 64G \
--volume $(PWD):/optimum-benchmark \
--workdir /optimum-benchmark \
ghcr.io/huggingface/optimum-benchmark:latest-cuda

## Install extras

install_api_misc:
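The new target mirrors the existing run_*_container recipes: assuming Docker with NVIDIA GPU support is available, it can presumably be invoked from the repository root with make run_cloud_cuda_container, which opens an interactive shell inside the ghcr.io/huggingface/optimum-benchmark:latest-cuda image with the checkout mounted at /optimum-benchmark.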
6 changes: 6 additions & 0 deletions examples/pytorch_bert.py
@@ -1,10 +1,14 @@
import os
import warnings

from huggingface_hub import whoami

from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig
from optimum_benchmark.logging_utils import setup_logging

if os.environ.get("LOG_LEVEL", "INFO") == "ERROR":
warnings.filterwarnings("ignore") # This disables all warnings

try:
USERNAME = whoami()["name"]
except Exception as e:
@@ -32,6 +36,8 @@ def run_benchmark():


if __name__ == "__main__":
# level = os.environ.get("LOG_LEVEL", "INFO")
level = "INFO"
to_file = os.environ.get("LOG_TO_FILE", "0") == "1"
setup_logging(level=level, to_file=to_file, prefix="MAIN-PROCESS")

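For context, the classes imported above come from the optimum-benchmark Python API; the body of run_benchmark() is collapsed in this diff, so the following is only an illustrative sketch of how such a benchmark is typically assembled. The model name, backend options, and scenario settings are assumptions based on the project's documented usage, not the example's actual elided code.

# Illustrative sketch only -- not the elided body of examples/pytorch_bert.py.
# Model name, device, and config values below are assumptions.
from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig
from optimum_benchmark.logging_utils import setup_logging

def run_benchmark():
    launcher_config = ProcessConfig()  # run the benchmark in a separate process
    backend_config = PyTorchConfig(model="bert-base-uncased", device="cpu", no_weights=True)
    scenario_config = InferenceConfig(latency=True, memory=True)
    benchmark_config = BenchmarkConfig(
        name="pytorch_bert",
        launcher=launcher_config,
        scenario=scenario_config,
        backend=backend_config,
    )
    report = Benchmark.launch(benchmark_config)  # runs the benchmark and returns a report
    print(report)

if __name__ == "__main__":
    setup_logging(level="INFO", to_file=False, prefix="MAIN-PROCESS")
    run_benchmark()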
6 changes: 4 additions & 2 deletions examples/pytorch_gpt2.py
@@ -1,10 +1,14 @@
import os
import warnings

from huggingface_hub import whoami

from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig
from optimum_benchmark.logging_utils import setup_logging

if os.environ.get("LOG_LEVEL", "INFO") == "ERROR":
warnings.filterwarnings("ignore") # This disables all warnings

try:
USERNAME = whoami()["name"]
except Exception as e:
@@ -65,8 +69,6 @@ def run_benchmark(weight_config: str):

if __name__ == "__main__":
level = os.environ.get("LOG_LEVEL", "INFO")
print("level", level)
raise Exception("This is a test exception")
to_file = os.environ.get("LOG_TO_FILE", "0") == "1"
setup_logging(level=level, to_file=to_file, prefix="MAIN-PROCESS")

17 changes: 8 additions & 9 deletions tests/test_examples.py
@@ -152,26 +152,25 @@ def test_yaml_config(config_name):
def execute_python_script(script_name):
script_path = EXAMPLES_DIR / script_name
# Run the example file as a separate process
process = subprocess.Popen([sys.executable, str(script_path)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

process = subprocess.Popen(
[sys.executable, str(script_path)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)

# Capture and display output in real-time
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
if output == "" and process.poll() is not None:
break
if output:
print(output.strip())
sys.stdout.flush()

# Capture any remaining output
stdout, stderr = process.communicate()

# Create a result object similar to subprocess.run
result = subprocess.CompletedProcess(
args=[sys.executable, str(script_path)],
returncode=process.returncode,
stdout=stdout,
stderr=stderr
args=[sys.executable, str(script_path)], returncode=process.returncode, stdout=stdout, stderr=stderr
)

# Check that the process completed successfully (return code 0)
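Streaming stdout line by line keeps each example's progress visible in real time during CI runs, while communicate() still collects whatever remains in the pipes afterwards. Below is a hedged sketch of how this helper is presumably driven; the parametrized test, including the script names, is an assumption rather than the actual contents of tests/test_examples.py.

# Hypothetical driver for execute_python_script; assumes pytest and that the
# example scripts shown above live under EXAMPLES_DIR. Script names are assumptions.
import pytest

@pytest.mark.parametrize("script_name", ["pytorch_bert.py", "pytorch_gpt2.py"])
def test_python_example(script_name):
    execute_python_script(script_name)  # presumably asserts a zero return code internally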
