@@ -10,7 +10,8 @@
 import torch.utils.data
 from torch.testing._internal.common_cuda import TEST_MULTIGPU
 from torch.testing._internal.common_utils import (
-    TestCase, run_tests, TEST_WITH_ASAN, IS_WINDOWS, TemporaryFileName, TemporaryDirectoryName)
+    TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
+    TemporaryFileName, TemporaryDirectoryName)
 from torch.autograd.profiler import profile as _profile
 from torch.profiler import (
     kineto_available, profile, record_function, DeviceType, ProfilerActivity
@@ -118,7 +119,7 @@ def payload(self, use_cuda=False):
 
     @unittest.skipIf(not kineto_available(), "Kineto is required")
     def test_kineto(self):
-        use_cuda = torch.cuda.is_available()
+        use_cuda = torch.cuda.is_available() and (not TEST_WITH_ROCM)
         with _profile(use_cuda=use_cuda, use_kineto=True):
             self.payload(use_cuda=use_cuda)
 
@@ -147,6 +148,7 @@ def test_kineto(self):
 
     @unittest.skipIf(not kineto_available(), "Kineto is required")
     @unittest.skipIf(not TEST_MULTIGPU, "Multiple GPUs needed")
+    @unittest.skipIf(TEST_WITH_ROCM, "Not supported on ROCm")
     def test_kineto_multigpu(self):
         with profile(
             activities=[