Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[GRAPHBOLT] Activation of several graphbolt tests for CPU #7586

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 6 additions & 18 deletions tests/python/pytorch/graphbolt/test_subgraph_sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -944,12 +944,8 @@ def test_SubgraphSampler_unique_csc_format_Homo_Node_cpu(labor):
)


@unittest.skipIf(
F._default_context_str == "cpu",
reason="Fails due to different result on the CPU.",
)
@pytest.mark.parametrize("labor", [False, True])
def test_SubgraphSampler_unique_csc_format_Homo_Node_gpu(labor):
def test_SubgraphSampler_unique_csc_format_Homo_Node(labor):
torch.manual_seed(1205)
graph = dgl.graph(([5, 0, 7, 7, 2, 4], [0, 1, 2, 2, 3, 4]))
graph = gb.from_dglgraph(graph, is_homogeneous=True).to(F.ctx())
Expand All @@ -970,7 +966,7 @@ def test_SubgraphSampler_unique_csc_format_Homo_Node_gpu(labor):
deduplicate=True,
)

if torch.cuda.get_device_capability()[0] < 7:
if F.ctx() != F.cpu() and torch.cuda.get_device_capability()[0] < 7:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd prefer to keep the check at the top of the test cases. The CPU path is actually still not running even after your fix.

original_row_node_ids = [
torch.tensor([0, 3, 4, 2, 5, 7]).to(F.ctx()),
torch.tensor([0, 3, 4, 2, 5]).to(F.ctx()),
Expand Down Expand Up @@ -1371,12 +1367,8 @@ def test_SubgraphSampler_unique_csc_format_Homo_Link_cpu(labor):
)


@unittest.skipIf(
F._default_context_str == "cpu",
reason="Fails due to different result on the CPU.",
)
@pytest.mark.parametrize("labor", [False, True])
def test_SubgraphSampler_unique_csc_format_Homo_Link_gpu(labor):
def test_SubgraphSampler_unique_csc_format_Homo_Link(labor):
torch.manual_seed(1205)
graph = dgl.graph(([5, 0, 7, 7, 2, 4], [0, 1, 2, 2, 3, 4]))
graph = gb.from_dglgraph(graph, is_homogeneous=True).to(F.ctx())
Expand All @@ -1395,7 +1387,7 @@ def test_SubgraphSampler_unique_csc_format_Homo_Link_gpu(labor):
deduplicate=True,
)

if torch.cuda.get_device_capability()[0] < 7:
if F.ctx() != F.cpu() and torch.cuda.get_device_capability()[0] < 7:
original_row_node_ids = [
torch.tensor([0, 3, 4, 2, 5, 7]).to(F.ctx()),
torch.tensor([0, 3, 4, 2, 5]).to(F.ctx()),
Expand Down Expand Up @@ -1752,12 +1744,8 @@ def test_SubgraphSampler_unique_csc_format_Homo_HyperLink_cpu(labor):
)


@unittest.skipIf(
F._default_context_str == "cpu",
reason="Fails due to different result on the CPU.",
)
@pytest.mark.parametrize("labor", [False, True])
def test_SubgraphSampler_unique_csc_format_Homo_HyperLink_gpu(labor):
def test_SubgraphSampler_unique_csc_format_Homo_HyperLink(labor):
torch.manual_seed(1205)
graph = dgl.graph(([5, 0, 7, 7, 2, 4], [0, 1, 2, 2, 3, 4]))
graph = gb.from_dglgraph(graph, is_homogeneous=True).to(F.ctx())
Expand All @@ -1776,7 +1764,7 @@ def test_SubgraphSampler_unique_csc_format_Homo_HyperLink_gpu(labor):
deduplicate=True,
)

if torch.cuda.get_device_capability()[0] < 7:
if F.ctx() != F.cpu() and torch.cuda.get_device_capability()[0] < 7:
original_row_node_ids = [
torch.tensor([0, 3, 4, 2, 5, 7]).to(F.ctx()),
torch.tensor([0, 3, 4, 2, 5]).to(F.ctx()),
Expand Down
8 changes: 2 additions & 6 deletions tests/python/pytorch/graphbolt/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,7 @@ def test_exclude_seed_edges_homo_cpu():
)


@unittest.skipIf(
F._default_context_str == "cpu",
reason="Fails due to different result on the CPU.",
)
def test_exclude_seed_edges_gpu():
def test_exclude_seed_edges():
graph = dgl.graph(([5, 0, 7, 7, 2, 4], [0, 1, 2, 2, 3, 4]))
graph = gb.from_dglgraph(graph, is_homogeneous=True).to(F.ctx())
items = torch.LongTensor([[0, 3], [4, 4]])
Expand All @@ -138,7 +134,7 @@ def test_exclude_seed_edges_gpu():
deduplicate=True,
)
datapipe = datapipe.transform(partial(gb.exclude_seed_edges))
if torch.cuda.get_device_capability()[0] < 7:
if F.ctx() != F.cpu() and torch.cuda.get_device_capability()[0] < 7:
original_row_node_ids = [
torch.tensor([0, 3, 4, 2, 5, 7]).to(F.ctx()),
torch.tensor([0, 3, 4, 2, 5]).to(F.ctx()),
Expand Down
Loading