Upgrade fbcode/deeplearning/fbgemm to Python Scientific Stack 2 (pytorch#3250)

Summary:
Pull Request resolved: pytorch#3250

X-link: facebookresearch/FBGEMM#350

Differential Revision: D64008037

fbshipit-source-id: a77d4662299ce65827f707ba583b6766ba17b285
igorsugak authored and facebook-github-bot committed Oct 17, 2024
1 parent 6183d43 commit b435ead
Showing 6 changed files with 20 additions and 1 deletion.
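For context on the recurring suppressions below: the Scientific Stack 2 upgrade presumably ships stricter NumPy type stubs under which np.average is typed as returning np.floating[typing.Any] rather than a plain number. Assigning that result to an int-annotated variable and reusing it in integer arithmetic is the pattern most of the added pyre-fixme[9], [58], and [6] comments suppress. A minimal, illustrative sketch of that pattern (not code from this diff; names and values are made up):

# Illustrative sketch only -- not code from this diff. It reproduces the
# typing pattern that the added pyre-fixme comments suppress, assuming the
# stricter NumPy stubs type np.average as returning np.floating[Any].
import numpy as np

B, T, L = 32, 4, 20          # made-up benchmark shape parameters
Ds = [64, 128, 256, 512]     # made-up per-table embedding dims

D: int = 0
# Pyre would report [9] here: D has type `int`; used as `floating[typing.Any]`.
D = np.average(Ds)

# Reusing D in int arithmetic then reports [58] (`*` between `int` and
# `floating[Any]`), and passing it where an `int`/`SymInt` is expected
# reports [6] -- the runtime values are unchanged in all cases.
accessed_gb = B * T * L * D * 4 / 1.0e9
print(f"Accessed weights per batch: {B * T * L} rows, {accessed_gb: .2f} GB")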
13 changes: 13 additions & 0 deletions fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
@@ -365,6 +365,8 @@ def context_factory(on_trace_ready: Callable[[profile], None]):
if do_pooling:
grad_output = torch.randn(B, sum(Ds)).to(get_device())
else:
# pyre-fixme[6]: For 2nd argument expected `Union[int, SymInt]` but got
# `Union[floating[typing.Any], int]`.
grad_output = torch.randn(B * T * L, D).to(get_device())

with context_factory(lambda p: _kineto_trace_handler(p, "fwd_bwd")):
@@ -839,6 +841,8 @@ def cache( # noqa C901
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
# pyre-fixme[58]: `*` is not supported for operand types `int` and
# `Union[np.floating[typing.Any], int]`.
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)

@@ -1049,6 +1053,8 @@ def nbit_cpu( # noqa C901
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
# pyre-fixme[58]: `*` is not supported for operand types `int` and
# `Union[np.floating[typing.Any], int]`.
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)

@@ -1232,6 +1238,8 @@ def nbit_device( # noqa C901
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
# pyre-fixme[58]: `*` is not supported for operand types `int` and
# `Union[np.floating[typing.Any], int]`.
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)

@@ -1337,6 +1345,8 @@ def nbit_device( # noqa C901
B,
L,
E,
# pyre-fixme[6]: For 6th argument expected `int` but got
# `Union[floating[typing.Any], int]`.
D,
pooling,
weighted,
@@ -2037,6 +2047,7 @@ def nbit_uvm_compare_direct_mapped(
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
# pyre-fixme[9]: D has type `int`; used as `floating[typing.Any]`.
D = np.average(Ds)
else:
Ds: List[int] = [D] * T
@@ -2323,6 +2334,8 @@ def nbit_cache( # noqa C901
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
# pyre-fixme[58]: `*` is not supported for operand types `int` and
# `Union[np.floating[typing.Any], int]`.
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)

1 change: 1 addition & 0 deletions fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py
@@ -286,6 +286,7 @@ def ssd_training( # noqa C901
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
# pyre-fixme[9]: D has type `int`; used as `floating[typing.Any]`.
D = np.average(Ds)
else:
Ds: List[int] = [D] * T
1 change: 1 addition & 0 deletions fbgemm_gpu/codegen/genscript/optimizer_args.py
@@ -34,6 +34,7 @@

@dataclass
class OptimizerArgsSetItem:
# pyre-fixme[11]: Annotation `ArgType` is not defined as a type.
ty: ArgType # type
name: str
default: Union[float, ArgType] = 0 # DEFAULT_ARG_VAL
2 changes: 2 additions & 0 deletions fbgemm_gpu/fbgemm_gpu/tbe/utils/offsets.py
@@ -25,6 +25,8 @@ def get_table_batched_offsets_from_dense(
if L is None and total_B is None:
(T, B, L) = merged_indices.size()
total_B = T * B
# pyre-fixme[6]: For 1st argument expected `Union[Sequence[SupportsIndex],
# SupportsIndex]` but got `Optional[int]`.
lengths = np.ones(total_B) * L
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
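The pyre-fixme[6] in offsets.py above is about optionality rather than NumPy scalars: if the caller supplies L but not total_B, total_B can still be None when np.ones is called, and the newer stubs want a SupportsIndex (or a sequence of them) for the shape argument. A simplified, hypothetical sketch of that pattern (lengths_from_dense and its defaults are made up, not the FBGEMM signature):

# Simplified, hypothetical sketch of the pattern in
# get_table_batched_offsets_from_dense; names and defaults are made up.
from typing import Optional

import numpy as np

def lengths_from_dense(T: int, B: int, L: Optional[int], total_B: Optional[int]) -> np.ndarray:
    if L is None and total_B is None:
        L = 2                # the real code derives (T, B, L) from the indices tensor
        total_B = T * B
    # If L was given but total_B was not, total_B is still None here, so the
    # stubs reject Optional[int] as a shape -- hence the pyre-fixme[6].
    return np.ones(total_B) * L

lengths_from_dense(4, 8, None, None)   # ok at runtime: both derived in the branch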
2 changes: 2 additions & 0 deletions fbgemm_gpu/test/batched_unary_embeddings_test.py
@@ -106,6 +106,8 @@ def _generate_unary_features(
offset = 0
for _ in range(batch_size):
n_indices = 1
# pyre-fixme[6]: For 1st argument expected `Iterable[typing.Any]` but
# got `float`.
indices += np.round(
np.random.random(n_indices) * (num_embeddings - 1)
).tolist()
2 changes: 1 addition & 1 deletion fbgemm_gpu/test/sparse/misc_ops_test.py
@@ -70,7 +70,7 @@ def test_offsets_range(
offsets_type: "Union[Type[torch.int32], Type[torch.int64]]",
) -> None:
lengths = np.array([np.random.randint(low=0, high=20) for _ in range(N)])
offsets = np.cumsum(np.concatenate(([0], lengths)))[:-1]
offsets = np.cumsum(np.concatenate([[0], lengths]))[:-1]
range_ref = torch.from_numpy(
np.concatenate([np.arange(size) for size in lengths])
)
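The only non-comment change in the diff is the np.concatenate call in misc_ops_test.py: the tuple-of-sequences argument becomes a list, presumably to satisfy the upgraded stubs' expected argument type. The two forms are equivalent at runtime, as this small check (illustrative, not part of the commit) shows:

# Illustrative runtime-equivalence check for the np.concatenate change.
import numpy as np

lengths = np.array([3, 1, 4])
old = np.cumsum(np.concatenate(([0], lengths)))[:-1]   # tuple argument (before)
new = np.cumsum(np.concatenate([[0], lengths]))[:-1]   # list argument (after)
assert (old == new).all()                              # both give [0, 3, 4]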
