
Commit 92770d2

frank-dong-ms authored and facebook-github-bot committed
fix comparison of narrow type with wide type in loop condition (pytorch#53951)
Summary:
Fix Semmle warning: comparison of narrow type with wide type in loop condition. For example, consider the following code:

for (int i = 0; i < array.size(); ++i) {}

The problem is that array.size() returns a size_t, which can be a wider type than int depending on the implementation, so i can overflow (for a very large array whose size exceeds the range of int) and the loop never terminates.

Pull Request resolved: pytorch#53951

Reviewed By: zou3519

Differential Revision: D27181495

Pulled By: malfet

fbshipit-source-id: 0612c5cedcdc656c193085e7fbb87dd163f20688
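The fix applied throughout the files below is to replace index-typed counters with c10::irange, which derives the loop variable's type from its bound. A minimal before/after sketch (the function name, container, and body are illustrative, not taken from the patch):

#include <cstdint>
#include <vector>
#include <c10/util/irange.h>

int64_t sum_values(const std::vector<int64_t>& values) {
  int64_t total = 0;

  // Before: `i` is a narrow int compared against values.size(), a size_t.
  // If the container ever holds more than INT_MAX elements, `i` overflows
  // before reaching the bound and the loop never terminates.
  // for (int i = 0; i < values.size(); ++i) { total += values[i]; }

  // After: c10::irange deduces the index type from its argument, so the
  // loop variable is as wide as the bound and no narrowing comparison occurs.
  for (const auto i : c10::irange(values.size())) {
    total += values[i];
  }
  return total;
}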
1 parent edfc787 commit 92770d2

File tree

59 files changed (+247, -136 lines)


aten/src/ATen/TensorIterator.cpp

Lines changed: 3 additions & 1 deletion
@@ -8,6 +8,8 @@
 #include <ATen/native/Resize.h>
 #include <ATen/TensorOperators.h>
 
+#include <c10/util/irange.h>
+
 namespace at {
 
 using DimMask = TensorIteratorBase::DimMask;

@@ -1392,7 +1394,7 @@ DimCounter::DimCounter(IntArrayRef shape, Range range)
   , offset(range.begin) {
   int64_t linear_offset = range.begin;
   int64_t ndim = values.size();
-  for (int dim = 0; dim < ndim; dim++) {
+  for (const auto dim : c10::irange(ndim)) {
     int64_t size = shape[dim];
     if (size > 0) {
       values[dim] = linear_offset % size;
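For readers unfamiliar with the helper, the sketch below shows roughly what an irange-style integer range does. It is a simplified illustration under assumed semantics, not the actual contents of c10/util/irange.h (the real header also handles a single-argument form and mixed integer types):

#include <cstdint>

// Simplified stand-in for an irange-style helper: iterates integers in
// [begin, end) using the same type T for the index and the bound.
template <typename T>
class IntegerRangeSketch {
 public:
  class iterator {
   public:
    explicit iterator(T value) : value_(value) {}
    T operator*() const { return value_; }
    iterator& operator++() { ++value_; return *this; }
    bool operator!=(const iterator& other) const { return value_ != other.value_; }
   private:
    T value_;
  };

  IntegerRangeSketch(T begin, T end) : begin_(begin), end_(end) {}
  iterator begin() const { return iterator(begin_); }
  iterator end() const { return iterator(end_); }

 private:
  T begin_;
  T end_;
};

// Usage mirrors the patched loops: the loop variable has the bound's type.
// for (const auto dim : IntegerRangeSketch<int64_t>(0, ndim)) { ... }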

aten/src/ATen/Utils.h

Lines changed: 2 additions & 1 deletion
@@ -9,6 +9,7 @@
 #include <c10/util/accumulate.h>
 #include <c10/util/ArrayRef.h>
 #include <c10/util/Exception.h>
+#include <c10/util/irange.h>
 
 #include <algorithm>
 #include <sstream>

@@ -51,7 +52,7 @@ static inline TensorImpl* checked_dense_tensor_unwrap(const Tensor& expr, const
 static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(ArrayRef<Tensor> tensors, const char * name, int pos, DeviceType device_type, ScalarType scalar_type) {
   std::vector<TensorImpl*> unwrapped;
   unwrapped.reserve(tensors.size());
-  for (unsigned int i = 0; i < tensors.size(); ++i) {
+  for (const auto i : c10::irange(tensors.size())) {
     const auto& expr = tensors[i];
     if (expr.layout() != Layout::Strided) {
       AT_ERROR("Expected dense tensor but got ", expr.layout(),

aten/src/ATen/core/type.cpp

Lines changed: 4 additions & 3 deletions
@@ -3,6 +3,7 @@
 #include <ATen/core/function_schema.h>
 #include <ATen/core/jit_type.h>
 #include <c10/macros/Macros.h>
+#include <c10/util/irange.h>
 #include <ATen/core/grad_mode.h>
 #include <ATen/core/function.h>
 #include <iostream>

@@ -1107,7 +1108,7 @@ torch::jit::Function* ClassType::findForwardHook(const std::string& name) const
 std::string getSchemaInputTypesString(const FunctionSchema& schema) {
   std::stringstream input_types;
   const std::vector<Argument>& forward_args = schema.arguments();
-  for (int i = 1; i < forward_args.size(); ++i) {
+  for (const auto i : c10::irange(1, forward_args.size())) {
     input_types << forward_args[i].type()->annotation_str();
     if (forward_args.size() - 1 != i) {
       input_types << ", ";

@@ -1213,7 +1214,7 @@ void checkForwardHookInputArguments(
     hook_err_msg
   );
 
-  for (int i = 1; i < forward_args.size(); ++i) {
+  for (const auto i : c10::irange(1, forward_args.size())) {
     if (*forward_args[i].type() != *input_tuple_types[i - 1]) {
       TORCH_CHECK(
           false,

@@ -1313,7 +1314,7 @@ void ClassType::checkForwardPreHookSchema(
     pre_hook_err_msg
   );
   // check that contained types match forward types
-  for (int i = 1; i < forward_args.size(); ++i) {
+  for (const auto i : c10::irange(1, forward_args.size())) {
     if (*forward_args[i].type() != *return_tuple_types[i - 1]) {
       TORCH_CHECK(
           false,

aten/src/ATen/native/Activation.cpp

Lines changed: 8 additions & 6 deletions
@@ -8,6 +8,8 @@
 #include <ATen/Parallel.h>
 #include <ATen/core/DistributionsHelper.h>
 
+#include <c10/util/irange.h>
+
 namespace at { namespace native {
 
 static const double SELU_ALPHA = 1.6732632423543772848170429916717;

@@ -453,12 +455,12 @@ void inline prelu_cpu_kernel_multi_weights(
   scalar_t* weight_data = weight.data_ptr<scalar_t>();
 
   auto loop = [&](int64_t start, int64_t end) {
-    for (auto i = start; i < end; ++i) {
+    for (const auto i : c10::irange(start, end)) {
       int64_t offset = i * channel_size * input_stride1;
       scalar_t* n_input_data = input_data + offset;
       scalar_t* n_result_data = result_data + offset;
-      for (auto j = 0; j < channel_size; ++j) {
-        for (auto k = 0; k < input_stride1; ++k) {
+      for (const auto j : c10::irange(channel_size)) {
+        for (const auto k : c10::irange(input_stride1)) {
           // to allow for compiler optimization, here splitting into two lines:
           scalar_t w = (n_input_data[k] > 0) ? scalar_t(1) : weight_data[j];
           n_result_data[k] = w * n_input_data[k];

@@ -578,9 +580,9 @@ void inline prelu_cpu_backward_kernel_multi_weights(
   auto weight_grad_collector_data = weight_grad_collector.data_ptr<scalar_t>();
 
   auto loop = [&](int64_t start, int64_t end) {
-    for (auto i = start; i < end; i++) {
-      for (auto j = 0; j < channel_size; j++) {
-        for (auto k = 0; k < input_stride1; k++) {
+    for (const auto i : c10::irange(start, end)) {
+      for (const auto j : c10::irange(channel_size)) {
+        for (const auto k : c10::irange(input_stride1)) {
           int64_t pos = i * input_stride0 + j * input_stride1 + k;
           scalar_t weight_data_val = weight_data[j];
           scalar_t input_data_val = input_data[pos];

aten/src/ATen/native/ConstantPadNd.cpp

Lines changed: 4 additions & 2 deletions
@@ -1,5 +1,7 @@
 #include <ATen/ATen.h>
 
+#include <c10/util/irange.h>
+
 namespace at { namespace native {
 
 Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value) {

@@ -20,7 +22,7 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value)
   bool all_pads_non_positive = true;
 
   auto c_input = self;
-  for (int i = l_diff; i < l_inp; i++) {
+  for (const auto i : c10::irange(l_diff, l_inp)) {
     auto pad_idx = 2 * (l_inp - i - 1);
     if (pad[pad_idx] < 0) {
       c_input = c_input.narrow(i, -pad[pad_idx], c_input.size(i) + pad[pad_idx]);

@@ -69,7 +71,7 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value)
   output.fill_(value);
 
   auto c_output = output;
-  for (int i = l_diff; i < l_inp; i++) {
+  for (const auto i : c10::irange(l_diff, l_inp)) {
     auto pad_idx = 2 * (l_inp - i - 1);
     if (pad[pad_idx] > 0) {
       c_output = c_output.narrow(i, pad[pad_idx], c_output.size(i) - pad[pad_idx]);

aten/src/ATen/native/Convolution.cpp

Lines changed: 2 additions & 1 deletion
@@ -7,6 +7,7 @@
 #include <ATen/native/xnnpack/Engine.h>
 #include <ATen/NativeFunctions.h>
 #include <c10/util/accumulate.h>
+#include <c10/util/irange.h>
 
 #include <ATen/Config.h>
 #include <c10/macros/Macros.h>

@@ -489,7 +490,7 @@ static void check_shape_forward(const at::Tensor& input,
       ", expected bias to be 1-dimensional with ", weight_sizes[0], " elements",
       ", but got bias of size ", bias.sizes(), " instead");
 
-  for (int i = 2; i < k; ++i) {
+  for (const auto i : c10::irange(2, k)) {
     input_shape.push_back(input.size(i) + 2 * padding[i-2]);
     // log new kernel size considering dilation
     kernel_shape.push_back(dilation[i-2] * (weight_sizes[i]-1) + 1);

aten/src/ATen/native/Embedding.cpp

Lines changed: 4 additions & 2 deletions
@@ -3,6 +3,8 @@
 #include <ATen/TensorUtils.h>
 #include <ATen/NativeFunctions.h>
 
+#include <c10/util/irange.h>
+
 #include <cstring>
 #include <memory>
 #include <sstream>

@@ -97,10 +99,10 @@ Tensor embedding_dense_backward_cpu(
   std::unique_ptr<index_t[]> counts;
   if (scale_grad_by_freq) {
     counts.reset(new index_t[num_weights]);
-    for (int i = 0; i < numel; i++) {
+    for (const auto i : c10::irange(numel)) {
       counts[indices_data[i]] = 0;
     }
-    for (int i = 0; i < numel; i++) {
+    for (const auto i : c10::irange(numel)) {
      counts[indices_data[i]]++;
     }
   }

aten/src/ATen/native/EmbeddingBag.cpp

Lines changed: 5 additions & 3 deletions
@@ -6,6 +6,8 @@
 
 #include <ATen/native/CPUBlas.h>
 
+#include <c10/util/irange.h>
+
 #ifdef USE_FBGEMM
 #include <fbgemm/Fbgemm.h>
 #else

@@ -535,11 +537,11 @@ void embedding_bag_cpu_max_out(
   auto weight_stride1 = weight.strides()[1];
   auto output_stride = output.strides()[0];
 
-  for (int i = 0; i < numIndices; ++i) {
+  for (const auto i : c10::irange(numIndices)) {
     auto bag = offset2bag_data[i];
     auto word_idx = indices_data[i];
 
-    for (int dim = 0; dim < featureSize; dim++) {
+    for (const auto dim : c10::irange(featureSize)) {
       auto& current_item = output_data[output_stride * bag + dim];
       auto weight_item =
           weight_data[weight_stride0 * word_idx + dim * weight_stride1];

@@ -751,7 +753,7 @@ static std::vector<index_t> compute_counts(
     index_t* indices_data,
     int64_t indices_length) {
   std::vector<index_t> counts(num_weights, 0);
-  for (int i = 0; i < indices_length; i++) {
+  for (const auto i : c10::irange(indices_length)) {
     counts[indices_data[i]]++;
   }
   return counts;

aten/src/ATen/native/ForeachUtils.h

Lines changed: 4 additions & 2 deletions
@@ -1,6 +1,8 @@
 #pragma once
 #include <ATen/ATen.h>
 
+#include <c10/util/irange.h>
+
 namespace at {
 namespace native {
 namespace {

@@ -29,7 +31,7 @@ void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) {
 
   auto expected_dtype = tensors1[0].dtype();
 
-  for (int i = 0; i < tensors1.size(); i++) {
+  for (const auto i : c10::irange(tensors1.size())) {
     TORCH_CHECK(tensors1[i].dtype() == expected_dtype, "All tensors in the tensor list must have the same dtype.");
     TORCH_CHECK(tensors2[i].dtype() == expected_dtype, "All tensors in the tensor list must have the same dtype.");
     TORCH_CHECK(tensors1[i].sizes() == tensors2[i].sizes(), "Corresponding tensors in lists must have the same size, got ", tensors1[i].sizes(), " and ", tensors2[i].sizes());

@@ -45,7 +47,7 @@ void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2, Te
 
   auto expected_dtype = tensors1[0].dtype();
 
-  for (int i = 0; i < tensors1.size(); i++) {
+  for (const auto i : c10::irange(tensors1.size())) {
     TORCH_CHECK(tensors1[i].dtype() == expected_dtype, "All tensors in the tensor list must have the same dtype.");
     TORCH_CHECK(tensors2[i].dtype() == expected_dtype, "All tensors in the tensor list must have the same dtype.");
     TORCH_CHECK(tensors1[i].sizes() == tensors2[i].sizes(), "Corresponding tensors in lists must have the same size, got ", tensors1[i].sizes(), " and ", tensors2[i].sizes());

aten/src/ATen/native/FractionalMaxPool3d.cpp

Lines changed: 3 additions & 1 deletion
@@ -2,6 +2,8 @@
 #include <ATen/NativeFunctions.h>
 #include <ATen/Parallel.h>
 
+#include <c10/util/irange.h>
+
 #include <tuple>
 #include <vector>
 

@@ -20,7 +22,7 @@ static std::vector<int> generate_intervals(
     scalar_t alpha = static_cast<scalar_t>(inputSize - poolSize) /
       static_cast<scalar_t>(outputSize - 1);
 
-    for (int i = 0; i < outputSize - 1; ++i) {
+    for (const auto i : c10::irange(outputSize - 1)) {
       sequence[i] =
         static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
     }

0 commit comments
