Skip to content

Commit 637beb4

Browse files
committed
irangeify most of runtime/core/exec_aten
Had to skip scalar_type_util.h because it needs the constexpr patch to land in PyTorch (and for us to update our pin to pick it up). ghstack-source-id: ba94b4e ghstack-comment-id: 2673136511 Pull Request resolved: #8612
1 parent b7482b9 commit 637beb4

File tree

10 files changed

+67
-57
lines changed

10 files changed

+67
-57
lines changed

runtime/core/exec_aten/testing_util/tensor_factory.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <algorithm>
66
#include <cstdint>
77

8+
#include <c10/util/irange.h>
89
#include <executorch/runtime/core/exec_aten/exec_aten.h>
910
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
1011
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -78,7 +79,7 @@ inline bool check_strides(
7879
// a.strides == (1, 1, 2). We want to create a sorted mapping to make the
7980
// sorted_stride as (2, 1, 1) while sorted_size == (3, 2, 1)
8081
std::vector<std::int32_t> sorted_idx(sizes.size());
81-
for (size_t i = 0; i < sizes.size(); i++) {
82+
for (const auto i : c10::irange(sizes.size())) {
8283
sorted_idx[i] = i;
8384
}
8485
std::sort(
@@ -98,7 +99,7 @@ inline bool check_strides(
9899
// Use the mapping to rearrange the sizes and strides
99100
std::vector<std::int32_t> sorted_sizes(sizes.size());
100101
std::vector<std::int32_t> sorted_strides(sizes.size());
101-
for (size_t i = 0; i < sizes.size(); i++) {
102+
for (const auto i : c10::irange(sizes.size())) {
102103
sorted_sizes[i] = sizes[sorted_idx[i]] == 0 ? 1 : sizes[sorted_idx[i]];
103104
sorted_strides[i] = strides[sorted_idx[i]];
104105
}
@@ -132,7 +133,7 @@ inline bool check_dim_order(
132133
}
133134
size_t gauss_sum = 0;
134135
std::vector<int> count(dim_order.size(), 0);
135-
for (int i = 0; i < dim_order.size(); i++) {
136+
for (const auto i : c10::irange(dim_order.size())) {
136137
if (dim_order[i] < 0 || dim_order[i] >= sizes.size()) {
137138
return false;
138139
}
@@ -378,7 +379,7 @@ class TensorFactory {
378379
std::vector<executorch::aten::StridesType> contiguous_strides =
379380
internal::strides_from_dim_order(sizes, contiguous_dim_order);
380381

381-
for (int32_t i = 0; i < input.dim(); i++) {
382+
for (const auto i : c10::irange(input.dim())) {
382383
ET_CHECK_MSG(
383384
input.strides()[i] == contiguous_strides[i],
384385
"Input tensor is not contiguous");
@@ -394,10 +395,10 @@ class TensorFactory {
394395
std::vector<ctype> channels_last_data(
395396
N * C * H * W); // Create a new blob with the same total size to contain
396397
// channels_last data
397-
for (int32_t n = 0; n < N; ++n) {
398-
for (int32_t c = 0; c < C; ++c) {
399-
for (int32_t h = 0; h < H; ++h) {
400-
for (int32_t w = 0; w < W; ++w) {
398+
for (const auto n : c10::irange(N)) {
399+
for (const auto c : c10::irange(C)) {
400+
for (const auto h : c10::irange(H)) {
401+
for (const auto w : c10::irange(W)) {
401402
// Calculate the index in the original blob
402403
int32_t old_index = ((n * C + c) * H + h) * W + w;
403404
// Calculate the index in the new blob
@@ -598,7 +599,7 @@ inline void validate_strides(
598599
}
599600
}
600601
// No two dimensions can have same stride value
601-
for (int32_t i = 0; i < strides.size(); ++i) {
602+
for (const auto i : c10::irange(strides.size())) {
602603
for (int32_t j = i + 1; j < strides.size(); ++j) {
603604
if ((sizes[i] == 0) || (sizes[j] == 0) ||
604605
((sizes[i] == 1) || (sizes[j] == 1))) {
@@ -814,7 +815,7 @@ class TensorFactory {
814815
// given strides is empty.
815816
if (!sizes.empty() && dim_order.empty()) {
816817
default_dim_order.resize(sizes.size(), 1);
817-
for (size_t i = 0; i < sizes.size(); ++i) {
818+
for (const auto i : c10::irange(sizes.size())) {
818819
default_dim_order[i] = i;
819820
}
820821
}
@@ -888,10 +889,10 @@ class TensorFactory {
888889
std::vector<ctype> channels_last_data(
889890
N * C * H * W); // Create a new blob with the same total size to contain
890891
// channels_last data
891-
for (int32_t n = 0; n < N; ++n) {
892-
for (int32_t c = 0; c < C; ++c) {
893-
for (int32_t h = 0; h < H; ++h) {
894-
for (int32_t w = 0; w < W; ++w) {
892+
for (const auto n : c10::irange(N)) {
893+
for (const auto c : c10::irange(C)) {
894+
for (const auto h : c10::irange(H)) {
895+
for (const auto w : c10::irange(W)) {
895896
// Calculate the index in the original blob
896897
int32_t old_index = ((n * C + c) * H + h) * W + w;
897898
// Calculate the index in the new blob

runtime/core/exec_aten/testing_util/tensor_util.cpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <cstring>
1111
#include <ostream>
1212

13+
#include <c10/util/irange.h>
1314
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1415
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
1516
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -50,7 +51,7 @@ bool data_is_close(
5051
if (a == b) {
5152
return true;
5253
}
53-
for (size_t i = 0; i < numel; i++) {
54+
for (const auto i : c10::irange(numel)) {
5455
const auto ai = a[i];
5556
const auto bi = b[i];
5657

@@ -201,7 +202,7 @@ bool tensor_lists_are_close(
201202
if (num_tensors_a != num_tensors_b) {
202203
return false;
203204
}
204-
for (size_t i = 0; i < num_tensors_a; i++) {
205+
for (const auto i : c10::irange(num_tensors_a)) {
205206
if (!tensors_are_close(tensors_a[i], tensors_b[i], rtol, opt_atol)) {
206207
return false;
207208
}
@@ -245,7 +246,7 @@ template <typename T>
245246
std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
246247
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
247248
// break up or truncate data when it's huge
248-
for (auto i = 0; i < numel; i++) {
249+
for (const auto i : c10::irange(numel)) {
249250
os << data[i];
250251
if (i < numel - 1) {
251252
os << ", ";
@@ -257,7 +258,7 @@ std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
257258
template <typename T>
258259
std::ostream&
259260
print_data(std::ostream& os, const etensor::complex<T>* data, size_t numel) {
260-
for (auto i = 0; i < numel; i++) {
261+
for (const auto i : c10::irange(numel)) {
261262
os << data[i].real_ << " + " << data[i].imag_ << "j";
262263
if (i < numel - 1) {
263264
os << ", ";
@@ -276,7 +277,7 @@ template <>
276277
std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
277278
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
278279
// break up or truncate data when it's huge
279-
for (auto i = 0; i < numel; i++) {
280+
for (const auto i : c10::irange(numel)) {
280281
os << (uint64_t)data[i];
281282
if (i < numel - 1) {
282283
os << ", ";
@@ -292,7 +293,7 @@ std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
292293
*/
293294
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
294295
os << "ETensor(sizes={";
295-
for (auto dim = 0; dim < t.dim(); dim++) {
296+
for (const auto dim : c10::irange(t.dim())) {
296297
os << t.size(dim);
297298
if (dim < t.dim() - 1) {
298299
os << ", ";

runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <executorch/runtime/core/error.h>
1011
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
1112
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -86,7 +87,7 @@ using torch::executor::TensorImpl;
8687
"Arrays are not equal size." #a1 " size:%zu," #a2 " size:%zu", \
8788
a1.size(), \
8889
a2.size()); \
89-
for (size_t i = 0; i < a1.size(); ++i) { \
90+
for (const auto i : c10::irange(a1.size())) { \
9091
ET_CHECK_MSG( \
9192
a1[i] == a2[i], \
9293
"Value mismatch at index %zu, " #a1 \
@@ -784,7 +785,7 @@ void run_zeros_like_test(Tensor input) {
784785

785786
// A Tensor created manually, that should be identical to `actual`.
786787
std::vector<int32_t> expected_data;
787-
for (int i = 0; i < input.numel(); i++) {
788+
for (const auto i : c10::irange(input.numel())) {
788789
expected_data.push_back(0);
789790
}
790791
#ifdef USE_ATEN_LIB
@@ -842,7 +843,7 @@ void run_ones_like_test(Tensor input) {
842843

843844
// A Tensor created manually, that should be identical to `actual`.
844845
std::vector<int32_t> expected_data;
845-
for (int i = 0; i < input.numel(); i++) {
846+
for (const auto i : c10::irange(input.numel())) {
846847
expected_data.push_back(1);
847848
}
848849
#ifdef USE_ATEN_LIB

runtime/core/exec_aten/util/dim_order_util.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include <cstdio>
1313
#include <cstring>
1414

15+
#include <c10/util/irange.h>
1516
#include <executorch/runtime/core/error.h>
1617
#include <executorch/runtime/platform/assert.h>
1718
#include <executorch/runtime/platform/compiler.h>
@@ -22,7 +23,7 @@ namespace runtime {
2223
namespace {
2324
template <typename DimOrderType>
2425
bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
25-
for (int32_t i = 0; i < dims; ++i) {
26+
for (const auto i : c10::irange(dims)) {
2627
if (dim_order[i] >= dims) {
2728
return false;
2829
}
@@ -42,7 +43,7 @@ template <typename DimOrderType>
4243
inline bool is_contiguous_dim_order(
4344
const DimOrderType* dim_order,
4445
const size_t dims) {
45-
for (int i = 0; i < dims; ++i) {
46+
for (const auto i : c10::irange(dims)) {
4647
if (dim_order[i] != i) {
4748
return false;
4849
}
@@ -254,7 +255,7 @@ ET_NODISCARD inline Error stride_to_dim_order(
254255

255256
sorter.quick_sort(array, 0, dims - 1);
256257

257-
for (auto i = 0; i < dims; i++) {
258+
for (const auto i : c10::irange(dims)) {
258259
dim_order[i] = array[i].dim_order;
259260
}
260261
return Error::Ok;

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <cstddef> // size_t
1616
#include <limits>
1717

18+
#include <c10/util/irange.h>
1819
#include <executorch/runtime/core/array_ref.h>
1920
#include <executorch/runtime/core/error.h>
2021
#include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -275,7 +276,7 @@
275276
a_strides = a__.strides(); \
276277
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
277278
b_strides = b__.strides(); \
278-
for (size_t i = 0; i < a__.dim(); i++) { \
279+
for (const auto i : c10::irange(a__.dim())) { \
279280
ET_CHECK_MSG( \
280281
a_strides[i] == b_strides[i], \
281282
"a.strides()[%zu] shall equal to b.strides()[%zu], " \
@@ -307,7 +308,7 @@
307308
b_strides = b__.strides(); \
308309
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
309310
c_strides = c__.strides(); \
310-
for (size_t i = 0; i < a__.dim(); i++) { \
311+
for (const auto i : c10::irange(a__.dim())) { \
311312
ET_CHECK_MSG( \
312313
a_strides[i] == b_strides[i] && b_strides[i] == c_strides[i], \
313314
"a_strides[%zu], b_strides[%zu] and c_strides[%zu] " \
@@ -892,7 +893,7 @@ inline size_t getLeadingDims(
892893
dim,
893894
ssize_t(tensor.dim()));
894895
size_t dims = 1;
895-
for (size_t i = 0; i < dim; ++i) {
896+
for (const auto i : c10::irange(dim)) {
896897
dims *= static_cast<size_t>(tensor.size(i));
897898
}
898899
return dims;
@@ -929,7 +930,7 @@ inline size_t coordinateToIndex(
929930
const executorch::aten::Tensor& tensor,
930931
const size_t* const coordinate) {
931932
size_t index = 0;
932-
for (int d = 0; d < tensor.dim(); ++d) {
933+
for (const auto d : c10::irange(tensor.dim())) {
933934
index += coordinate[d] * getTrailingDims(tensor, d);
934935
}
935936
return index;
@@ -961,7 +962,7 @@ inline size_t coordinateToIndexWithTrailingDimsMemo(
961962
const size_t* const coordinate,
962963
const size_t trailing_dims_memo[kTensorDimensionLimit]) {
963964
size_t index = 0;
964-
for (int d = 0; d < tensor.dim(); ++d) {
965+
for (const auto d : c10::irange(tensor.dim())) {
965966
index += coordinate[d] * trailing_dims_memo[d];
966967
}
967968
return index;
@@ -983,7 +984,7 @@ inline void indexToCoordinate(
983984
size_t index,
984985
size_t* coordinate) {
985986
ET_CHECK(index < tensor.numel());
986-
for (auto i = 0; i < tensor.dim(); ++i) {
987+
for (const auto i : c10::irange(tensor.dim())) {
987988
auto dim = tensor.dim() - 1 - i;
988989
size_t dim_size = tensor.size(dim);
989990
coordinate[dim] = index % dim_size;
@@ -1173,7 +1174,7 @@ ET_NODISCARD inline Error resize_tensor(
11731174
std::array<executorch::aten::SizesType, kTensorDimensionLimit>
11741175
new_sizes_casted{};
11751176
size_t new_sizes_ndim = new_sizes.size();
1176-
for (size_t i = 0; i < new_sizes_ndim; ++i) {
1177+
for (const auto i : c10::irange(new_sizes_ndim)) {
11771178
new_sizes_casted[i] =
11781179
static_cast<executorch::aten::SizesType>(new_sizes[i]);
11791180
}
@@ -1304,7 +1305,7 @@ inline size_t calculate_linear_index(
13041305
const executorch::aten::StridesType* strides,
13051306
const size_t ndim) {
13061307
size_t index = 0;
1307-
for (size_t i = 0; i < ndim; i++) {
1308+
for (const auto i : c10::irange(ndim)) {
13081309
index += coordinate[i] * strides[i];
13091310
}
13101311
return index;

runtime/core/exec_aten/util/tensor_util_aten.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
1010

1111
#include <ATen/Tensor.h> // @manual
12+
#include <c10/util/irange.h>
1213
#include <executorch/runtime/platform/assert.h>
1314

1415
namespace executorch {
@@ -41,7 +42,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) {
4142

4243
if (!validate_dim_order(dim_order, t.dim())) {
4344
ET_LOG(Error, "Tensor dim order is not valid:");
44-
for (size_t d = 0; d < t.dim(); ++d) {
45+
for (const auto d : c10::irange(t.dim())) {
4546
ET_LOG(
4647
Error,
4748
" dim_order(%zu): %zu",
@@ -66,7 +67,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
6667
ET_LOG(
6768
Error,
6869
"Expected tensor to have default or channels last dim order, but got");
69-
for (size_t d = 0; d < t.dim(); ++d) {
70+
for (const auto d : c10::irange(t.dim())) {
7071
ET_LOG(
7172
Error,
7273
" dim_order(%zu): %zu",
@@ -96,7 +97,7 @@ bool tensors_have_same_dim_order(
9697
bool all_channels_last =
9798
is_channels_last_dim_order(first_dim_order, tensor_list[0].dim());
9899

99-
for (size_t i = 1; i < tensor_list.size(); ++i) {
100+
for (const auto i : c10::irange(1, tensor_list.size())) {
100101
ET_CHECK_OR_RETURN_FALSE(
101102
get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==
102103
Error::Ok,

runtime/core/exec_aten/util/tensor_util_portable.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
1011

1112
#include <cstring>
@@ -41,7 +42,7 @@ Error get_dim_order(
4142
bool tensor_has_valid_dim_order(torch::executor::Tensor t) {
4243
if (!validate_dim_order(t.dim_order().data(), t.dim_order().size())) {
4344
ET_LOG(Error, "Tensor dim order is not valid:");
44-
for (size_t d = 0; d < t.dim(); ++d) {
45+
for (const auto d : c10::irange(t.dim())) {
4546
ET_LOG(
4647
Error,
4748
" dim_order(%zu): %zu",
@@ -62,7 +63,7 @@ bool tensor_is_default_or_channels_last_dim_order(torch::executor::Tensor t) {
6263
ET_LOG(
6364
Error,
6465
"Expected tensor to have default or channels last dim order, but got");
65-
for (size_t d = 0; d < t.dim(); ++d) {
66+
for (const auto d : c10::irange(t.dim())) {
6667
ET_LOG(
6768
Error,
6869
" dim_order(%zu): %zu",
@@ -79,7 +80,7 @@ bool tensor_is_default_dim_order(torch::executor::Tensor t) {
7980

8081
if (!ret_val) {
8182
ET_LOG(Error, "Expected tensor to have default dim order, but got");
82-
for (size_t d = 0; d < t.dim(); ++d) {
83+
for (const auto d : c10::irange(t.dim())) {
8384
ET_LOG(
8485
Error,
8586
" dim_order(%zu): %zu",
@@ -96,7 +97,7 @@ bool tensor_is_channels_last_dim_order(torch::executor::Tensor t) {
9697

9798
if (!ret_val) {
9899
ET_LOG(Error, "Expected tensor to have channels last dim order, but got");
99-
for (size_t d = 0; d < t.dim(); ++d) {
100+
for (const auto d : c10::irange(t.dim())) {
100101
ET_LOG(
101102
Error,
102103
" dim_order(%zu): %zu",
@@ -114,7 +115,7 @@ bool tensors_have_same_dim_order(
114115
}
115116
bool all_contiguous = true;
116117
bool all_channels_last = true;
117-
for (size_t i = 0; i < tensor_list.size(); ++i) {
118+
for (const auto i : c10::irange(tensor_list.size())) {
118119
all_contiguous = all_contiguous &&
119120
is_contiguous_dim_order(
120121
tensor_list[i].dim_order().data(),

0 commit comments

Comments
 (0)