Skip to content

Commit b78fce9

Browse files
committed
Update
[ghstack-poisoned]
1 parent 2f996ba commit b78fce9

File tree

6 files changed

+75
-25
lines changed

6 files changed

+75
-25
lines changed

runtime/core/exec_aten/util/targets.bzl

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,11 +77,21 @@ def define_common_targets():
7777
name = "tensor_shape_to_c_string" + aten_suffix,
7878
srcs = ["tensor_shape_to_c_string.cpp"],
7979
exported_deps = [
80-
"//executorch/runtime/core/exec_aten:lib" + aten_suffix,
80+
"//executorch/runtime/core:core",
81+
"//executorch/runtime/core/exec_aten/util:tensor_dimension_limit",
8182
],
8283
exported_headers = ["tensor_shape_to_c_string.h"],
8384
visibility = [
8485
"//executorch/...",
8586
"@EXECUTORCH_CLIENTS",
8687
],
8788
)
89+
90+
runtime.cxx_library(
91+
name = "tensor_dimension_limit",
92+
exported_headers = ["tensor_dimension_limit.h"],
93+
visibility = [
94+
"//executorch/...",
95+
"@EXECUTORCH_CLIENTS",
96+
],
97+
)
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <cstddef> // size_t is only guaranteed in the global namespace via this header

namespace executorch::runtime {
/**
 * The maximum number of dimensions a Tensor may have.
 *
 * The expected output size may not be the existing size of any inputs and
 * outputs if the operator supports both broadcast and dynamic shape.
 * Therefore, such operators need extra space to store the calculated expected
 * output size. Such dynamic allocation is troublesome in ExecuTorch, so we
 * hard-code a relatively small static limit, since users don't create
 * high-dimensional tensors.
 */
constexpr size_t kTensorDimensionLimit = 16;
} // namespace executorch::runtime

runtime/core/exec_aten/util/tensor_shape_to_c_string.cpp

Lines changed: 17 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,13 @@
1010

1111
#include <executorch/runtime/platform/assert.h>
1212

13+
#include <cinttypes>
14+
1315
namespace executorch::runtime {
14-
/**
15-
* Shared implementation for tensor_util.h, may only contain code that
16-
* works whether or not ATen mode is active.
17-
*/
18-
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
19-
executorch::runtime::Span<const executorch::aten::SizesType> shape) {
16+
namespace {
17+
template <typename SizesType>
18+
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string_impl(
19+
executorch::runtime::Span<SizesType> shape) {
2020
std::array<char, kTensorShapeStringSizeLimit> out;
2121
char* p = out.data();
2222
if ET_UNLIKELY (shape.size() > kTensorDimensionLimit) {
@@ -48,5 +48,16 @@ std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
4848
*(p - 1) = '\0';
4949
return out;
5050
}
51+
} // namespace
52+
53+
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
54+
executorch::runtime::Span<const std::int32_t> shape) {
55+
return tensor_shape_to_c_string_impl(shape);
56+
}
57+
58+
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
59+
executorch::runtime::Span<const std::int64_t> shape) {
60+
return tensor_shape_to_c_string_impl(shape);
61+
}
5162

5263
} // namespace executorch::runtime

runtime/core/exec_aten/util/tensor_shape_to_c_string.h

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,10 +10,10 @@
1010

1111
#include <array>
1212
#include <cstddef>
13+
#include <cstdint>
1314
#include <limits>
1415

15-
#include <executorch/runtime/core/exec_aten/exec_aten.h>
16-
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
16+
#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
1717
#include <executorch/runtime/core/span.h>
1818

1919
namespace executorch::runtime {
@@ -34,18 +34,35 @@ constexpr size_t kTensorShapeStringSizeLimit = 1 + /* opening parenthesis */
3434

3535
namespace internal {
3636
constexpr size_t kMaximumPrintableTensorShapeElement =
37-
std::is_same_v<executorch::aten::SizesType, int32_t>
38-
? std::numeric_limits<int32_t>::max()
39-
: std::numeric_limits<uint32_t>::max();
37+
std::numeric_limits<int32_t>::max();
4038
} // namespace internal
4139

4240
/**
4341
* Convert a shape to a NUL-terminated C string with limited size. If
4442
* elements of the shape are larger than
4543
* kMaximumPrintableTensorShapeElement, those elements will be
4644
* rendered as ERR instead.
45+
*
46+
* NOTE: There are two overloads of this function to support both ATen
47+
* tensors and ExecuTorch Tensors, which have different SizesType,
48+
* while also avoiding a dependency on exec_aten.h from this header
49+
* because that would cause a circular dependency.
50+
*/
51+
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
52+
executorch::runtime::Span<const std::int32_t> shape);
53+
54+
/**
55+
* Convert a shape to a NUL-terminated C string with limited size. If
56+
* elements of the shape are larger than
57+
* kMaximumPrintableTensorShapeElement, those elements will be
58+
* rendered as ERR instead.
59+
*
60+
* NOTE: There are two overloads of this function to support both ATen
61+
* tensors and ExecuTorch Tensors, which have different SizesType,
62+
* while also avoiding a dependency on exec_aten.h from this header
63+
* because that would cause a circular dependency.
4764
*/
4865
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
49-
executorch::runtime::Span<const executorch::aten::SizesType> shape);
66+
executorch::runtime::Span<const std::int64_t> shape);
5067

5168
} // namespace executorch::runtime

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#include <executorch/runtime/core/exec_aten/exec_aten.h>
2121
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
2222
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
23+
#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
2324
#include <executorch/runtime/core/span.h>
2425
#include <executorch/runtime/platform/assert.h>
2526
#include <executorch/runtime/platform/compiler.h>
@@ -893,16 +894,6 @@ inline bool tensor_is_scalar(executorch::aten::Tensor t) {
893894
return t.dim() == 0 && t.numel() == 1;
894895
}
895896

896-
/**
897-
* The expected output size may not be the existing size of any inputs and
898-
* outputs if the operator supports both broadcast and dynamic shape.
899-
* Therefore such operators needs extra space to store the calculated expected
900-
* output size. such dynamic allocation is troublesome in executorch so we can
901-
* just hard code a static value of a relatively small value because users
902-
* don't create high dimensional tensors.
903-
*/
904-
constexpr size_t kTensorDimensionLimit = 16;
905-
906897
/// Returns the product of dim[0:dim), not including dim.
907898
inline size_t getLeadingDims(
908899
const executorch::aten::Tensor& tensor,

runtime/core/portable_type/tensor_impl.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,10 +97,10 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
9797
if (!std::equal(sizes_, sizes_ + dim_, new_sizes.begin())) {
9898
#ifdef ET_LOG_ENABLED
9999
auto old_sizes_str = executorch::runtime::tensor_shape_to_c_string(
100-
executorch::runtime::Span<const executorch::aten::SizesType>(
100+
executorch::runtime::Span<const SizesType>(
101101
sizes().data(), sizes().size()));
102102
auto new_sizes_str = executorch::runtime::tensor_shape_to_c_string(
103-
executorch::runtime::Span<const executorch::aten::SizesType>(
103+
executorch::runtime::Span<const SizesType>(
104104
new_sizes.data(), new_sizes.size()));
105105
#endif
106106

0 commit comments

Comments
 (0)