Skip to content

Commit c9bf908

Browse files
swolchok authored and facebook-github-bot committed
Fix stack buffer overflow when XNNPACK tensor has too many dims (#3233)
Summary: Noticed this overflow when I was looking through the XNNPACK backend. I am not very familiar with executorch or XNNPACK, so please be critical in review! In particular, my test is crashing in xnn_delete_runtime and I don't know why; it was previously getting a UBSAN failure for the stack buffer overflow before I patched XNNExecutor.cpp.

Differential Revision: D56450593
1 parent ee8c3a6 commit c9bf908

File tree

4 files changed

+113
-1
lines changed

4 files changed

+113
-1
lines changed

backends/xnnpack/runtime/XNNExecutor.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,12 @@ __ET_NODISCARD Error XNNExecutor::prepare_args(EValue** args) {
8787
if (i < input_ids_.size()) {
8888
size_t num_dims = tensor->dim();
8989
size_t dims[XNN_MAX_TENSOR_DIMS];
90+
ET_CHECK_OR_RETURN_ERROR(
91+
num_dims <= XNN_MAX_TENSOR_DIMS,
92+
InvalidArgument,
93+
"XNNPACK backend accepts tensors with at most %d dims, but got %zu",
94+
XNN_MAX_TENSOR_DIMS,
95+
num_dims);
9096
for (int d = 0; d < num_dims; ++d) {
9197
dims[d] = tensor->size(d);
9298
}

backends/xnnpack/targets.bzl

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,11 @@ def define_common_targets():
3838
preprocessor_flags = [
3939
# "-DENABLE_XNNPACK_PROFILING",
4040
],
41+
exported_deps = [
42+
"//executorch/runtime/backend:interface",
43+
],
4144
deps = [
4245
third_party_dep("XNNPACK"),
43-
"//executorch/runtime/backend:interface",
4446
"//executorch/backends/xnnpack/serialization:xnnpack_flatbuffer_header",
4547
"//executorch/backends/xnnpack/threadpool:threadpool",
4648
"//executorch/runtime/core/exec_aten/util:tensor_util",
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/backends/xnnpack/runtime/XNNExecutor.h>
10+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
11+
#include <gtest/gtest.h>
12+
#include <xnnpack/subgraph.h>
13+
14+
using torch::executor::Error;
15+
using torch::executor::EValue;
16+
using torch::executor::testing::TensorFactory;
17+
using torch::executor::xnnpack::delegate::XNNExecutor;
18+
19+
TEST(XNNExecutorTest, ArgumentWithTooManyDimensions) {
20+
XNNExecutor executor;
21+
xnn_subgraph_t subgraph = nullptr;
22+
xnn_runtime_t rt = nullptr;
23+
et_pal_init();
24+
ASSERT_EQ(xnn_initialize(nullptr), xnn_status_success);
25+
ASSERT_EQ(xnn_create_subgraph(2, 0, &subgraph), xnn_status_success);
26+
auto input_id = XNN_INVALID_NODE_ID;
27+
std::vector<size_t> dims = {
28+
1,
29+
};
30+
ASSERT_EQ(
31+
xnn_status_success,
32+
xnn_define_quantized_tensor_value(
33+
subgraph,
34+
xnn_datatype_qint8,
35+
0,
36+
1,
37+
dims.size(),
38+
dims.data(),
39+
nullptr,
40+
/*external_id=*/0,
41+
/*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT,
42+
&input_id));
43+
ASSERT_NE(input_id, XNN_INVALID_NODE_ID);
44+
45+
auto output_id = XNN_INVALID_NODE_ID;
46+
ASSERT_EQ(
47+
xnn_status_success,
48+
xnn_define_quantized_tensor_value(
49+
subgraph,
50+
xnn_datatype_qint8,
51+
0,
52+
1,
53+
dims.size(),
54+
dims.data(),
55+
nullptr,
56+
/*external_id=*/0,
57+
/*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT,
58+
&output_id));
59+
ASSERT_NE(output_id, XNN_INVALID_NODE_ID);
60+
61+
ASSERT_EQ(
62+
xnn_status_success,
63+
xnn_define_clamp(subgraph, 1, 2, input_id, output_id, 0));
64+
65+
ASSERT_EQ(xnn_create_runtime(subgraph, &rt), xnn_status_success);
66+
EXPECT_EQ(
67+
executor.initialize(
68+
rt,
69+
{
70+
0,
71+
},
72+
{
73+
1,
74+
}),
75+
Error::Ok);
76+
TensorFactory<exec_aten::ScalarType::Int> tf;
77+
auto input_tensor = tf.make({1, 1, 1, 1, 1, 1, 1, 1, 1}, {42});
78+
ASSERT_EQ(input_tensor.dim(), 9);
79+
auto output_tensor = tf.make(
80+
{
81+
1,
82+
},
83+
{
84+
1,
85+
});
86+
EValue input_ev(input_tensor);
87+
EValue output_ev(output_tensor);
88+
std::array<EValue*, 2> args = {&input_ev, &output_ev};
89+
// Check for invalid number of dimensions should fail without stack overflow.
90+
EXPECT_EQ(executor.prepare_args(args.data()), Error::InvalidArgument);
91+
}

backends/xnnpack/test/targets.bzl

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
load("@fbsource//xplat/executorch/backends/xnnpack/third-party:third_party_libs.bzl", "third_party_dep")
12
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
23

34
def define_common_targets():
@@ -17,3 +18,15 @@ def define_common_targets():
1718
"//executorch/backends/xnnpack:dynamic_quant_utils",
1819
],
1920
)
21+
22+
runtime.cxx_test(
23+
name = "xnnexecutor_test",
24+
srcs = ["runtime/test_xnnexecutor.cpp"],
25+
deps = [
26+
third_party_dep("XNNPACK"),
27+
"//executorch/runtime/core/exec_aten/testing_util:tensor_util",
28+
"//executorch/runtime/core/exec_aten/util:scalar_type_util",
29+
"//executorch/backends/xnnpack:xnnpack_backend",
30+
"//executorch/extension/aten_util:aten_bridge",
31+
],
32+
)

0 commit comments

Comments (0)