Skip to content

Commit 05a6d6f

Browse files
Songhao Jia and facebook-github-bot
authored and committed
give bp a specific namespace
Summary: This diff creates a unique namespace for bundled program, separate from the core ExecuTorch schema. This update makes bundled program more decoupled from core ET.

Differential Revision: https://internalfb.com/D49326299

fbshipit-source-id: c04ba73e36397827b49b97a2ad599c7ae49576ba
1 parent 65b108c commit 05a6d6f

File tree

3 files changed

+23
-21
lines changed

3 files changed

+23
-21
lines changed

extension/pybindings/pybindings.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,11 +273,11 @@ struct PyBundledModule final {
273273
: bundled_program_ptr_(
274274
static_cast<const void*>((buffer.cast<std::string_view>().data()))),
275275
program_ptr_(static_cast<const void*>(
276-
executorch_flatbuffer::GetBundledProgram(bundled_program_ptr_)
276+
bundled_program_flatbuffer::GetBundledProgram(bundled_program_ptr_)
277277
->program()
278278
->data())),
279279
program_len_(
280-
executorch_flatbuffer::GetBundledProgram(bundled_program_ptr_)
280+
bundled_program_flatbuffer::GetBundledProgram(bundled_program_ptr_)
281281
->program()
282282
->size()),
283283
bundled_input_allocator_(

schema/bundled_program_schema.fbs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
include "scalar_type.fbs";
88

99
// TODO make this unique from the main schemas namespace
10-
namespace executorch_flatbuffer;
10+
namespace bundled_program_flatbuffer;
1111

1212
// Identifier of a valid bundled program schema.
1313
file_identifier "BP06";
@@ -30,7 +30,7 @@ table BundledDouble {
3030
// All information we need to bundle for a tensor EValue input.
3131
table BundledTensor {
3232
// The scalar type of Tensor
33-
scalar_type: ScalarType;
33+
scalar_type: executorch_flatbuffer.ScalarType;
3434
// The target sizes of the tensor.
3535
sizes: [int];
3636
// The contents of the corresponding input tensor.

util/bundled_program_verification.cpp

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,8 @@ namespace {
3434
#define kMaxDim 16
3535

3636
// Create an aten tensor with same content using bundled tensor
37-
at::Tensor tensor_like(executorch_flatbuffer::BundledTensor* bundled_tensor) {
37+
at::Tensor tensor_like(
38+
bundled_program_flatbuffer::BundledTensor* bundled_tensor) {
3839
ET_CHECK(bundled_tensor->sizes()->size() <= kMaxDim);
3940
int64_t ret_t_sizes[kMaxDim];
4041

@@ -55,7 +56,7 @@ at::Tensor tensor_like(executorch_flatbuffer::BundledTensor* bundled_tensor) {
5556
#else // !USE_ATEN_LIB
5657
// Create a tensorimpl with same content using bundled tensor
5758
TensorImpl impl_like(
58-
executorch_flatbuffer::BundledTensor* bundled_tensor,
59+
bundled_program_flatbuffer::BundledTensor* bundled_tensor,
5960
MemoryAllocator* runtime_allocator) {
6061
ScalarType scalar_type =
6162
static_cast<ScalarType>(bundled_tensor->scalar_type());
@@ -166,8 +167,8 @@ bool tensors_are_close(
166167
}
167168
}
168169

169-
Result<executorch_flatbuffer::BundledExecutionPlanTest*> get_method_test(
170-
const executorch_flatbuffer::BundledProgram* bundled_program,
170+
Result<bundled_program_flatbuffer::BundledExecutionPlanTest*> get_method_test(
171+
const bundled_program_flatbuffer::BundledProgram* bundled_program,
171172
const char* method_name) {
172173
auto method_tests = bundled_program->execution_plan_tests();
173174
for (size_t i = 0; i < method_tests->size(); i++) {
@@ -190,13 +191,13 @@ __ET_NODISCARD Error LoadBundledInput(
190191
const char* method_name,
191192
size_t testset_idx) {
192193
ET_CHECK_OR_RETURN_ERROR(
193-
executorch_flatbuffer::BundledProgramBufferHasIdentifier(
194+
bundled_program_flatbuffer::BundledProgramBufferHasIdentifier(
194195
bundled_program_ptr),
195196
NotSupported,
196197
"The input buffer should be a bundled program.");
197198

198199
auto method_test = get_method_test(
199-
executorch_flatbuffer::GetBundledProgram(bundled_program_ptr),
200+
bundled_program_flatbuffer::GetBundledProgram(bundled_program_ptr),
200201
method_name);
201202

202203
if (!method_test.ok()) {
@@ -217,9 +218,9 @@ __ET_NODISCARD Error LoadBundledInput(
217218

218219
// Set e_input with bundled_input based on different types.
219220
switch (bundled_input->val_type()) {
220-
case executorch_flatbuffer::BundledValueUnion::BundledTensor: {
221+
case bundled_program_flatbuffer::BundledValueUnion::BundledTensor: {
221222
auto bundled_input_tensor =
222-
static_cast<executorch_flatbuffer::BundledTensor*>(
223+
static_cast<bundled_program_flatbuffer::BundledTensor*>(
223224
bundled_input->mutable_val());
224225

225226
#ifdef USE_ATEN_LIB
@@ -238,19 +239,19 @@ __ET_NODISCARD Error LoadBundledInput(
238239
status = method.set_input(e_input, input_idx);
239240
break;
240241
}
241-
case executorch_flatbuffer::BundledValueUnion::BundledInt: {
242+
case bundled_program_flatbuffer::BundledValueUnion::BundledInt: {
242243
auto bundled_input_int = bundled_input->val_as_BundledInt();
243244
e_input = EValue(bundled_input_int->int_val());
244245
status = method.set_input(e_input, input_idx);
245246
break;
246247
}
247-
case executorch_flatbuffer::BundledValueUnion::BundledDouble: {
248+
case bundled_program_flatbuffer::BundledValueUnion::BundledDouble: {
248249
auto bundled_input_int = bundled_input->val_as_BundledDouble();
249250
e_input = EValue(bundled_input_int->double_val());
250251
status = method.set_input(e_input, input_idx);
251252
break;
252253
}
253-
case executorch_flatbuffer::BundledValueUnion::BundledBool: {
254+
case bundled_program_flatbuffer::BundledValueUnion::BundledBool: {
254255
auto bundled_input_int = bundled_input->val_as_BundledBool();
255256
e_input = EValue(bundled_input_int->bool_val());
256257
status = method.set_input(e_input, input_idx);
@@ -285,13 +286,13 @@ __ET_NODISCARD Error VerifyResultWithBundledExpectedOutput(
285286
double rtol,
286287
double atol) {
287288
ET_CHECK_OR_RETURN_ERROR(
288-
executorch_flatbuffer::BundledProgramBufferHasIdentifier(
289+
bundled_program_flatbuffer::BundledProgramBufferHasIdentifier(
289290
bundled_program_ptr),
290291
NotSupported,
291292
"The input buffer should be a bundled program.");
292293

293294
auto method_test = get_method_test(
294-
executorch_flatbuffer::GetBundledProgram(bundled_program_ptr),
295+
bundled_program_flatbuffer::GetBundledProgram(bundled_program_ptr),
295296
method_name);
296297

297298
if (!method_test.ok()) {
@@ -307,9 +308,9 @@ __ET_NODISCARD Error VerifyResultWithBundledExpectedOutput(
307308
bundled_expected_outputs->GetMutableObject(output_idx);
308309
auto method_output = method.get_output(output_idx);
309310
switch (bundled_expected_output->val_type()) {
310-
case executorch_flatbuffer::BundledValueUnion::BundledTensor: {
311+
case bundled_program_flatbuffer::BundledValueUnion::BundledTensor: {
311312
auto bundled_expected_output_tensor =
312-
static_cast<executorch_flatbuffer::BundledTensor*>(
313+
static_cast<bundled_program_flatbuffer::BundledTensor*>(
313314
bundled_expected_output->mutable_val());
314315
const auto method_output_tensor = method_output.toTensor();
315316

@@ -348,9 +349,10 @@ __ET_NODISCARD Error GetProgramData(
348349
if (executorch_flatbuffer::ProgramBufferHasIdentifier(file_data)) {
349350
*out_program_data = file_data;
350351
*out_program_data_len = file_data_len;
351-
} else if (executorch_flatbuffer::BundledProgramBufferHasIdentifier(
352+
} else if (bundled_program_flatbuffer::BundledProgramBufferHasIdentifier(
352353
file_data)) {
353-
auto program_bundled = executorch_flatbuffer::GetBundledProgram(file_data);
354+
auto program_bundled =
355+
bundled_program_flatbuffer::GetBundledProgram(file_data);
354356
*out_program_data = program_bundled->program()->data();
355357
*out_program_data_len = program_bundled->program()->size();
356358
} else {

0 commit comments

Comments (0)