Skip to content

Commit b284866

Browse files
authored
Remove torch:: references from examples/portable
Differential Revision: D61742292 Pull Request resolved: #4882
1 parent 1f0487d commit b284866

File tree

3 files changed

+31
-12
lines changed

3 files changed

+31
-12
lines changed

examples/portable/custom_ops/custom_ops_1_out.cpp

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ namespace native {
1313

1414
using exec_aten::ScalarType;
1515
using exec_aten::Tensor;
16-
using torch::executor::RuntimeContext;
16+
using executorch::runtime::KernelRuntimeContext;
1717

1818
namespace {
1919
void check_preconditions(const Tensor& in, Tensor& out) {
@@ -35,10 +35,13 @@ void check_preconditions(const Tensor& in, Tensor& out) {
       ssize_t(in.numel()));
 }
 } // namespace
-// mul3.out(Tensor input, *, Tensor(a!) output) -> Tensor(a!)
-Tensor& mul3_out_impl(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
-  (void)ctx;
 
+// mul3.out(Tensor input, *, Tensor(a!) output) -> Tensor(a!)
+// ExecuTorch-compatible function signature, with a KernelRuntimeContext.
+Tensor& mul3_out_impl(
+    ET_UNUSED KernelRuntimeContext& ctx,
+    const Tensor& in,
+    Tensor& out) {
   check_preconditions(in, out);
   float* out_data = out.mutable_data_ptr<float>();
   const float* in_data = in.const_data_ptr<float>();
@@ -47,5 +50,6 @@ Tensor& mul3_out_impl(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
   }
   return out;
 }
+
 } // namespace native
 } // namespace custom

examples/portable/custom_ops/custom_ops_2_out.cpp

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ namespace native {
1313

1414
using exec_aten::ScalarType;
1515
using exec_aten::Tensor;
16-
using torch::executor::RuntimeContext;
16+
using executorch::runtime::KernelRuntimeContext;
1717

1818
namespace {
1919
void check_preconditions(const Tensor& in, Tensor& out) {
@@ -35,7 +35,9 @@ void check_preconditions(const Tensor& in, Tensor& out) {
       ssize_t(in.numel()));
 }
 } // namespace
+
 // mul4.out(Tensor input, *, Tensor(a!) output) -> Tensor(a!)
+// ATen-compatible function signature, without a KernelRuntimeContext.
 Tensor& mul4_out_impl(const Tensor& in, Tensor& out) {
   check_preconditions(in, out);
   float* out_data = out.mutable_data_ptr<float>();
@@ -46,8 +48,12 @@ Tensor& mul4_out_impl(const Tensor& in, Tensor& out) {
   return out;
 }
 
-Tensor& mul4_out_impl(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
-  (void)ctx;
+// mul4.out(Tensor input, *, Tensor(a!) output) -> Tensor(a!)
+// ExecuTorch-compatible function signature, with a KernelRuntimeContext.
+Tensor& mul4_out_impl(
+    ET_UNUSED KernelRuntimeContext& ctx,
+    const Tensor& in,
+    Tensor& out) {
   mul4_out_impl(in, out);
   return out;
 }

examples/portable/executor_runner/executor_runner.cpp

Lines changed: 14 additions & 5 deletions
@@ -37,11 +37,20 @@ DEFINE_string(
     "model.pte",
     "Model serialized in flatbuffer format.");
 
-using namespace torch::executor;
-using torch::executor::util::FileDataLoader;
+using executorch::extension::FileDataLoader;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::HierarchicalAllocator;
+using executorch::runtime::MemoryAllocator;
+using executorch::runtime::MemoryManager;
+using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
+using executorch::runtime::Program;
+using executorch::runtime::Result;
+using executorch::runtime::Span;
 
 int main(int argc, char** argv) {
-  runtime_init();
+  executorch::runtime::runtime_init();
 
   gflags::ParseCommandLineFlags(&argc, &argv, true);
   if (argc != 1) {
@@ -154,7 +163,7 @@ int main(int argc, char** argv) {
   // Allocate input tensors and set all of their elements to 1. The `inputs`
   // variable owns the allocated memory and must live past the last call to
   // `execute()`.
-  auto inputs = util::prepare_input_tensors(*method);
+  auto inputs = executorch::extension::prepare_input_tensors(*method);
   ET_CHECK_MSG(
       inputs.ok(),
       "Could not prepare inputs: 0x%" PRIx32,
@@ -176,7 +185,7 @@ int main(int argc, char** argv) {
   status = method->get_outputs(outputs.data(), outputs.size());
   ET_CHECK(status == Error::Ok);
   // Print the first and last 100 elements of long lists of scalars.
-  std::cout << torch::executor::util::evalue_edge_items(100);
+  std::cout << executorch::extension::evalue_edge_items(100);
   for (int i = 0; i < outputs.size(); ++i) {
     std::cout << "Output " << i << ": " << outputs[i] << std::endl;
   }

0 commit comments

Comments (0)