Remove references to exec_aten::RuntimeContext #5257
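
This PR mechanically replaces every reference to exec_aten::RuntimeContext with executorch::runtime::KernelRuntimeContext across custom ops, kernels, and their tests; the call sites are otherwise unchanged. A minimal sketch of the resulting pattern is below, assuming ExecuTorch's kernel headers; my_op_out, my_op_out_no_context, and the include paths are illustrative and not part of this diff.

    #include <executorch/runtime/core/exec_aten/exec_aten.h>
    #include <executorch/runtime/kernel/kernel_runtime_context.h>

    using exec_aten::Tensor;

    // Hypothetical out-variant kernel: ExecuTorch kernels take the runtime
    // context as their first argument.
    Tensor& my_op_out(
        executorch::runtime::KernelRuntimeContext& context,
        const Tensor& input,
        Tensor& out) {
      (void)context; // Many kernels ignore the context when no failure or
      (void)input;   // temporary allocation needs to be reported through it.
      return out;
    }

    // Wrapper supplying a default-constructed context, mirroring the
    // "no context" helpers and test shims touched by this diff.
    Tensor& my_op_out_no_context(const Tensor& input, Tensor& out) {
      // Previously: exec_aten::RuntimeContext context{};
      executorch::runtime::KernelRuntimeContext context{};
      return my_op_out(context, input, out);
    }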

Closed · wants to merge 1 commit
2 changes: 1 addition & 1 deletion extension/llm/custom_ops/op_sdpa_aot.cpp
@@ -33,7 +33,7 @@ Tensor& sdpa_with_kv_cache_out_no_context(
     // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy
     const optional<double> scale,
     Tensor& output) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::sdpa_with_kv_cache_out(
       context,
       q_projected,

2 changes: 1 addition & 1 deletion extension/llm/custom_ops/op_sdpa_test.cpp
@@ -28,7 +28,7 @@ exec_aten::Tensor op_scaled_dot_product_attention(
     bool is_causal,
     exec_aten::optional<double> scale,
     exec_aten::Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::flash_attention_kernel_out(
       context, query, key, value, attn_mask, dropout_p, is_causal, scale, out);
 }

2 changes: 1 addition & 1 deletion extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp
@@ -31,7 +31,7 @@ exec_aten::Tensor op_sdpa_with_kv_cache(
     bool is_causal,
     exec_aten::optional<double> scale,
     exec_aten::Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::sdpa_with_kv_cache_out(
       context,
       query,

2 changes: 1 addition & 1 deletion kernels/aten/cpu/op__to_dim_order_copy.cpp
@@ -115,7 +115,7 @@ Tensor& _to_dim_order_copy_out(
     bool non_blocking,
     OptionalArrayRef<int64_t> dim_order,
     Tensor& out) {
-  exec_aten::RuntimeContext ctx{};
+  executorch::runtime::KernelRuntimeContext ctx{};
   return _to_dim_order_copy_out(ctx, self, non_blocking, dim_order, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/optimized/cpu/op_gelu.cpp
@@ -38,7 +38,7 @@ namespace {
  */
 template <typename CTYPE>
 void gelu(
-    exec_aten::RuntimeContext& context,
+    executorch::runtime::KernelRuntimeContext& context,
     const Tensor& input,
     string_view approximate,
     Tensor& output) {

2 changes: 1 addition & 1 deletion kernels/portable/cpu/op__to_dim_order_copy.cpp
@@ -118,7 +118,7 @@ Tensor& _to_dim_order_copy_out(
     bool non_blocking,
     OptionalArrayRef<int64_t> dim_order,
     Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return _to_dim_order_copy_out(context, self, non_blocking, dim_order, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/portable/test/op_gelu_test.cpp
@@ -25,7 +25,7 @@ using torch::executor::testing::TensorFactory;
 // executorch/kernels/test/op_gelu_test.cpp instead.
 
 Tensor& op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::gelu_out(context, self, approximate, out);
 }
 

4 changes: 2 additions & 2 deletions kernels/quantized/test/op_add_test.cpp
@@ -21,10 +21,10 @@
 using namespace ::testing;
 using exec_aten::ArrayRef;
 using exec_aten::optional;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::native::add_out;
 using torch::executor::native::dequantize_per_tensor_out;
 using torch::executor::native::quantize_per_tensor_out;
@@ -193,7 +193,7 @@ TEST(OpQuantizeAddTest, ConsitencyWithReferencePattern) {
 
   optional<ScalarType> out_dtype = optional<ScalarType>();
 
-  RuntimeContext context{};
+  KernelRuntimeContext context{};
   // q -> qadd -> dq
   // 3.5 / 0.5 + 1 = 8
   quantize_per_tensor_out(

4 changes: 2 additions & 2 deletions kernels/quantized/test/op_embedding4b_test.cpp
@@ -19,9 +19,9 @@
 using namespace ::testing;
 using exec_aten::ArrayRef;
 using exec_aten::optional;
-using exec_aten::RuntimeContext;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::native::quantized_embedding_4bit_out;
 
 using torch::executor::testing::TensorFactory;
@@ -62,7 +62,7 @@ TEST(OpQuantizedEmbedding4bTest, TestGroupWiseQuantizedEmbedding) {
   EXPECT_TENSOR_EQ(out, expected);
 
   out = tf.zeros({3, 4});
-  auto context = RuntimeContext();
+  auto context = KernelRuntimeContext();
   torch::executor::native::quantized_embedding_4bit_out(
       context,
       qweight,

4 changes: 2 additions & 2 deletions kernels/quantized/test/op_embedding_test.cpp
@@ -21,10 +21,10 @@
 using namespace ::testing;
 using exec_aten::ArrayRef;
 using exec_aten::optional;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::native::dequantize_per_tensor_out;
 using torch::executor::native::embedding_out;
 using torch::executor::native::quantize_per_tensor_out;
@@ -120,7 +120,7 @@ TEST(OpQuantizedEmbeddingTest, ConsitencyWithReferencePattern) {
 
   TensorFactory<ScalarType::Byte> tfo;
   Tensor qweight = tfo.zeros({3, 1});
-  RuntimeContext context{};
+  KernelRuntimeContext context{};
   // 3.5 / 0.5 + 1 = 8
   // 5.5 / 0.5 + 1 = 12
   // 1 / 0.5 + 1 = 3

6 changes: 3 additions & 3 deletions kernels/quantized/test/op_mixed_linear_test.cpp
@@ -18,9 +18,9 @@
 
 using namespace ::testing;
 using exec_aten::optional;
-using exec_aten::RuntimeContext;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::native::quantized_mixed_linear_out;
 using torch::executor::testing::TensorFactory;
 
@@ -57,7 +57,7 @@ void test_dtype() {
       /*sizes=*/{1, 2},
       /*data=*/{2.3, 3.6});
 
-  RuntimeContext ctx{};
+  KernelRuntimeContext ctx{};
 
   quantized_mixed_linear_out(
       ctx, input, weight, weight_scales, opt_weight_zp, opt_dtype_out, out);
@@ -112,7 +112,7 @@ void test_dtype_partials() {
       {(1.0 * 5 + 1.5 * 3) * 0.2 + 2.0 * 1 * 1,
        (1.0 * 4 + 1.5 * 2) * 0.4 + 2.0 * 1 * 0.5});
 
-  RuntimeContext ctx{};
+  KernelRuntimeContext ctx{};
 
   quantized_mixed_linear_out(
       ctx, input, weight, weight_scales, opt_weight_zp, opt_dtype_out, out);

4 changes: 2 additions & 2 deletions kernels/quantized/test/op_mixed_mm_test.cpp
@@ -18,9 +18,9 @@
 
 using namespace ::testing;
 using exec_aten::optional;
-using exec_aten::RuntimeContext;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::native::quantized_mixed_mm_out;
 using torch::executor::testing::TensorFactory;
 
@@ -55,7 +55,7 @@ void test_dtype() {
       /*sizes=*/{1, 2},
       /*data=*/{3.8, 3.0});
 
-  RuntimeContext ctx{};
+  KernelRuntimeContext ctx{};
 
   quantized_mixed_mm_out(ctx, input, weight, weight_scales, opt_weight_zp, out);
 

2 changes: 1 addition & 1 deletion kernels/test/TestUtil.h
@@ -116,6 +116,6 @@ class OperatorTest : public ::testing::Test {
   }
 
  protected:
-  exec_aten::RuntimeContext context_;
+  executorch::runtime::KernelRuntimeContext context_;
   bool expect_failure_;
 };

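With the TestUtil.h change above, every fixture deriving from OperatorTest now carries the context as executorch::runtime::KernelRuntimeContext context_. A minimal sketch of a helper that routes an out-variant through that member is below; the fixture name is hypothetical, the include paths are illustrative, and atan2_outf is borrowed from the op_atan2_test.cpp hunk further down.

    #include <executorch/kernels/test/TestUtil.h>
    #include <gtest/gtest.h>

    using exec_aten::Tensor;

    // Hypothetical fixture; the generated operator header that declares
    // torch::executor::aten::atan2_outf is assumed to be pulled in by the
    // test harness.
    class OpAtan2FixtureTest : public OperatorTest {
     protected:
      Tensor& op_atan2_out(const Tensor& self, const Tensor& other, Tensor& out) {
        // context_ is the inherited KernelRuntimeContext member.
        return torch::executor::aten::atan2_outf(context_, self, other, out);
      }
    };
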
5 changes: 3 additions & 2 deletions kernels/test/custom_kernel_example/op_relu.cpp
@@ -18,8 +18,8 @@ namespace native {
 
 using Tensor = exec_aten::Tensor;
 using ScalarType = exec_aten::ScalarType;
-using exec_aten::RuntimeContext;
 using executor::Error;
+using executorch::runtime::KernelRuntimeContext;
 
 namespace {
 
@@ -61,7 +61,8 @@ void relu(const Tensor& input, Tensor& output) {
  *
  * relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  */
-Tensor& my_relu_out(RuntimeContext& context, const Tensor& input, Tensor& out) {
+Tensor&
+my_relu_out(KernelRuntimeContext& context, const Tensor& input, Tensor& out) {
   (void)context;
   resize(out, input.sizes());
   ET_KERNEL_CHECK(

2 changes: 1 addition & 1 deletion kernels/test/op_atan2_test.cpp
@@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_atan2_out(const Tensor& self, const Tensor& other, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::atan2_outf(context, self, other, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_cdist_forward_test.cpp
@@ -28,7 +28,7 @@ Tensor& op_cdist_forward_out(
     double p,
     optional<int64_t> compute_mode,
     Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::_cdist_forward_outf(
       context, x1, x2, p, compute_mode, out);
 }

2 changes: 1 addition & 1 deletion kernels/test/op_clamp_test.cpp
@@ -260,7 +260,7 @@ class OpClampTensorOutTest : public OperatorTest {
       const optional<Tensor>& min,
       const optional<Tensor>& max,
       Tensor& out) {
-    exec_aten::RuntimeContext context{};
+    executorch::runtime::KernelRuntimeContext context{};
     return torch::executor::aten::clamp_outf(context, self, min, max, out);
   }
 };

2 changes: 1 addition & 1 deletion kernels/test/op_diagonal_copy_test.cpp
@@ -27,7 +27,7 @@ Tensor& op_diagonal_copy_out(
     int64_t dim1,
     int64_t dim2,
     Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::diagonal_copy_outf(
       context, input, offset, dim1, dim2, out);
 }

2 changes: 1 addition & 1 deletion kernels/test/op_expm1_test.cpp
@@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_expm1_out(const Tensor& a, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::expm1_outf(context, a, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_flip_test.cpp
@@ -22,7 +22,7 @@ using exec_aten::Tensor;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_flip_out(const Tensor& input, IntArrayRef dims, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::flip_outf(context, input, dims, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_ge_test.cpp
@@ -15,10 +15,10 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::testing::TensorFactory;
 
 class OpGeTensorOutTest : public OperatorTest {

2 changes: 1 addition & 1 deletion kernels/test/op_gt_test.cpp
@@ -15,10 +15,10 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::testing::TensorFactory;
 
 class OpGtScalarOutTest : public OperatorTest {

2 changes: 1 addition & 1 deletion kernels/test/op_le_test.cpp
@@ -15,10 +15,10 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::testing::TensorFactory;
 
 class OpLeScalarOutTest : public OperatorTest {

2 changes: 1 addition & 1 deletion kernels/test/op_log10_test.cpp
@@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_log10_out(const Tensor& a, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::log10_outf(context, a, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_log1p_test.cpp
@@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_log1p_out(const Tensor& a, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::log1p_outf(context, a, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_log2_test.cpp
@@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_log2_out(const Tensor& a, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::log2_outf(context, a, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_lt_test.cpp
@@ -15,10 +15,10 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::testing::TensorFactory;
 
 class OpLtScalarOutTest : public OperatorTest {

2 changes: 1 addition & 1 deletion kernels/test/op_maximum_test.cpp
@@ -21,7 +21,7 @@ using exec_aten::Tensor;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_maximum_out(const Tensor& self, const Tensor& other, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::maximum_outf(context, self, other, out);
 }
 

2 changes: 1 addition & 1 deletion kernels/test/op_native_batch_norm_test.cpp
@@ -61,7 +61,7 @@ class OpNativeBatchNormLegitOutTest : public OperatorTest {
       exec_aten::Tensor& out0,
       exec_aten::Tensor& out1,
       exec_aten::Tensor& out2) {
-    exec_aten::RuntimeContext context{};
+    executorch::runtime::KernelRuntimeContext context{};
     return torch::executor::aten::_native_batch_norm_legit_outf(
         context,
         input,

2 changes: 1 addition & 1 deletion kernels/test/op_native_group_norm_test.cpp
@@ -32,7 +32,7 @@ ::std::tuple<Tensor&, Tensor&, Tensor&> op_native_group_norm_out(
     Tensor& out0,
     Tensor& out1,
     Tensor& out2) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::native_group_norm_outf(
       context, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
 }

4 changes: 2 additions & 2 deletions kernels/test/op_ne_test.cpp
@@ -15,10 +15,10 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::RuntimeContext;
 using exec_aten::Scalar;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
+using executorch::runtime::KernelRuntimeContext;
 using torch::executor::testing::TensorFactory;
 
 class OpNeTest : public OperatorTest {
@@ -34,7 +34,7 @@ class OpNeTest : public OperatorTest {
     Tensor a = tf_input.make(/*sizes=*/{2, 2}, /*data=*/{2, 3, 2, 4});
     Tensor b = tf_input.make({2, 2}, {2, 2, 2, 2});
     Tensor out = tf_bool.zeros({2, 2});
-    RuntimeContext context{};
+    KernelRuntimeContext context{};
 
     torch::executor::aten::ne_outf(context, a, b, out);
     EXPECT_TENSOR_EQ(out, tf_bool.make({2, 2}, {false, true, false, true}));

2 changes: 1 addition & 1 deletion kernels/test/op_pdist_forward_test.cpp
@@ -22,7 +22,7 @@ using exec_aten::Tensor;
 using torch::executor::testing::TensorFactory;
 
 Tensor& op_pdist_forward_out(const Tensor& input, double p, Tensor& out) {
-  exec_aten::RuntimeContext context{};
+  executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::aten::_pdist_forward_outf(context, input, p, out);
 }
 
