Rename exec_aten:: to executorch::aten:: #5296

Closed
wants to merge 2 commits
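The two commits are mechanical call-site updates: dtype and tensor types previously spelled as torch::executor::X are switched to the exec_aten:: aliases exported by executorch/runtime/core/exec_aten/exec_aten.h, as a step toward the executorch::aten:: rename in the title. A minimal sketch of the pattern (illustrative; not a file from this diff):

#include <executorch/runtime/core/exec_aten/exec_aten.h>

struct ExampleTensorInfo {
  // Before: torch::executor::ScalarType dtype;
  exec_aten::ScalarType dtype;  // after: reference the exec_aten alias
  exec_aten::Half half_value;   // same pattern for Half, BFloat16, optional, ...
};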
2 changes: 1 addition & 1 deletion backends/qualcomm/runtime/QnnExecuTorch.h
@@ -44,7 +44,7 @@ struct CustomMemTensorInfo {
size_t tensor_bytes;
uint32_t* shape;
uint32_t rank;
-torch::executor::ScalarType dtype;
+exec_aten::ScalarType dtype;
};

/// Allocate specific tensors (usually graph inputs and outputs) on shared
2 changes: 1 addition & 1 deletion backends/qualcomm/runtime/SharedBuffer.cpp
@@ -25,7 +25,7 @@ std::size_t std::hash<CustomMemTensorInfo>::operator()(
hash_val ^= info.shape[i];
}
hash_val ^= std::hash<uint32_t>()(info.rank);
-hash_val ^= std::hash<torch::executor::ScalarType>()(info.dtype);
+hash_val ^= std::hash<exec_aten::ScalarType>()(info.dtype);
return hash_val;
}

4 changes: 2 additions & 2 deletions backends/vulkan/test/utils/test_utils.cpp
@@ -8,7 +8,7 @@

#include <executorch/backends/vulkan/test/utils/test_utils.h>

-#include <executorch/runtime/core/portable_type/half.h>
+#include <executorch/runtime/core/exec_aten/exec_aten.h>

#include <executorch/backends/vulkan/runtime/graph/ops/impl/utils/TensorUtils.h>

@@ -322,7 +322,7 @@ void record_reference_matmul(
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int32_t, Int) \
-_(torch::executor::Half, Half) \
+_(exec_aten::Half, Half) \
_(float, Float) \
_(int8_t, QInt8)

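The _(ctype, Name) list above is an X-macro table pairing C++ element types with Vulkan dtype names. A hedged, self-contained sketch of how such a table is typically consumed; the macro and function names here are illustrative, not the actual test_utils code:

#include <cstddef>
#include <cstdint>

#define FORALL_EXAMPLE_TYPES(_) \
  _(std::uint8_t, Byte)         \
  _(std::int32_t, Int)          \
  _(float, Float)

enum class ExampleDType { Byte, Int, Float };

// Expand the table into switch cases that map a dtype tag to its element size.
inline std::size_t element_size(ExampleDType dtype) {
  switch (dtype) {
#define CASE(ctype, name)  \
  case ExampleDType::name: \
    return sizeof(ctype);
    FORALL_EXAMPLE_TYPES(CASE)
#undef CASE
  }
  return 0;
}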
8 changes: 4 additions & 4 deletions backends/vulkan/test/vulkan_compute_api_test.cpp
@@ -11,7 +11,7 @@
#include <utility>
#include <vector>

-#include <executorch/runtime/core/portable_type/half.h>
+#include <executorch/runtime/core/exec_aten/exec_aten.h>

#include <executorch/backends/vulkan/runtime/api/api.h>

Expand Down Expand Up @@ -485,7 +485,7 @@ TEST_F(VulkanComputeAPITest, test_buffer_float16) {
if (!context()->adapter_ptr()->has_full_float16_buffers_support()) {
GTEST_SKIP();
}
-test_storage_buffer_type<torch::executor::Half, vkapi::kHalf>(16);
+test_storage_buffer_type<exec_aten::Half, vkapi::kHalf>(16);
}

TEST_F(VulkanComputeAPITest, test_buffer_int8) {
@@ -567,7 +567,7 @@ TEST_F(VulkanComputeAPITest, buffer_tensor_sanity_check) {
run_buffer_tensor_sanity_check<float>(a);
break;
case vkapi::kHalf:
-run_buffer_tensor_sanity_check<torch::executor::Half>(a);
+run_buffer_tensor_sanity_check<exec_aten::Half>(a);
break;
case vkapi::kChar:
run_buffer_tensor_sanity_check<int8_t>(a);
@@ -2395,7 +2395,7 @@ TEST(VulkanToFromGPUShaderTest, round_trip_tests) {

for (auto& sizes : to_test) {
RUN_TESTS(float, vkapi::kFloat)
-RUN_TESTS(torch::executor::Half, vkapi::kHalf)
+RUN_TESTS(exec_aten::Half, vkapi::kHalf)
}

for (auto& sizes : to_test_int8) {
@@ -11,9 +11,9 @@
#include <gtest/gtest.h>

using namespace ::testing;
-using torch::executor::ScalarType;
-using torch::executor::Tensor;
-using torch::executor::TensorImpl;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
+using exec_aten::TensorImpl;

TEST(CrossAttentxnMaskTest, TestCrossAttentionMask) {
std::vector<int> tokens = {
2 changes: 1 addition & 1 deletion examples/qualcomm/oss_scripts/llama2/runner/runner.cpp
@@ -135,7 +135,7 @@ int32_t Runner::logitsToToken(const exec_aten::Tensor& logits_tensor) {

// Given an input token. Set up the inputs for the model and execute a single
// step. Returning the logits tensor.
-Result<torch::executor::Tensor> Runner::run_model_step(
+Result<exec_aten::Tensor> Runner::run_model_step(
int64_t input_token,
TensorPtr& token,
TensorPtr& start_pos,
2 changes: 1 addition & 1 deletion examples/qualcomm/oss_scripts/llama2/runner/runner.h
@@ -120,7 +120,7 @@ class IoMemMgr {
std::vector<uint32_t> shape;
uint32_t rank;
size_t element_size;
-torch::executor::ScalarType dtype;
+exec_aten::ScalarType dtype;
};

struct IoInfo {
4 changes: 2 additions & 2 deletions extension/android/jni/jni_layer_constants.h
@@ -8,7 +8,7 @@

#include <unordered_map>

-#include <executorch/runtime/core/portable_type/scalar_type.h>
+#include <executorch/runtime/core/exec_aten/exec_aten.h>

namespace executorch::extension {

@@ -37,7 +37,7 @@ constexpr static int kTensorDTypeBits4x2 = 20;
constexpr static int kTensorDTypeBits8 = 21;
constexpr static int kTensorDTypeBits16 = 22;

-using torch::executor::ScalarType;
+using exec_aten::ScalarType;

const std::unordered_map<ScalarType, int> scalar_type_to_java_dtype = {
{ScalarType::Byte, kTensorDTypeUInt8},
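For context, jni_layer_constants.h maps exec_aten::ScalarType values to the integer dtype codes used on the Java side (the same codes as DType.java below). A hedged sketch of a lookup against such a map; the helper name, the fallback value, and the trimmed-down table are illustrative, not the actual JNI-layer code:

#include <unordered_map>

#include <executorch/runtime/core/exec_aten/exec_aten.h>

namespace {

// Trimmed-down stand-in for scalar_type_to_java_dtype; the codes mirror
// DType.java below (UINT8 = 0, FLOAT = 6).
const std::unordered_map<exec_aten::ScalarType, int> kScalarTypeToJavaDType = {
    {exec_aten::ScalarType::Byte, 0},
    {exec_aten::ScalarType::Float, 6},
};

// Hypothetical helper: returns -1 when a dtype has no Java-side code.
int java_dtype_for(exec_aten::ScalarType type) {
  auto it = kScalarTypeToJavaDType.find(type);
  return it == kScalarTypeToJavaDType.end() ? -1 : it->second;
}

} // namespace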
46 changes: 23 additions & 23 deletions extension/android/src/main/java/org/pytorch/executorch/DType.java
@@ -17,51 +17,51 @@ public enum DType {
// NOTE: "jniCode" must be kept in sync with scalar_type.h.
// NOTE: Never serialize "jniCode", because it can change between releases.

-/** Code for dtype torch::executor::Byte */
+/** Code for dtype ScalarType::Byte */
UINT8(0),
-/** Code for dtype torch::executor::Char */
+/** Code for dtype ScalarType::Char */
INT8(1),
-/** Code for dtype torch::executor::Short */
+/** Code for dtype ScalarType::Short */
INT16(2),
-/** Code for dtype torch::executor::Int */
+/** Code for dtype ScalarType::Int */
INT32(3),
-/** Code for dtype torch::executor::Long */
+/** Code for dtype ScalarType::Long */
INT64(4),
-/** Code for dtype torch::executor::Half */
+/** Code for dtype ScalarType::Half */
HALF(5),
-/** Code for dtype torch::executor::Float */
+/** Code for dtype ScalarType::Float */
FLOAT(6),
-/** Code for dtype torch::executor::Double */
+/** Code for dtype ScalarType::Double */
DOUBLE(7),
-/** Code for dtype torch::executor::ComplexHalf */
+/** Code for dtype ScalarType::ComplexHalf */
COMPLEX_HALF(8),
-/** Code for dtype torch::executor::ComplexFloat */
+/** Code for dtype ScalarType::ComplexFloat */
COMPLEX_FLOAT(9),
-/** Code for dtype torch::executor::ComplexDouble */
+/** Code for dtype ScalarType::ComplexDouble */
COMPLEX_DOUBLE(10),
-/** Code for dtype torch::executor::Bool */
+/** Code for dtype ScalarType::Bool */
BOOL(11),
-/** Code for dtype torch::executor::QInt8 */
+/** Code for dtype ScalarType::QInt8 */
QINT8(12),
-/** Code for dtype torch::executor::QUInt8 */
+/** Code for dtype ScalarType::QUInt8 */
QUINT8(13),
-/** Code for dtype torch::executor::QInt32 */
+/** Code for dtype ScalarType::QInt32 */
QINT32(14),
-/** Code for dtype torch::executor::BFloat16 */
+/** Code for dtype ScalarType::BFloat16 */
BFLOAT16(15),
-/** Code for dtype torch::executor::QUInt4x2 */
+/** Code for dtype ScalarType::QUInt4x2 */
QINT4X2(16),
-/** Code for dtype torch::executor::QUInt2x4 */
+/** Code for dtype ScalarType::QUInt2x4 */
QINT2X4(17),
-/** Code for dtype torch::executor::Bits1x8 */
+/** Code for dtype ScalarType::Bits1x8 */
BITS1X8(18),
-/** Code for dtype torch::executor::Bits2x4 */
+/** Code for dtype ScalarType::Bits2x4 */
BITS2X4(19),
-/** Code for dtype torch::executor::Bits4x2 */
+/** Code for dtype ScalarType::Bits4x2 */
BITS4X2(20),
-/** Code for dtype torch::executor::Bits8 */
+/** Code for dtype ScalarType::Bits8 */
BITS8(21),
-/** Code for dtype torch::executor::Bits16 */
+/** Code for dtype ScalarType::Bits16 */
BITS16(22),
;

3 changes: 2 additions & 1 deletion kernels/optimized/blas/CPUBlas.cpp
@@ -24,7 +24,8 @@ extern "C" void sgemm_(char *transa, char *transb, int *m, int *n, int *k, float
namespace executorch {
namespace cpublas {

-// using Half = exec_aten::Half;
+using exec_aten::BFloat16;
+using exec_aten::Half;

#ifdef ET_BUILD_WITH_BLAS
#ifdef ET_BUILD_FOR_APPLE
23 changes: 10 additions & 13 deletions kernels/optimized/blas/CPUBlas.h
@@ -17,9 +17,6 @@
namespace executorch {
namespace cpublas {

-using BFloat16 = torch::executor::BFloat16;
-using Half = torch::executor::Half;

enum class TransposeType {
NoTranspose,
Transpose,
@@ -100,20 +97,20 @@ void gemm(
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
-const Half alpha,
-const Half *a, int64_t lda,
-const Half *b, int64_t ldb,
-const Half beta,
-Half *c, int64_t ldc);
+const exec_aten::Half alpha,
+const exec_aten::Half *a, int64_t lda,
+const exec_aten::Half *b, int64_t ldb,
+const exec_aten::Half beta,
+exec_aten::Half *c, int64_t ldc);

void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
-const BFloat16 alpha,
-const BFloat16 *a, int64_t lda,
-const BFloat16 *b, int64_t ldb,
-const BFloat16 beta,
-BFloat16 *c, int64_t ldc);
+const exec_aten::BFloat16 alpha,
+const exec_aten::BFloat16 *a, int64_t lda,
+const exec_aten::BFloat16 *b, int64_t ldb,
+const exec_aten::BFloat16 beta,
+exec_aten::BFloat16 *c, int64_t ldc);
// clang-format on

// clang-format off
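The Half/BFloat16 gemm overloads above keep the classic BLAS-style argument order. A hedged usage sketch for the Half overload; the include path, the sizes, and the column-major leading-dimension choice are assumptions for illustration:

#include <cstdint>
#include <vector>

#include <executorch/kernels/optimized/blas/CPUBlas.h>  // include path assumed from the file location

void half_gemm_example() {
  using executorch::cpublas::TransposeType;
  const int64_t m = 2, n = 3, k = 4;
  // Column-major storage with lda = m, ldb = k, ldc = m (illustrative).
  std::vector<exec_aten::Half> a(m * k, exec_aten::Half(1.0f));
  std::vector<exec_aten::Half> b(k * n, exec_aten::Half(2.0f));
  std::vector<exec_aten::Half> c(m * n, exec_aten::Half(0.0f));
  executorch::cpublas::gemm(
      TransposeType::NoTranspose, TransposeType::NoTranspose,
      m, n, k,
      /*alpha=*/exec_aten::Half(1.0f), a.data(), /*lda=*/m,
      b.data(), /*ldb=*/k,
      /*beta=*/exec_aten::Half(0.0f), c.data(), /*ldc=*/m);
}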
8 changes: 4 additions & 4 deletions kernels/optimized/cpu/op_exp.cpp
@@ -27,8 +27,8 @@ template <
typename CTYPE_OUT,
typename std::enable_if<
std::is_same<CTYPE_IN, CTYPE_OUT>::value &&
-!std::is_same<CTYPE_IN, torch::executor::Half>::value &&
-!std::is_same<CTYPE_OUT, torch::executor::Half>::value,
+!std::is_same<CTYPE_IN, exec_aten::Half>::value &&
+!std::is_same<CTYPE_OUT, exec_aten::Half>::value,
int>::type = 0>
void exp_data(
const CTYPE_IN* in_data,
@@ -47,8 +47,8 @@ template <
typename CTYPE_OUT,
typename std::enable_if<
!std::is_same<CTYPE_IN, CTYPE_OUT>::value ||
-std::is_same<CTYPE_IN, torch::executor::Half>::value ||
-std::is_same<CTYPE_OUT, torch::executor::Half>::value,
+std::is_same<CTYPE_IN, exec_aten::Half>::value ||
+std::is_same<CTYPE_OUT, exec_aten::Half>::value,
int>::type = 0>
void exp_data(
const CTYPE_IN* in_data,
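Both op_exp.cpp hunks select between two exp_data overloads with std::enable_if: a fast path when input and output types match and neither is Half, and a converting path otherwise. A standalone sketch of the same dispatch technique; names are illustrative, not the op_exp code:

#include <cmath>
#include <cstddef>
#include <type_traits>

#include <executorch/runtime/core/exec_aten/exec_aten.h>

template <
    typename CTYPE_IN,
    typename CTYPE_OUT,
    typename std::enable_if<
        std::is_same<CTYPE_IN, CTYPE_OUT>::value &&
            !std::is_same<CTYPE_IN, exec_aten::Half>::value,
        int>::type = 0>
void apply_exp(const CTYPE_IN* in, CTYPE_OUT* out, std::size_t n) {
  // Same-type fast path: no conversion needed.
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = static_cast<CTYPE_OUT>(std::exp(in[i]));
  }
}

template <
    typename CTYPE_IN,
    typename CTYPE_OUT,
    typename std::enable_if<
        !std::is_same<CTYPE_IN, CTYPE_OUT>::value ||
            std::is_same<CTYPE_IN, exec_aten::Half>::value,
        int>::type = 0>
void apply_exp(const CTYPE_IN* in, CTYPE_OUT* out, std::size_t n) {
  // Converting path: promote to float so Half and mixed dtypes are handled.
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = static_cast<CTYPE_OUT>(std::exp(static_cast<float>(in[i])));
  }
}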
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_empty.cpp
@@ -26,7 +26,7 @@ using exec_aten::Tensor;
Tensor& empty_out(
RuntimeContext& context,
IntArrayRef size,
-torch::executor::optional<torch::executor::MemoryFormat> memory_format,
+exec_aten::optional<exec_aten::MemoryFormat> memory_format,
Tensor& out) {
(void)context;

2 changes: 1 addition & 1 deletion kernels/portable/cpu/test/scalar_utils_test.cpp
@@ -16,7 +16,7 @@ struct promote_type_with_scalar_type_is_valid
(std::is_same<T2, torch::executor::internal::B1>::value ||
std::is_same<T2, torch::executor::internal::I8>::value ||
std::is_same<T2, torch::executor::internal::F8>::value) &&
-!std::is_same<T1, torch::executor::BFloat16>::value &&
+!std::is_same<T1, exec_aten::BFloat16>::value &&
!torch::executor::is_qint_type<T1>::value &&
!torch::executor::is_bits_type<T1>::value> {};

10 changes: 4 additions & 6 deletions kernels/portable/cpu/util/math_util.h
@@ -96,9 +96,8 @@ INT_T max_override(INT_T a, INT_T b) {

template <
typename T,
-typename std::enable_if<
-std::is_same<T, torch::executor::Half>::value,
-bool>::type = true>
+typename std::enable_if<std::is_same<T, exec_aten::Half>::value, bool>::
+type = true>
T min_override(T a, T b) {
const auto float_a = static_cast<float>(a);
if (std::isnan(float_a)) {
@@ -117,9 +116,8 @@ T min_override(T a, T b) {

template <
typename T,
-typename std::enable_if<
-std::is_same<T, torch::executor::Half>::value,
-bool>::type = true>
+typename std::enable_if<std::is_same<T, exec_aten::Half>::value, bool>::
+type = true>
T max_override(T a, T b) {
const auto float_a = static_cast<float>(a);
if (std::isnan(float_a)) {
12 changes: 6 additions & 6 deletions kernels/portable/cpu/util/test/broadcast_test.cpp
@@ -20,8 +20,8 @@
using namespace ::testing;
using exec_aten::ScalarType;
using exec_aten::Tensor;
-using torch::executor::ArrayRef;
-using torch::executor::testing::TensorFactory;
+using executorch::runtime::ArrayRef;
+using executorch::runtime::testing::TensorFactory;

TEST(BroadcastUtilTest, BroadcastTensor) {
TensorFactory<ScalarType::Int> tf;
@@ -112,17 +112,17 @@ TEST(BroadcastUtilTest, GetBroadcastTargetSize) {
Tensor a = tf.zeros({2, 1});
Tensor b = tf.zeros({5, 1, 2});

-get_broadcast_target_size(
+executorch::runtime::Error err = get_broadcast_target_size(
a,
b,
expected_output_size,
torch::executor::kTensorDimensionLimit,
&expected_output_dim);
+EXPECT_EQ(err, torch::executor::Error::Ok);

EXPECT_TRUE(
-torch::executor::ArrayRef<Tensor::SizesType>(
-expected_output_size, expected_output_dim)
-.equals(torch::executor::ArrayRef<Tensor::SizesType>({5, 2, 2})));
+ArrayRef<Tensor::SizesType>(expected_output_size, expected_output_dim)
+.equals(ArrayRef<Tensor::SizesType>({5, 2, 2})));
}

size_t linearize_indexes(size_t* indexes, size_t indexes_len, const Tensor& t) {
13 changes: 6 additions & 7 deletions kernels/portable/test/op_mul_test.cpp
@@ -36,19 +36,18 @@ class OpMulOutKernelTest : public OperatorTest {
TEST_F(OpMulOutKernelTest, UnhandledDtypeDies) {
// mul_out() doesn't handle QInt8.
// TensorFactory cannot be used with ScalarType::QInt8 since
-// torch::executor::qint8 does not have a default constructor. It must be
+// exec_aten::qint8 does not have a default constructor. It must be
// initialized with an explicit value. So, we need to manually create the
// underlying data without default construction and then the tensors from that
// data via TensorImpl.

std::vector<SizesType> sizes = {2, 2};

-std::vector<torch::executor::qint8> a_data{};
-std::generate_n(std::back_inserter(a_data), 4, []() {
-return torch::executor::qint8{0};
-});
-std::vector<torch::executor::qint8> b_data(a_data);
-std::vector<torch::executor::qint8> out_data(a_data);
+std::vector<exec_aten::qint8> a_data{};
+std::generate_n(
+std::back_inserter(a_data), 4, []() { return exec_aten::qint8{0}; });
+std::vector<exec_aten::qint8> b_data(a_data);
+std::vector<exec_aten::qint8> out_data(a_data);

auto a_impl = torch::executor::TensorImpl(
ScalarType::QInt8, 2, sizes.data(), a_data.data());
6 changes: 3 additions & 3 deletions kernels/prim_ops/register_prim_ops.cpp
@@ -12,7 +12,6 @@
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/kernel/operator_registry.h>

-using KernelArrayRef = ::torch::executor::ArrayRef<::torch::executor::Kernel>;
using torch::executor::function::et_copy_index;

namespace torch {
@@ -294,13 +293,14 @@ static Kernel prim_ops[] = {

};

-static KernelArrayRef kernel_array_ref(
+executorch::runtime::Span<const executorch::runtime::Kernel> kernel_span(
prim_ops,
prim_ops + sizeof(prim_ops) / sizeof(Kernel));

// Return value not used. Keep the static variable assignment to register
// operators in static initialization time.
-static auto success_with_kernel_reg = register_kernels(kernel_array_ref);
+auto success_with_kernel_reg =
+executorch::runtime::register_kernels(kernel_span);

} // namespace
} // namespace function
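The hunk above switches prim-op registration to executorch::runtime::Span and register_kernels while keeping the static-initialization idiom described in the comment. A hedged sketch of how a separate, hypothetical kernel library could use the same idiom; the Kernel constructor and the OpFunction signature used here are assumptions, not taken from this diff:

#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/kernel/operator_registry.h>

namespace my_custom_kernels {  // hypothetical namespace
namespace {

// Hypothetical kernel body; a real kernel would read and write its EValue args.
void noop_out(
    executorch::runtime::KernelRuntimeContext&,
    executorch::runtime::EValue**) {}

static executorch::runtime::Kernel kernels[] = {
    // Assumed Kernel(name, fn) constructor; see operator_registry.h for the
    // exact signature.
    executorch::runtime::Kernel("my_ns::noop.out", noop_out),
};

executorch::runtime::Span<const executorch::runtime::Kernel> kernel_span(
    kernels, kernels + sizeof(kernels) / sizeof(kernels[0]));

// As in register_prim_ops.cpp: the return value is unused, but the static
// assignment forces registration to run during static initialization.
auto registered = executorch::runtime::register_kernels(kernel_span);

} // namespace
} // namespace my_custom_kernels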