Skip to content

Migrate backends/qualcomm to the new namespace #6025

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/ir/qcir_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@

#include <unordered_map>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {

qcir::TensorType ToTensorType(Qnn_TensorType_t type) {
Expand Down Expand Up @@ -278,5 +278,5 @@ Qnn_Tensor_t ToTensor(const tensor_type& tensor) {
}

} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/ir/qcir_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@
#include <executorch/backends/qualcomm/aot/ir/qcir_generated.h>
#include "QnnTypes.h"

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {

typedef flatbuffers::Vector<::flatbuffers::Offset<qcir::Tensor>>::return_type
Expand All @@ -36,5 +36,5 @@ flatbuffers::Offset<qcir::Tensor> ToTensor(
Qnn_Tensor_t ToTensor(const tensor_type& tensor);

} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
11 changes: 7 additions & 4 deletions backends/qualcomm/aot/python/PyQnnManagerAdaptor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,12 @@
#include <pybind11/pybind11.h>

namespace py = pybind11;
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {

using executorch::runtime::Error;

PYBIND11_MODULE(PyQnnManagerAdaptor, m) {
// TODO: Add related documents for configurations listed below
using namespace qnn_delegate;
Expand Down Expand Up @@ -39,5 +42,5 @@ PYBIND11_MODULE(PyQnnManagerAdaptor, m) {
.def("GetSpillFillBufferSize", &PyQnnManager::GetSpillFillBufferSize);
}
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
19 changes: 10 additions & 9 deletions backends/qualcomm/aot/python/PyQnnManagerAdaptor.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@
#include <string_view>

namespace py = pybind11;
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
class PyQnnManager {
public:
Expand Down Expand Up @@ -48,7 +48,7 @@ class PyQnnManager {
qnn_executorch_options, qnn_executorch_context_binary_);
}

Error Init() {
executorch::runtime::Error Init() {
return qnn_manager_->Init();
}
bool IsNodeSupportedByBackend(
Expand Down Expand Up @@ -97,8 +97,8 @@ class PyQnnManager {
wrapper->SetName(param->GetName());
set_tensor(wrapper, params);
} else {
Error err = param->PopulateQnnParam();
if (err != Error::Ok) {
executorch::runtime::Error err = param->PopulateQnnParam();
if (err != executorch::runtime::Error::Ok) {
QNN_EXECUTORCH_LOG_ERROR(
"Fail to get scalar parameter in online prepare stage");
return py::array_t<char>(0);
Expand Down Expand Up @@ -131,7 +131,8 @@ class PyQnnManager {
context_binary.buffer = builder.GetBufferPointer();
context_binary.nbytes = builder.GetSize();
} else if (
qnn_manager_->Compile(op_wrappers, context_binary) != Error::Ok) {
qnn_manager_->Compile(op_wrappers, context_binary) !=
executorch::runtime::Error::Ok) {
return py::array_t<char>(0);
}

Expand All @@ -155,7 +156,7 @@ class PyQnnManager {
return qnn_manager_->IsTensorDump();
}

Error AllocateTensor() {
executorch::runtime::Error AllocateTensor() {
return qnn_manager_->AllocateTensor();
}

Expand Down Expand Up @@ -189,5 +190,5 @@ class PyQnnManager {
std::shared_ptr<QnnManager> qnn_manager_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/python/PyQnnWrapperAdaptor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
#include <string>

namespace py = pybind11;
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
const Qnn_QuantizationEncoding_t& encoding,
Expand Down Expand Up @@ -250,5 +250,5 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
.def("GetEncodings", &PyQnnTensorWrapper::GetEncodings);
}
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/python/PyQnnWrapperAdaptor.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
namespace py = pybind11;
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
class PyQnnOpWrapper {
public:
Expand Down Expand Up @@ -183,5 +183,5 @@ class PyQnnTensorWrapper {
std::shared_ptr<TensorWrapper> tensor_wrapper_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/wrappers/OpWrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
* LICENSE file in the root directory of this source tree.
*/
#include <executorch/backends/qualcomm/aot/wrappers/OpWrapper.h>
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
Qnn_OpConfig_t OpWrapper::GetOpConfig() {
param_types_.clear();
Expand Down Expand Up @@ -44,5 +44,5 @@ Qnn_OpConfig_t OpWrapper::GetOpConfig() {
return ret;
}
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/wrappers/OpWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
#include <memory>
#include <sstream>
#include <typeinfo>
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
class OpWrapper final {
public:
Expand Down Expand Up @@ -116,5 +116,5 @@ class OpWrapper final {
std::vector<Qnn_Param_t> param_types_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
13 changes: 8 additions & 5 deletions backends/qualcomm/aot/wrappers/ParamWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,17 @@
#include <utility>

#include "QnnTypes.h"
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {

using executorch::runtime::Error;

class ParamWrapper {
public:
// Populate Qnn_Param_t. Return an error code Error::Ok if succeeded,
// Error::Internal if failed
virtual Error PopulateQnnParam() = 0;
virtual executorch::runtime::Error PopulateQnnParam() = 0;
virtual ~ParamWrapper() = default;

ParamWrapper(const ParamWrapper& rhs) = default;
Expand Down Expand Up @@ -50,5 +53,5 @@ class ParamWrapper {
std::string name_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/wrappers/QuantizeParamsWrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
*/
#include <executorch/backends/qualcomm/aot/wrappers/QuantizeParamsWrapper.h>
#include <executorch/backends/qualcomm/runtime/Logging.h>
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
const Qnn_QuantizeParams_t& quantization) {
Expand Down Expand Up @@ -69,5 +69,5 @@ std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
return quantize_param_wrapper;
}
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/qualcomm/aot/wrappers/QuantizeParamsWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
#include <vector>

#include "QnnTypes.h"
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
class QuantizeParamsWrapper {
public:
Expand Down Expand Up @@ -285,5 +285,5 @@ class AxisScaleOffsetQuantizeParamsWrapper final
std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
const Qnn_QuantizeParams_t& quantization);
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
14 changes: 7 additions & 7 deletions backends/qualcomm/aot/wrappers/ScalarParamWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@

#include <executorch/backends/qualcomm/aot/wrappers/ParamWrapper.h>
#include <executorch/runtime/core/error.h>
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
template <typename T>
class ScalarParamWrapper final : public ParamWrapper {
Expand All @@ -25,7 +25,7 @@ class ScalarParamWrapper final : public ParamWrapper {

// Populate appropriate field in Qnn scalarParam depending on the datatype
// of the scalar
Error PopulateQnnParam() override {
executorch::runtime::Error PopulateQnnParam() override {
qnn_param_.scalarParam.dataType = data_type_;
switch (data_type_) {
case QNN_DATATYPE_BOOL_8:
Expand Down Expand Up @@ -57,9 +57,9 @@ class ScalarParamWrapper final : public ParamWrapper {
"ScalarParamWrapper failed to assign scalarParam value - "
"invalid datatype %d",
data_type_);
return Error::Internal;
return executorch::runtime::Error::Internal;
}
return Error::Ok;
return executorch::runtime::Error::Ok;
}

const T& GetData() const {
Expand All @@ -71,5 +71,5 @@ class ScalarParamWrapper final : public ParamWrapper {
T data_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
16 changes: 8 additions & 8 deletions backends/qualcomm/aot/wrappers/TensorParamWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@

#include <memory>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {
class TensorParamWrapper final : public ParamWrapper {
public:
Expand All @@ -24,12 +24,12 @@ class TensorParamWrapper final : public ParamWrapper {
: ParamWrapper(QNN_PARAMTYPE_TENSOR, std::move(name)),
static_tensor_wrapper_(std::move(static_tensor)) {}
// Populate Qnn tensorParam with tensor wrapper
Error PopulateQnnParam() override {
// Error out if underlying tensor is not static:
executorch::runtime::Error PopulateQnnParam() override {
    // Error out if underlying tensor is not static:
if (!static_tensor_wrapper_->IsTensorStatic())
return Error::Internal;
return executorch::runtime::Error::Internal;
qnn_param_.tensorParam = static_tensor_wrapper_->CloneTensorStruct();
return Error::Ok;
return executorch::runtime::Error::Ok;
}

// Accessor functions:
Expand All @@ -45,5 +45,5 @@ class TensorParamWrapper final : public ParamWrapper {
std::shared_ptr<TensorWrapper> static_tensor_wrapper_;
};
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
11 changes: 7 additions & 4 deletions backends/qualcomm/aot/wrappers/TensorWrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,12 @@
#include <cstring>
#include <limits>
#include <numeric>
namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace qnn {

using executorch::runtime::Error;

std::uint32_t GetDataTypeSize(Qnn_DataType_t data_type) {
std::uint32_t size = 0;

Expand Down Expand Up @@ -214,5 +217,5 @@ std::shared_ptr<TensorWrapper> CreateTensorWrapper(const Qnn_Tensor_t& tensor) {
QNN_VER_PTR(tensor)->clientBuf.data);
}
} // namespace qnn
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
Loading
Loading