Commit 938d8f2

feat: Add DrawGraph tool for graph visualization
- Implemented the DrawGraph tool to visualize graphs.
- Added test cases to validate the correctness of the generated graphs.
1 parent 8861b9a commit 938d8f2
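
For context, a minimal sketch of how a visualization pass might walk the graph through the new GetOpConfig() binding. This is an assumed usage example, not code from this commit; only the method names and dict keys mirror the bindings in the diff below, and where op_wrapper comes from is left out.

# Hypothetical usage sketch (not part of this commit).
# `op_wrapper` is assumed to be an OpWrapper instance exposed through the
# PyQnnWrapperAdaptor module; the dict keys mirror the GetOpConfig() lambda.
def dump_op(op_wrapper):
    cfg = op_wrapper.GetOpConfig()
    print(cfg["name"], cfg["typeName"], "from", cfg["packageName"])
    for t in cfg["inputTensors"]:
        v1 = t.v1  # raises if the tensor is not QNN_TENSOR_VERSION_1
        print("  in :", v1.name, list(v1.dimensions))
    for t in cfg["outputTensors"]:
        v1 = t.v1
        print("  out:", v1.name, list(v1.dimensions))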

File tree

5 files changed: 781 additions & 2 deletions


backends/qualcomm/aot/python/PyQnnWrapperAdaptor.cpp

Lines changed: 236 additions & 2 deletions
@@ -66,6 +66,37 @@ std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
   return quantize_param_wrapper;
 }
 
+std::string GetScalarValue(const Qnn_Scalar_t& scalar) {
+  switch (scalar.dataType) {
+    case QNN_DATATYPE_FLOAT_32:
+      return std::to_string(scalar.floatValue);
+    case QNN_DATATYPE_FLOAT_64:
+      return std::to_string(scalar.doubleValue);
+    case QNN_DATATYPE_UINT_64:
+      return std::to_string(scalar.uint64Value);
+    case QNN_DATATYPE_INT_64:
+      return std::to_string(scalar.int64Value);
+    case QNN_DATATYPE_UINT_32:
+      return std::to_string(scalar.uint32Value);
+    case QNN_DATATYPE_INT_32:
+      return std::to_string(scalar.int32Value);
+    case QNN_DATATYPE_UINT_16:
+      return std::to_string(scalar.uint16Value);
+    case QNN_DATATYPE_INT_16:
+      return std::to_string(scalar.int16Value);
+    case QNN_DATATYPE_UINT_8:
+      return std::to_string(scalar.uint8Value);
+    case QNN_DATATYPE_INT_8:
+      return std::to_string(scalar.int8Value);
+    case QNN_DATATYPE_BOOL_8:
+      return std::to_string(static_cast<int>(scalar.bool8Value));
+    case QNN_DATATYPE_STRING:
+      return std::string(scalar.stringValue);
+    default:
+      return "QNN_DATATYPE_UNDEFINED";
+  }
+}
+
 std::shared_ptr<TensorWrapper> CreateTensorWrapper(
     const std::string& tensor_name,
     Qnn_TensorType_t tensor_type,
@@ -176,11 +207,60 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
           Qnn_QuantizationEncoding_t::
               QNN_QUANTIZATION_ENCODING_BW_AXIS_SCALE_OFFSET)
       .export_values();
+
   py::class_<OpWrapper, std::shared_ptr<OpWrapper>>(m, "OpWrapper")
       .def(py::init<
            const std::string&,
            const std::string&,
-           const std::string&>());
+           const std::string&>())
+      .def(
+          "GetInputTensors",
+          &OpWrapper::GetInputTensors,
+          "A function which gets input tensors")
+      .def(
+          "GetOutputTensors",
+          &OpWrapper::GetOutputTensors,
+          "A function which gets output tensors")
+      .def("GetOpType", &OpWrapper::GetOpType, "A function which gets op type")
+      .def("GetName", &OpWrapper::GetName, "A function which gets name")
+      .def(
+          "GetPackageName",
+          &OpWrapper::GetPackageName,
+          "A function which gets package name")
+      .def(
+          "GetParams", &OpWrapper::GetRawParams, "A function which gets params")
+      // lambda function
+      // python: op_wrapper.GetOpConfig()
+      .def(
+          "GetOpConfig",
+          [](OpWrapper& self) {
+            auto op_config = self.GetOpConfig();
+            py::dict result;
+            py::list params_list;
+            py::list input_tensors_list;
+            py::list output_tensors_list;
+            result["version"] = op_config.version;
+            result["name"] = op_config.v1.name;
+            result["packageName"] = op_config.v1.packageName;
+            result["typeName"] = op_config.v1.typeName;
+            result["numOfParams"] = op_config.v1.numOfParams;
+            for (size_t i = 0; i < op_config.v1.numOfParams; ++i) {
+              params_list.append(op_config.v1.params[i]);
+            }
+            result["params"] = params_list;
+            result["numOfInputs"] = op_config.v1.numOfInputs;
+            for (size_t i = 0; i < op_config.v1.numOfInputs; ++i) {
+              input_tensors_list.append(op_config.v1.inputTensors[i]);
+            }
+            result["inputTensors"] = input_tensors_list;
+            result["numOfOutputs"] = op_config.v1.numOfOutputs;
+            for (size_t i = 0; i < op_config.v1.numOfOutputs; ++i) {
+              output_tensors_list.append(op_config.v1.outputTensors[i]);
+            }
+            result["outputTensors"] = output_tensors_list;
+            return result;
+          },
+          "Get operator configuration");
 
   py::class_<TensorWrapper, std::shared_ptr<TensorWrapper>>(m, "TensorWrapper")
       .def(py::init(py::overload_cast<
@@ -197,7 +277,9 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
   py::class_<QuantizeParamsWrapper>(m, "QuantizeParamsWrapper");
 
   py::class_<Qnn_ScaleOffset_t>(m, "Qnn_ScaleOffset_t")
-      .def(py::init<float, int32_t>());
+      .def(py::init<float, int32_t>())
+      .def_readonly("scale", &Qnn_ScaleOffset_t::scale)
+      .def_readonly("offset", &Qnn_ScaleOffset_t::offset);
 
   py::class_<PyQnnOpWrapper, std::shared_ptr<PyQnnOpWrapper>>(
       m, "PyQnnOpWrapper")
@@ -248,6 +330,158 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
       .def("GetDataType", &PyQnnTensorWrapper::GetDataType)
       .def("GetName", &PyQnnTensorWrapper::GetName)
       .def("GetEncodings", &PyQnnTensorWrapper::GetEncodings);
+
+  py::class_<Qnn_OpConfig_t>(m, "Qnn_OpConfig")
+      .def_readonly("version", &Qnn_OpConfig_t::version)
+      // getter
+      // python: op_wrapper.GetOpConfig().v1
+      .def_property_readonly(
+          "v1", [](const Qnn_OpConfig_t& config) -> const Qnn_OpConfigV1_t& {
+            return config.v1;
+          });
+
+  py::enum_<Qnn_OpConfigVersion_t>(m, "Qnn_OpConfigVersion")
+      .value("QNN_OPCONFIG_VERSION_1", QNN_OPCONFIG_VERSION_1)
+      .value("QNN_OPCONFIG_VERSION_UNDEFINED", QNN_OPCONFIG_VERSION_UNDEFINED)
+      .export_values();
+
+  py::class_<Qnn_OpConfigV1_t>(m, "Qnn_OpConfigV1")
+      .def_readonly("name", &Qnn_OpConfigV1_t::name)
+      .def_readonly("packageName", &Qnn_OpConfigV1_t::packageName)
+      .def_readonly("typeName", &Qnn_OpConfigV1_t::typeName)
+      .def_readonly("numOfParams", &Qnn_OpConfigV1_t::numOfParams)
+      .def_readonly("params", &Qnn_OpConfigV1_t::params)
+      .def_readonly("numOfInputs", &Qnn_OpConfigV1_t::numOfInputs)
+      .def_readonly("inputTensors", &Qnn_OpConfigV1_t::inputTensors)
+      .def_readonly("numOfOutputs", &Qnn_OpConfigV1_t::numOfOutputs)
+      .def_readonly("outputTensors", &Qnn_OpConfigV1_t::outputTensors);
+
+  py::class_<Qnn_Param_t>(m, "Qnn_Param")
+      .def_readonly("paramType", &Qnn_Param_t::paramType)
+      .def_readonly("name", &Qnn_Param_t::name)
+      .def_property_readonly(
+          "scalarParam",
+          [](const Qnn_Param_t& param) -> const Qnn_Scalar_t& {
+            if (param.paramType == Qnn_ParamType_t::QNN_PARAMTYPE_SCALAR) {
+              return param.scalarParam;
+            }
+            throw std::runtime_error("ParamType is not scalar.");
+          })
+      .def_property_readonly(
+          "tensorParam", [](const Qnn_Param_t& param) -> const Qnn_Tensor_t& {
+            if (param.paramType == Qnn_ParamType_t::QNN_PARAMTYPE_TENSOR) {
+              return param.tensorParam;
+            }
+            throw std::runtime_error("ParamType is not tensor.");
+          });
+
+  py::enum_<Qnn_ParamType_t>(m, "Qnn_ParamType_t")
+      .value("QNN_PARAMTYPE_SCALAR", Qnn_ParamType_t::QNN_PARAMTYPE_SCALAR)
+      .value("QNN_PARAMTYPE_TENSOR", Qnn_ParamType_t::QNN_PARAMTYPE_TENSOR)
+      .value(
+          "QNN_PARAMTYPE_UNDEFINED", Qnn_ParamType_t::QNN_PARAMTYPE_UNDEFINED)
+      .export_values();
+
+  py::class_<Qnn_Scalar_t>(m, "Qnn_Scalar_t")
+      .def_readonly("dataType", &Qnn_Scalar_t::dataType)
+      .def("value", &GetScalarValue, "Get the value of the scalar as a string");
+
+  py::class_<Qnn_Tensor_t>(m, "Qnn_Tensor_t")
+      .def_readonly("version", &Qnn_Tensor_t::version)
+      .def_property_readonly(
+          "v1",
+          [](Qnn_Tensor_t& t) -> Qnn_TensorV1_t& {
+            if (t.version == QNN_TENSOR_VERSION_1) {
+              return t.v1;
+            }
+            throw std::runtime_error("Tensor version is not V1.");
+          })
+      .def_property_readonly("v2", [](Qnn_Tensor_t& t) -> Qnn_TensorV2_t& {
+        if (t.version == QNN_TENSOR_VERSION_2) {
+          return t.v2;
+        }
+        throw std::runtime_error("Tensor version is not V2.");
+      });
+
+  py::enum_<Qnn_TensorVersion_t>(m, "Qnn_TensorVersion_t")
+      .value("QNN_TENSOR_VERSION_1", Qnn_TensorVersion_t::QNN_TENSOR_VERSION_1)
+      .value("QNN_TENSOR_VERSION_2", Qnn_TensorVersion_t::QNN_TENSOR_VERSION_2)
+      .value(
+          "QNN_TENSOR_VERSION_UNDEFINED",
+          Qnn_TensorVersion_t::QNN_TENSOR_VERSION_UNDEFINED)
+      .export_values();
+
+  py::class_<Qnn_TensorV1_t>(m, "QnnTensorV1")
+      .def_readonly("id", &Qnn_TensorV1_t::id)
+      .def_readonly("name", &Qnn_TensorV1_t::name)
+      .def_readonly("type", &Qnn_TensorV1_t::type)
+      .def_readonly("dataFormat", &Qnn_TensorV1_t::dataFormat)
+      .def_readonly("dataType", &Qnn_TensorV1_t::dataType)
+      .def_readonly("quantizeParams", &Qnn_TensorV1_t::quantizeParams)
+      .def_readonly("rank", &Qnn_TensorV1_t::rank)
+      // change dimensions pointer to vector(begin to rank)
+      .def_property_readonly(
+          "dimensions",
+          [](const Qnn_TensorV1_t& t) {
+            return std::vector<uint32_t>(t.dimensions, t.dimensions + t.rank);
+          })
+      .def_readonly("memType", &Qnn_TensorV1_t::memType);
+
+  py::enum_<Qnn_TensorMemType_t>(m, "Qnn_TensorMemType_t")
+      .value(
+          "QNN_TENSORMEMTYPE_RAW", Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_RAW)
+      .value(
+          "QNN_TENSORMEMTYPE_MEMHANDLE",
+          Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_MEMHANDLE)
+      .value(
+          "QNN_TENSORMEMTYPE_UNDEFINED",
+          Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_UNDEFINED)
+      .export_values();
+
+  py::class_<Qnn_QuantizeParams_t>(m, "QnnQuantizeParams")
+      .def_readonly(
+          "encodingDefinition", &Qnn_QuantizeParams_t::encodingDefinition)
+      .def_readonly(
+          "quantizationEncoding", &Qnn_QuantizeParams_t::quantizationEncoding)
+      .def_property_readonly(
+          "scaleOffsetEncoding",
+          [](const Qnn_QuantizeParams_t& qp) {
+            if (qp.quantizationEncoding ==
+                QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) {
+              return qp.scaleOffsetEncoding;
+            }
+            throw std::runtime_error(
+                "Invalid quantization encoding type for scaleOffsetEncoding.");
+          })
+      .def_property_readonly(
+          "axisScaleOffsetEncoding", [](const Qnn_QuantizeParams_t& qp) {
+            if (qp.quantizationEncoding ==
+                QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) {
+              return qp.axisScaleOffsetEncoding;
+            }
+            throw std::runtime_error(
+                "Invalid quantization encoding type for axisScaleOffsetEncoding.");
+          });
+
+  py::enum_<Qnn_Definition_t>(m, "QnnDefinition")
+      .value(
+          "QNN_DEFINITION_IMPL_GENERATED",
+          Qnn_Definition_t::QNN_DEFINITION_IMPL_GENERATED)
+      .value("QNN_DEFINITION_DEFINED", Qnn_Definition_t::QNN_DEFINITION_DEFINED)
+      .value(
+          "QNN_DEFINITION_UNDEFINED",
+          Qnn_Definition_t::QNN_DEFINITION_UNDEFINED)
+      .export_values();
+
+  py::class_<Qnn_AxisScaleOffset_t>(m, "QnnAxisScaleOffset")
+      .def_readonly("axis", &Qnn_AxisScaleOffset_t::axis)
+      .def_readonly("numScaleOffsets", &Qnn_AxisScaleOffset_t::numScaleOffsets)
+      .def_property_readonly(
+          "scaleOffset", [](const Qnn_AxisScaleOffset_t& aso) {
+            return std::vector<Qnn_ScaleOffset_t>(
+                aso.scaleOffset, aso.scaleOffset + aso.numScaleOffsets);
+          });
+  // op_wrapper.GetParams() get std::vector<ParamWrapper*>
 }
 } // namespace qnn
 } // namespace backends
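
To illustrate what the new Qnn_Param, Qnn_Scalar_t and QnnQuantizeParams bindings expose, here is a minimal Python sketch. It is an assumed usage, not code from this commit; `param` is taken from the GetOpConfig() "params" list and `v1` is a QnnTensorV1 as in the sketch above.

# Sketch only; assumes `param` is one entry of op_wrapper.GetOpConfig()["params"]
# and `v1` is a QnnTensorV1 (tensor.v1). Names mirror the bindings in this diff.
def describe_param(param):
    # paramType is a bound enum; pybind11 enums expose a .name string.
    if param.paramType.name == "QNN_PARAMTYPE_SCALAR":
        # value() stringifies the active union field via GetScalarValue().
        return f"{param.name} = {param.scalarParam.value()}"
    # tensorParam raises if the param is not a tensor param.
    return f"{param.name}: tensor {list(param.tensorParam.v1.dimensions)}"

def describe_quant(v1):
    qp = v1.quantizeParams
    enc = qp.quantizationEncoding.name
    if enc == "QNN_QUANTIZATION_ENCODING_SCALE_OFFSET":
        so = qp.scaleOffsetEncoding
        return f"scale={so.scale}, offset={so.offset}"
    if enc == "QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET":
        aso = qp.axisScaleOffsetEncoding
        return [(so.scale, so.offset) for so in aso.scaleOffset]
    return enc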

backends/qualcomm/aot/wrappers/OpWrapper.h

Lines changed: 13 additions & 0 deletions
@@ -102,6 +102,19 @@ class OpWrapper final {
   const std::string GetOpType() {
     return op_type_;
   }
+  const std::string GetName() {
+    return name_;
+  }
+  const std::string GetPackageName() {
+    return package_name_;
+  }
+  std::vector<ParamWrapper*> GetRawParams() const {
+    std::vector<ParamWrapper*> raw_params;
+    for (const auto& param : params_) {
+      raw_params.push_back(param.get());
+    }
+    return raw_params;
+  }
   Qnn_OpConfig_t GetOpConfig();
 
  private:
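
The accessors added here back the GetName, GetPackageName and GetParams bindings in PyQnnWrapperAdaptor.cpp above. From Python, labelling a node might look like the following sketch (again an assumed usage, not part of the commit):

# Sketch only: `op_wrapper` is assumed to be an OpWrapper exposed via
# PyQnnWrapperAdaptor; all methods used here are bound in this commit.
def op_label(op_wrapper):
    return (
        f"{op_wrapper.GetPackageName()}::{op_wrapper.GetOpType()} "
        f"({op_wrapper.GetName()})"
    )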
