Skip to content

Commit eb5c334

Browse files
committed
feat: Add DrawGraph tool for graph visualization
- Implemented the DrawGraph tool to visualize graphs. - Added test cases to validate the correctness of the generated graphs.
1 parent ac8bf78 commit eb5c334

File tree

5 files changed

+729
-2
lines changed

5 files changed

+729
-2
lines changed

backends/qualcomm/aot/python/PyQnnWrapperAdaptor.cpp

Lines changed: 238 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,37 @@ std::unique_ptr<QuantizeParamsWrapper> CreateQuantizationParamWrapper(
6666
return quantize_param_wrapper;
6767
}
6868

69+
std::string GetScalarValue(const Qnn_Scalar_t& scalar) {
70+
switch (scalar.dataType) {
71+
case QNN_DATATYPE_FLOAT_32:
72+
return std::to_string(scalar.floatValue);
73+
case QNN_DATATYPE_FLOAT_64:
74+
return std::to_string(scalar.doubleValue);
75+
case QNN_DATATYPE_UINT_64:
76+
return std::to_string(scalar.uint64Value);
77+
case QNN_DATATYPE_INT_64:
78+
return std::to_string(scalar.int64Value);
79+
case QNN_DATATYPE_UINT_32:
80+
return std::to_string(scalar.uint32Value);
81+
case QNN_DATATYPE_INT_32:
82+
return std::to_string(scalar.int32Value);
83+
case QNN_DATATYPE_UINT_16:
84+
return std::to_string(scalar.uint16Value);
85+
case QNN_DATATYPE_INT_16:
86+
return std::to_string(scalar.int16Value);
87+
case QNN_DATATYPE_UINT_8:
88+
return std::to_string(scalar.uint8Value);
89+
case QNN_DATATYPE_INT_8:
90+
return std::to_string(scalar.int8Value);
91+
case QNN_DATATYPE_BOOL_8:
92+
return std::to_string(static_cast<int>(scalar.bool8Value));
93+
case QNN_DATATYPE_STRING:
94+
return std::string(scalar.stringValue);
95+
default:
96+
return "QNN_DATATYPE_UNDEFINED";
97+
}
98+
}
99+
69100
std::shared_ptr<TensorWrapper> CreateTensorWrapper(
70101
const std::string& tensor_name,
71102
Qnn_TensorType_t tensor_type,
@@ -176,11 +207,68 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
176207
Qnn_QuantizationEncoding_t::
177208
QNN_QUANTIZATION_ENCODING_BW_AXIS_SCALE_OFFSET)
178209
.export_values();
210+
179211
py::class_<OpWrapper, std::shared_ptr<OpWrapper>>(m, "OpWrapper")
180212
.def(py::init<
181213
const std::string&,
182214
const std::string&,
183-
const std::string&>());
215+
const std::string&>())
216+
.def(
217+
"GetInputTensors",
218+
&OpWrapper::GetInputTensors,
219+
"A function which get input tensors")
220+
.def(
221+
"GetOutputTensors",
222+
&OpWrapper::GetOutputTensors,
223+
"A function which get output tensors")
224+
.def(
225+
"GetOpType",
226+
&OpWrapper::GetOpType,
227+
"A function which get op type")
228+
.def(
229+
"GetName",
230+
&OpWrapper::GetName,
231+
"A function which get name")
232+
.def(
233+
"GetPackageName",
234+
&OpWrapper::GetPackageName,
235+
"A function which get package name")
236+
.def(
237+
"GetParams",
238+
&OpWrapper::GetRawParams,
239+
"A function which get params")
240+
// lambda function
241+
// python: op_wrapper.GetOpConfig()
242+
.def(
243+
"GetOpConfig",
244+
[](OpWrapper& self) {
245+
auto op_config = self.GetOpConfig();
246+
py::dict result;
247+
py::list params_list;
248+
py::list input_tensors_list;
249+
py::list output_tensors_list;
250+
result["version"] = op_config.version;
251+
result["name"] = op_config.v1.name;
252+
result["packageName"] = op_config.v1.packageName;
253+
result["typeName"] = op_config.v1.typeName;
254+
result["numOfParams"] = op_config.v1.numOfParams;
255+
for (size_t i = 0; i < op_config.v1.numOfParams; ++i) {
256+
params_list.append(op_config.v1.params[i]);
257+
}
258+
result["params"] = params_list;
259+
result["numOfInputs"] = op_config.v1.numOfInputs;
260+
for (size_t i = 0; i < op_config.v1.numOfInputs; ++i) {
261+
input_tensors_list.append(op_config.v1.inputTensors[i]);
262+
}
263+
result["inputTensors"] = input_tensors_list;
264+
result["numOfOutputs"] = op_config.v1.numOfOutputs;
265+
for (size_t i = 0; i < op_config.v1.numOfOutputs; ++i) {
266+
output_tensors_list.append(op_config.v1.outputTensors[i]);
267+
}
268+
result["outputTensors"] = output_tensors_list;
269+
return result;
270+
},
271+
"Get operator configuration");
184272

185273
py::class_<TensorWrapper, std::shared_ptr<TensorWrapper>>(m, "TensorWrapper")
186274
.def(py::init(py::overload_cast<
@@ -197,7 +285,9 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
197285
py::class_<QuantizeParamsWrapper>(m, "QuantizeParamsWrapper");
198286

199287
py::class_<Qnn_ScaleOffset_t>(m, "Qnn_ScaleOffset_t")
200-
.def(py::init<float, int32_t>());
288+
.def(py::init<float, int32_t>())
289+
.def_readonly("scale", &Qnn_ScaleOffset_t::scale)
290+
.def_readonly("offset", &Qnn_ScaleOffset_t::offset);
201291

202292
py::class_<PyQnnOpWrapper, std::shared_ptr<PyQnnOpWrapper>>(
203293
m, "PyQnnOpWrapper")
@@ -248,6 +338,152 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
248338
.def("GetDataType", &PyQnnTensorWrapper::GetDataType)
249339
.def("GetName", &PyQnnTensorWrapper::GetName)
250340
.def("GetEncodings", &PyQnnTensorWrapper::GetEncodings);
341+
342+
py::class_<Qnn_OpConfig_t>(m, "Qnn_OpConfig")
343+
.def_readonly("version", &Qnn_OpConfig_t::version)
344+
// getter
345+
// python: op_wrapper.GetOpConfig().v1
346+
.def_property_readonly(
347+
"v1",
348+
[](const Qnn_OpConfig_t& config) -> const Qnn_OpConfigV1_t& {
349+
return config.v1;
350+
});
351+
352+
py::enum_<Qnn_OpConfigVersion_t>(m, "Qnn_OpConfigVersion")
353+
.value("QNN_OPCONFIG_VERSION_1", QNN_OPCONFIG_VERSION_1)
354+
.value("QNN_OPCONFIG_VERSION_UNDEFINED", QNN_OPCONFIG_VERSION_UNDEFINED)
355+
.export_values();
356+
357+
py::class_<Qnn_OpConfigV1_t>(m, "Qnn_OpConfigV1")
358+
.def_readonly("name", &Qnn_OpConfigV1_t::name)
359+
.def_readonly("packageName", &Qnn_OpConfigV1_t::packageName)
360+
.def_readonly("typeName", &Qnn_OpConfigV1_t::typeName)
361+
.def_readonly("numOfParams", &Qnn_OpConfigV1_t::numOfParams)
362+
.def_readonly("params", &Qnn_OpConfigV1_t::params)
363+
.def_readonly("numOfInputs", &Qnn_OpConfigV1_t::numOfInputs)
364+
.def_readonly("inputTensors", &Qnn_OpConfigV1_t::inputTensors)
365+
.def_readonly("numOfOutputs", &Qnn_OpConfigV1_t::numOfOutputs)
366+
.def_readonly("outputTensors", &Qnn_OpConfigV1_t::outputTensors);
367+
368+
py::class_<Qnn_Param_t>(m, "Qnn_Param")
369+
.def_readonly("paramType", &Qnn_Param_t::paramType)
370+
.def_readonly("name", &Qnn_Param_t::name)
371+
.def_property_readonly(
372+
"scalarParam",
373+
[](const Qnn_Param_t& param) -> const Qnn_Scalar_t& {
374+
if (param.paramType == Qnn_ParamType_t::QNN_PARAMTYPE_SCALAR) {
375+
return param.scalarParam;
376+
}
377+
throw std::runtime_error("ParamType is not scalar.");
378+
})
379+
.def_property_readonly(
380+
"tensorParam",
381+
[](const Qnn_Param_t& param) -> const Qnn_Tensor_t& {
382+
if (param.paramType == Qnn_ParamType_t::QNN_PARAMTYPE_TENSOR) {
383+
return param.tensorParam;
384+
}
385+
throw std::runtime_error("ParamType is not tensor.");
386+
});
387+
388+
py::enum_<Qnn_ParamType_t>(m, "Qnn_ParamType_t")
389+
.value("QNN_PARAMTYPE_SCALAR", Qnn_ParamType_t::QNN_PARAMTYPE_SCALAR)
390+
.value("QNN_PARAMTYPE_TENSOR", Qnn_ParamType_t::QNN_PARAMTYPE_TENSOR)
391+
.value("QNN_PARAMTYPE_UNDEFINED", Qnn_ParamType_t::QNN_PARAMTYPE_UNDEFINED)
392+
.export_values();
393+
394+
py::class_<Qnn_Scalar_t>(m, "Qnn_Scalar_t")
395+
.def_readonly("dataType", &Qnn_Scalar_t::dataType)
396+
.def("value", &GetScalarValue, "Get the value of the scalar as a string");
397+
398+
py::class_<Qnn_Tensor_t>(m, "Qnn_Tensor_t")
399+
.def_readonly("version", &Qnn_Tensor_t::version)
400+
.def_property_readonly("v1",
401+
[](Qnn_Tensor_t& t) -> Qnn_TensorV1_t& {
402+
if (t.version == QNN_TENSOR_VERSION_1) {
403+
return t.v1;
404+
}
405+
throw std::runtime_error("Tensor version is not V1.");
406+
})
407+
.def_property_readonly("v2",
408+
[](Qnn_Tensor_t& t) -> Qnn_TensorV2_t& {
409+
if (t.version == QNN_TENSOR_VERSION_2) {
410+
return t.v2;
411+
}
412+
throw std::runtime_error("Tensor version is not V2.");
413+
});
414+
415+
py::enum_<Qnn_TensorVersion_t>(m, "Qnn_TensorVersion_t")
416+
.value(
417+
"QNN_TENSOR_VERSION_1",
418+
Qnn_TensorVersion_t::QNN_TENSOR_VERSION_1)
419+
.value(
420+
"QNN_TENSOR_VERSION_2",
421+
Qnn_TensorVersion_t::QNN_TENSOR_VERSION_2)
422+
.value(
423+
"QNN_TENSOR_VERSION_UNDEFINED",
424+
Qnn_TensorVersion_t::QNN_TENSOR_VERSION_UNDEFINED)
425+
.export_values();
426+
427+
py::class_<Qnn_TensorV1_t>(m, "QnnTensorV1")
428+
.def_readonly("id", &Qnn_TensorV1_t::id)
429+
.def_readonly("name", &Qnn_TensorV1_t::name)
430+
.def_readonly("type", &Qnn_TensorV1_t::type)
431+
.def_readonly("dataFormat", &Qnn_TensorV1_t::dataFormat)
432+
.def_readonly("dataType", &Qnn_TensorV1_t::dataType)
433+
.def_readonly("quantizeParams", &Qnn_TensorV1_t::quantizeParams)
434+
.def_readonly("rank", &Qnn_TensorV1_t::rank)
435+
// change dimensions pointer to vector(begin to rank)
436+
.def_property_readonly(
437+
"dimensions",
438+
[](const Qnn_TensorV1_t& t) {
439+
return std::vector<uint32_t>(t.dimensions, t.dimensions + t.rank);
440+
})
441+
.def_readonly("memType", &Qnn_TensorV1_t::memType);
442+
443+
py::enum_<Qnn_TensorMemType_t>(m, "Qnn_TensorMemType_t")
444+
.value("QNN_TENSORMEMTYPE_RAW", Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_RAW)
445+
.value("QNN_TENSORMEMTYPE_MEMHANDLE", Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_MEMHANDLE)
446+
.value("QNN_TENSORMEMTYPE_UNDEFINED", Qnn_TensorMemType_t::QNN_TENSORMEMTYPE_UNDEFINED)
447+
.export_values();
448+
449+
py::class_<Qnn_QuantizeParams_t>(m, "QnnQuantizeParams")
450+
.def_readonly("encodingDefinition", &Qnn_QuantizeParams_t::encodingDefinition)
451+
.def_readonly("quantizationEncoding", &Qnn_QuantizeParams_t::quantizationEncoding)
452+
.def_property_readonly(
453+
"scaleOffsetEncoding",
454+
[](const Qnn_QuantizeParams_t& qp) {
455+
if (qp.quantizationEncoding == QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) {
456+
return qp.scaleOffsetEncoding;
457+
}
458+
throw std::runtime_error("Invalid quantization encoding type for scaleOffsetEncoding.");
459+
}
460+
)
461+
.def_property_readonly(
462+
"axisScaleOffsetEncoding",
463+
[](const Qnn_QuantizeParams_t& qp) {
464+
if (qp.quantizationEncoding == QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) {
465+
return qp.axisScaleOffsetEncoding;
466+
}
467+
throw std::runtime_error("Invalid quantization encoding type for axisScaleOffsetEncoding.");
468+
}
469+
);
470+
471+
py::enum_<Qnn_Definition_t>(m, "QnnDefinition")
472+
.value("IMPL_GENERATED", Qnn_Definition_t::QNN_DEFINITION_IMPL_GENERATED)
473+
.value("DEFINED", Qnn_Definition_t::QNN_DEFINITION_DEFINED)
474+
.value("UNDEFINED", Qnn_Definition_t::QNN_DEFINITION_UNDEFINED)
475+
.export_values();
476+
477+
py::class_<Qnn_AxisScaleOffset_t>(m, "QnnAxisScaleOffset")
478+
.def_readonly("axis", &Qnn_AxisScaleOffset_t::axis)
479+
.def_readonly("numScaleOffsets", &Qnn_AxisScaleOffset_t::numScaleOffsets)
480+
.def_property_readonly(
481+
"scaleOffset",
482+
[](const Qnn_AxisScaleOffset_t& aso) {
483+
return std::vector<Qnn_ScaleOffset_t>(aso.scaleOffset, aso.scaleOffset + aso.numScaleOffsets);
484+
}
485+
);
486+
// op_wrapper.GetParams() get std::vector<ParamWrapper*>
251487
}
252488
} // namespace qnn
253489
} // namespace backends

backends/qualcomm/aot/wrappers/OpWrapper.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,19 @@ class OpWrapper final {
102102
const std::string GetOpType() {
103103
return op_type_;
104104
}
105+
const std::string GetName() {
106+
return name_;
107+
}
108+
const std::string GetPackageName() {
109+
return package_name_;
110+
}
111+
std::vector<ParamWrapper*> GetRawParams() const {
112+
std::vector<ParamWrapper*> raw_params;
113+
for (const auto& param : params_) {
114+
raw_params.push_back(param.get());
115+
}
116+
return raw_params;
117+
}
105118
Qnn_OpConfig_t GetOpConfig();
106119

107120
private:

backends/qualcomm/tests/models.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1079,3 +1079,19 @@ def forward(self, x, y):
10791079
x = x.view(new_shape)
10801080
x = x.permute(0, 2, 1, 3)
10811081
return torch.matmul(x, y.transpose(-1, -2))
1082+
1083+
class draw_graph_model(torch.nn.Module):
    """Two-branch conv/relu model used to exercise the DrawGraph tool.

    Each branch is conv -> relu; the branch outputs are summed. Using a
    distinct ReLU module per branch keeps the two activations as separate
    nodes in the drawn graph.
    """

    def __init__(self):
        super().__init__()
        self.relu1 = torch.nn.ReLU()
        self.relu2 = torch.nn.ReLU()
        kernel_sz = 32
        # padding=1 with a 3x3 kernel preserves spatial dimensions.
        self.conv1 = torch.nn.Conv2d(kernel_sz, kernel_sz, 3, padding=1, bias=True)
        self.conv2 = torch.nn.Conv2d(kernel_sz, kernel_sz, 3, padding=1, bias=True)

    def forward(self, x):
        x1 = self.conv1(x)
        x2 = self.conv2(x)
        y1 = self.relu1(x1)
        # Fix: the second branch previously reused self.relu1, leaving
        # self.relu2 unused and collapsing both branches onto one module in
        # the visualized graph. Numerically identical (ReLU is stateless).
        y2 = self.relu2(x2)
        return y1 + y2

0 commit comments

Comments
 (0)