/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <map>
#include <set>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <executorch/runtime/platform/assert.h>
#include <executorch/schema/program_generated.h>

namespace py = pybind11;

namespace torch {
namespace executor {

namespace {

// Metadata for kernel-call IO variables.
// dtype and dim_order are only meaningful when the corresponding variable is
// a Tensor.
struct IOMetaData {
  int kernel_type;
  int dtype;
  std::vector<unsigned int> dim_order;

  // Creates metadata for a tensor variable, recording its dtype and dim
  // order.
  explicit IOMetaData(const executorch_flatbuffer::Tensor* t)
      : kernel_type(
            static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)),
        dtype(static_cast<int>(t->scalar_type())) {
    for (size_t i = 0; i < t->dim_order()->size(); i++) {
      dim_order.push_back(static_cast<unsigned int>(t->dim_order()->Get(i)));
    }
  }

  // Creates metadata for a non-tensor variable. Tensor-carrying types must
  // use the constructor above instead.
  explicit IOMetaData(executorch_flatbuffer::KernelTypes type)
      : kernel_type(static_cast<int>(type)) {
    ET_CHECK(
        type != executorch_flatbuffer::KernelTypes::Tensor &&
        type != executorch_flatbuffer::KernelTypes::TensorList &&
        type != executorch_flatbuffer::KernelTypes::OptionalTensorList);
  }
};

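// Strict weak ordering over kernel IO signatures so they can serve as
// std::set keys: shorter signatures order first, then element-wise by kernel
// type, dtype, and dim order (the latter two only for tensors).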
struct KernelIOMetaDataComparison {
  bool operator()(
      const std::vector<IOMetaData>& lhs,
      const std::vector<IOMetaData>& rhs) const {
    if (lhs.size() != rhs.size()) {
      return lhs.size() < rhs.size();
    }
    for (size_t i = 0; i < lhs.size(); i++) {
      if (lhs[i].kernel_type != rhs[i].kernel_type) {
        return lhs[i].kernel_type < rhs[i].kernel_type;
      }
      if (lhs[i].kernel_type !=
          static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)) {
        continue;
      }
      if (lhs[i].dtype != rhs[i].dtype) {
        return lhs[i].dtype < rhs[i].dtype;
      }
      if (lhs[i].dim_order != rhs[i].dim_order) {
        return lhs[i].dim_order < rhs[i].dim_order;
      }
    }
    return false;
  }
};

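// The IO metadata of a single kernel call: one entry per argument.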
using KernelIOMetadata = std::vector<IOMetaData>;

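// The set of distinct IO signatures observed for a single operator.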
using OpIOMetaData = std::set<KernelIOMetadata, KernelIOMetaDataComparison>;

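// Returns the fully qualified name ("name" or "name.overload") of every
// operator referenced by the given execution plan.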
std::vector<std::string> get_operators_from_execution_plan(
    const executorch_flatbuffer::ExecutionPlan& plan) {
  std::vector<std::string> op_names;
  for (const executorch_flatbuffer::Operator* op : *plan.operators()) {
    if (op->overload()->str().empty()) {
      op_names.push_back(op->name()->str());
    } else {
      op_names.push_back(op->name()->str() + "." + op->overload()->str());
    }
  }
  return op_names;
}

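// Walks every kernel call in the given execution plan and records, for each
// operator, the set of distinct IO signatures it is invoked with.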
std::map<std::string, OpIOMetaData>
get_kernel_tensor_metadatas_from_execution_plan(
    const executorch_flatbuffer::ExecutionPlan* plan) {
  std::map<std::string, OpIOMetaData> op_io_metadata;
  for (const executorch_flatbuffer::Chain* chain : *plan->chains()) {
    for (const executorch_flatbuffer::Instruction* inst :
         *chain->instructions()) {
      if (inst->instr_args_type() ==
          executorch_flatbuffer::InstructionArguments::KernelCall) {
        const executorch_flatbuffer::KernelCall* kernel_call =
            inst->instr_args_as_KernelCall();
        const executorch_flatbuffer::Operator* op =
            plan->operators()->Get(kernel_call->op_index());
        std::string op_overload_name = op->name()->str();
        if (op->overload()->size()) {
          op_overload_name += "." + op->overload()->str();
        }

        // Create an empty entry if the current operator is not yet in the
        // map.
        if (op_io_metadata.count(op_overload_name) == 0) {
          op_io_metadata.insert(
              std::make_pair(op_overload_name, OpIOMetaData()));
        }

        // Walk the IOs of this kernel call and collect their metadata.
        KernelIOMetadata kernel_io_metadata;
        for (int arg_id : *kernel_call->args()) {
          const executorch_flatbuffer::EValue* arg =
              plan->values()->Get(arg_id);
          if (arg->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
            kernel_io_metadata.push_back(IOMetaData(arg->val_as_Tensor()));
          } else if (
              arg->val_type() ==
              executorch_flatbuffer::KernelTypes::TensorList) {
            if (arg->val_as_TensorList()->items()->size() == 0) {
              // Treat an empty tensor list as Null since no metadata can be
              // extracted from it.
              kernel_io_metadata.push_back(
                  IOMetaData(executorch_flatbuffer::KernelTypes::Null));
            } else {
              // All elements in a TensorList are tensors and share the same
              // metadata, so use the first element's metadata for the whole
              // list.
              const executorch_flatbuffer::Tensor* tensor_arg =
                  plan->values()
                      ->Get(arg->val_as_TensorList()->items()->Get(0))
                      ->val_as_Tensor();
              kernel_io_metadata.push_back(IOMetaData(tensor_arg));
            }
          } else if (
              arg->val_type() ==
              executorch_flatbuffer::KernelTypes::OptionalTensorList) {
            // Elements of an OptionalTensorList are either tensors or null,
            // and all tensors share the same metadata, so use the metadata of
            // the first tensor element for the whole list. If no tensor
            // exists (e.g. every element is None), treat the whole list as a
            // single null element.
            const executorch_flatbuffer::OptionalTensorList* opt_tensor_list =
                arg->val_as_OptionalTensorList();

            // Find the first non-null tensor element.
            bool found_tensor_element = false;
            for (size_t i = 0; i < opt_tensor_list->items()->size(); i++) {
              // A null element may be represented either as index == -1 or as
              // a serialized Null-typed EValue, so check for both.
              if (opt_tensor_list->items()->Get(i) != -1 &&
                  plan->values()
                          ->Get(opt_tensor_list->items()->Get(i))
                          ->val_type() ==
                      executorch_flatbuffer::KernelTypes::Tensor) {
                const executorch_flatbuffer::Tensor* tensor_arg =
                    plan->values()
                        ->Get(opt_tensor_list->items()->Get(i))
                        ->val_as_Tensor();
                kernel_io_metadata.push_back(IOMetaData(tensor_arg));
                found_tensor_element = true;
                break;
              }
            }
            if (!found_tensor_element) {
              kernel_io_metadata.push_back(
                  IOMetaData(executorch_flatbuffer::KernelTypes::Null));
            }
          } else {
            kernel_io_metadata.push_back(IOMetaData(arg->val_type()));
          }
        }
        op_io_metadata[op_overload_name].insert(kernel_io_metadata);
      }
    }
  }
  return op_io_metadata;
}
} // namespace

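// Parses a Program from the given flatbuffer data. The returned Program
// points into `buffer`, so the caller must keep the bytes object alive for as
// long as the Program is in use.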
const executorch_flatbuffer::Program* _get_program_from_buffer(
    const py::bytes& buffer) {
  return executorch_flatbuffer::GetProgram(
      buffer.cast<std::string_view>().data());
}

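// Returns the names of all operators referenced by any execution plan in the
// given program.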
py::list _get_program_operators(const executorch_flatbuffer::Program* program) {
  const auto& plans = *program->execution_plan();
  std::vector<std::string> op_names;
  for (const auto& plan : plans) {
    auto plan_ops = get_operators_from_execution_plan(*plan);
    if (!plan_ops.empty()) {
      op_names.insert(op_names.end(), plan_ops.begin(), plan_ops.end());
    }
  }
  return py::cast(op_names);
}

// Exposes IO metadata for all operators in the given program.
py::dict _get_io_metadata_for_program_operators(
    const executorch_flatbuffer::Program* program) {
  const auto& plans = *program->execution_plan();
  std::map<std::string, OpIOMetaData> program_op_io_metadata;

  // Aggregate op metadata across all execution plans.
  for (const executorch_flatbuffer::ExecutionPlan* plan : plans) {
    std::map<std::string, OpIOMetaData> plan_op_io_metadata =
        get_kernel_tensor_metadatas_from_execution_plan(plan);

    for (const auto& op_io_metadata : plan_op_io_metadata) {
      std::string op_name = op_io_metadata.first;
      if (program_op_io_metadata.count(op_name) == 0) {
        program_op_io_metadata.insert(std::make_pair(op_name, OpIOMetaData()));
      }
      program_op_io_metadata[op_name].insert(
          plan_op_io_metadata[op_name].begin(),
          plan_op_io_metadata[op_name].end());
    }
  }

  // Convert program_op_io_metadata into Python data structures.
  py::dict py_program_op_io_metadata;
  for (const auto& op_io_meta : program_op_io_metadata) {
    py::set py_op_io_meta;
    for (const auto& io_metas : op_io_meta.second) {
      py::list py_io_metadatas;
      for (const auto& io_metadata : io_metas) {
        py_io_metadatas.append(io_metadata);
      }
      py_op_io_meta.add(py::tuple(py_io_metadatas));
    }
    py_program_op_io_metadata[op_io_meta.first.data()] = py_op_io_meta;
  }

  return py_program_op_io_metadata;
}

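// Binds the Program wrapper, the helper functions above, and the _IOMetaData
// record type into the Python extension module.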
PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
  py::class_<executorch_flatbuffer::Program>(m, "_Program");

  m.def(
      "_get_program_from_buffer",
      &_get_program_from_buffer,
      py::return_value_policy::reference);

  m.def(
      "_get_program_operators",
      &_get_program_operators,
      py::return_value_policy::copy);

  m.def(
      "_get_io_metadata_for_program_operators",
      &_get_io_metadata_for_program_operators,
      py::return_value_policy::copy);

  py::class_<IOMetaData>(m, "_IOMetaData")
      .def_readwrite("kernel_type", &IOMetaData::kernel_type)
      .def_readwrite("dtype", &IOMetaData::dtype)
      .def_readwrite("dim_order", &IOMetaData::dim_order);
}

} // namespace executor
} // namespace torch
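
// A minimal sketch of how these bindings might be used from Python. The
// import name below is hypothetical; the real module name is whatever
// EXECUTORCH_PYTHON_MODULE_NAME expands to at build time:
//
//   import executorch_program  # hypothetical module name
//
//   with open("model.pte", "rb") as f:
//       buffer = f.read()  # must stay alive while `program` is used
//   program = executorch_program._get_program_from_buffer(buffer)
//   print(executorch_program._get_program_operators(program))
//   io_meta = executorch_program._get_io_metadata_for_program_operators(program)
//   for op, signatures in io_meta.items():
//       print(op, signatures)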