# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Example CMakeLists.txt for registering custom ops into Executorch. In this
# example we have custom ops `my_ops::mul3.out` implemented in C++ in
# `examples/custom_ops/custom_ops_1.cpp`. We also have it registered into EXIR
# in `examples/custom_ops/custom_ops_1.py`. This CMakeLists.txt runs a script to
# generate wrapper code based on the operator-kernel binding defined in
# `examples/custom_ops/custom_ops.yaml`. Then creates a library that contains
# both binding wrapper and the implementation source file. This library can be
# linked into Executorch binary (`executor_runner` in this example) and it is
# ready to run models containing that custom op.
cmake_minimum_required(VERSION 3.13)

# Emit compile_commands.json so editors/clang-tooling can see the build flags.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Default to C++17 but let the caller (e.g. a superproject) override it.
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 17)
endif()

# Interpreter used to drive the codegen scripts below; overridable by caller.
if(NOT PYTHON_EXECUTABLE)
  set(PYTHON_EXECUTABLE python3)
endif()
# Source root directory for executorch.
if(NOT EXECUTORCH_ROOT)
  set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()
# Source root directory for pytorch.
if(NOT TORCH_ROOT)
  set(TORCH_ROOT ${EXECUTORCH_ROOT}/third-party/pytorch)
endif()
# Command to generate selected_operators.yaml from custom_ops.yaml.
set(_oplist_yaml ${CMAKE_CURRENT_BINARY_DIR}/selected_operators.yaml)
# Glob the codegen tooling/template/torchgen inputs so the custom commands
# below re-run when any of them change. NOTE(review): plain GLOB_RECURSE is
# only re-evaluated at configure time; newly added files are not picked up
# until the next reconfigure (CONFIGURE_DEPENDS needs CMake >= 3.12 and a
# supporting generator).
file(GLOB_RECURSE _codegen_tools_srcs "${EXECUTORCH_ROOT}/codegen/tools/*.py")
file(GLOB_RECURSE _codegen_templates "${EXECUTORCH_ROOT}/codegen/templates/*")
file(GLOB_RECURSE _torchgen_srcs "${TORCH_ROOT}/torchgen/*.py")

# Builds selected_operators.yaml (the operator selection list) from the
# custom-op schema. Run from EXECUTORCH_ROOT so `-m codegen.tools.gen_oplist`
# resolves as a module.
set(_gen_oplist_command
    "${PYTHON_EXECUTABLE}" -m codegen.tools.gen_oplist
    --output_path=${_oplist_yaml}
    --ops_schema_yaml_path=${CMAKE_CURRENT_LIST_DIR}/custom_ops.yaml)
# Command to codegen C++ wrappers to register custom ops to both PyTorch and
# Executorch runtime.
set(_gen_command
    "${PYTHON_EXECUTABLE}" -m torchgen.gen_executorch
    --source-path=${EXECUTORCH_ROOT}/codegen
    --install-dir=${CMAKE_CURRENT_BINARY_DIR}
    --tags-path=${TORCH_ROOT}/aten/src/ATen/native/tags.yaml
    --aten-yaml-path=${TORCH_ROOT}/aten/src/ATen/native/native_functions.yaml
    --op-selection-yaml-path=${_oplist_yaml}
    --custom-ops-yaml-path=${CMAKE_CURRENT_LIST_DIR}/custom_ops.yaml)

# Files that torchgen.gen_executorch writes into the binary dir; listed as the
# OUTPUT of the codegen custom command and consumed by custom_ops_lib below.
set(_gen_command_sources
    ${CMAKE_CURRENT_BINARY_DIR}/RegisterCodegenUnboxedKernelsEverything.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/RegisterCPUCustomOps.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/RegisterSchema.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/Functions.h
    ${CMAKE_CURRENT_BINARY_DIR}/NativeFunctions.h
    ${CMAKE_CURRENT_BINARY_DIR}/CustomOpsNativeFunctions.h)
message(STATUS "Generating selected operator list ${_gen_oplist_command}")
# Build rule producing the operator-selection yaml. VERBATIM keeps argument
# escaping platform-independent.
add_custom_command(
  COMMENT "Generating selected_operators.yaml for custom ops"
  OUTPUT ${_oplist_yaml}
  COMMAND ${_gen_oplist_command}
  DEPENDS ${CMAKE_CURRENT_LIST_DIR}/custom_ops.yaml ${_codegen_tools_srcs}
  WORKING_DIRECTORY ${EXECUTORCH_ROOT}
  VERBATIM)

# Build rule producing the C++ registration wrappers; depends on the oplist
# above plus the codegen templates and torchgen sources so edits re-trigger it.
add_custom_command(
  COMMENT "Generating code for custom operator registration"
  OUTPUT ${_gen_command_sources}
  COMMAND ${_gen_command}
  DEPENDS ${_oplist_yaml} ${CMAKE_CURRENT_LIST_DIR}/custom_ops.yaml
          ${_codegen_templates} ${_torchgen_srcs}
  WORKING_DIRECTORY ${EXECUTORCH_ROOT}
  VERBATIM)
# Prepare for C++ libraries.

# 1. TODO: C++ library to register custom ops into PyTorch.
# ~~~
# add_library(custom_ops_aot_lib SHARED
#   ${OUTPUT_DIRECTORY}/RegisterCPUCustomOps.cpp
#   ${OUTPUT_DIRECTORY}/RegisterSchema.cpp
#   ${OUTPUT_DIRECTORY}/CustomOpsNativeFunctions.h)
# ~~~

# Find `Torch`.
# ~~~
# find_package(Torch REQUIRED)
# target_link_libraries(custom_ops_aot_lib PUBLIC Torch)
# ~~~

# 1. C++ library to register custom ops into Executorch runtime.

# Static library bundling the generated registration wrappers with the custom
# op kernel implementation (custom_ops_1.cpp).
add_library(custom_ops_lib)
target_sources(
  custom_ops_lib
  PRIVATE
    ${CMAKE_CURRENT_BINARY_DIR}/RegisterCodegenUnboxedKernelsEverything.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/Functions.h
    ${CMAKE_CURRENT_BINARY_DIR}/NativeFunctions.h
    ${CMAKE_CURRENT_BINARY_DIR}/CustomOpsNativeFunctions.h
    ${CMAKE_CURRENT_LIST_DIR}/custom_ops_1.cpp)

target_link_libraries(custom_ops_lib PRIVATE executorch)

# Ensure that the load-time constructor functions run. By default, the linker
# would remove them since there are no other references to them. INTERFACE
# propagates the flag to whatever final binary links custom_ops_lib.
if((CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
   OR (APPLE AND CMAKE_CXX_COMPILER_ID MATCHES "Clang"))
  # Apple's ld uses -force_load (per-archive).
  target_link_options(custom_ops_lib INTERFACE
                      "-Wl,-force_load,$<TARGET_FILE:custom_ops_lib>")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
  # GNU ld / lld use --whole-archive, scoped back off with --no-whole-archive.
  target_link_options(
    custom_ops_lib INTERFACE
    "-Wl,--whole-archive,$<TARGET_FILE:custom_ops_lib>,--no-whole-archive")
endif()