/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/extension/flat_tensor/serialize/serialize.h>

#include <executorch/extension/flat_tensor/serialize/flat_tensor_header.h>
#include <executorch/extension/flat_tensor/serialize/scalar_type_generated.h>
#include <executorch/extension/flat_tensor/serialize/schema_generated.h>
#include <executorch/runtime/platform/log.h>

#include <fstream>
#include <map>
#include <ostream>
#include <string>
#include <vector>

namespace executorch {
namespace extension {
namespace flat_tensor {

namespace {
// Returns the padding required to align `offset` to `alignment`.
size_t padding_required(size_t offset, size_t alignment) {
  size_t remainder = offset % alignment;
  if (remainder != 0) {
    return alignment - remainder;
  }
  return 0;
}

// Returns `input_size` padded up to the next whole multiple of `alignment`.
size_t aligned_size(size_t input_size, size_t alignment) {
  return input_size + padding_required(input_size, alignment);
}
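
// Worked example (illustrative, not part of the original change): with
// alignment = 16, padding_required(13, 16) == 3 and aligned_size(13, 16) == 16,
// while padding_required(32, 16) == 0 and aligned_size(32, 16) == 32.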

// Writes `num_bytes` zero bytes to `out`; used below for padding.
void write_nulls(std::ostream& out, size_t num_bytes) {
  for (size_t i = 0; i < num_bytes; i++) {
    out.write("\0", 1);
  }
}
} // namespace
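
// Example usage (an illustrative sketch, not part of the original change: the
// file name, the tensor names, the `weight`/`bias` Tensor variables, and the
// 16-byte alignment are all assumptions made for the example):
//
//   std::map<std::string, exec_aten::Tensor> named_tensors;
//   named_tensors.emplace("fc1.weight", weight);
//   named_tensors.emplace("fc1.bias", bias);
//   runtime::Error err = save_ptd("model.ptd", named_tensors, 16);
//   if (err != runtime::Error::Ok) {
//     ET_LOG(Error, "Failed to write .ptd file");
//   }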

runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Create the output file. Open in binary mode so padding and tensor bytes
  // are written verbatim.
  std::ofstream file;
  file.open(path, std::ios::binary);
  if (!file.is_open()) {
    ET_LOG(Error, "Failed to open %s for writing", path.c_str());
    return runtime::Error::AccessFailed;
  }
  runtime::Error e = save_ptd(file, tensor_map, tensor_alignment);
  file.close();
  return e;
}

runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Assert that the system is little endian. Since the data is consumed on
  // other machines, it must always be written in the same byte order; for now
  // only little endian is supported.
  const int n = 1;
  if (*reinterpret_cast<const char*>(&n) != 1) {
    ET_LOG(Error, "Cannot save_ptd on big endian system");
    return runtime::Error::NotSupported;
  }
  // Create the flatbuffer.
  flatbuffers::FlatBufferBuilder builder;

  std::vector<flatbuffers::Offset<::flat_tensor_flatbuffer::TensorMetadata>>
      tensors;
  std::vector<flatbuffers::Offset<::flat_tensor_flatbuffer::DataSegment>>
      buffers;

  // Write the tensor metadata, accumulating each tensor's offset into the
  // single data segment as we go.
  size_t total_segment_size = 0;
  size_t remaining = tensor_map.size();
  for (const auto& [name, tensor] : tensor_map) {
    auto name_offset = builder.CreateString(name);
    auto tensor_metadata = ::flat_tensor_flatbuffer::CreateTensorMetadata(
        builder,
        name_offset,
        static_cast<executorch_flatbuffer::ScalarType>(tensor.scalar_type()),
        builder.CreateVector(tensor.sizes().data(), tensor.sizes().size()),
        builder.CreateVector(
            tensor.dim_order().data(), tensor.dim_order().size()),
        0, // segment index
        total_segment_size); // offset of this tensor within the segment

    tensors.push_back(tensor_metadata);
    // Pad every entry up to tensor_alignment, except the last one.
    if (remaining != 1) {
      total_segment_size += aligned_size(tensor.nbytes(), tensor_alignment);
    } else {
      total_segment_size += tensor.nbytes();
    }
    remaining--;
  }
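
  // Worked example (illustrative): with tensor_alignment = 16 and two tensors
  // of 10 and 6 bytes, the first is recorded at segment offset 0 and the
  // second at offset 16 (10 data bytes + 6 padding bytes), and
  // total_segment_size is 16 + 6 = 22 since the last entry is not padded.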

  // All tensors are packed into a single data segment.
  buffers.push_back(::flat_tensor_flatbuffer::CreateDataSegment(
      builder, 0, total_segment_size)); // one segment starting at offset 0

  auto flat_tensor = CreateFlatTensor(
      builder,
      kSchemaVersion,
      tensor_alignment,
      builder.CreateVector(tensors),
      builder.CreateVector(buffers));
  builder.Finish(flat_tensor); // The flatbuffer is complete at this point.

  // Calculate the padded sizes of the header and the flatbuffer.
  auto padded_flatbuffer_size =
      aligned_size(builder.GetSize(), tensor_alignment);
  auto padded_header_size =
      aligned_size(FlatTensorHeader::kHeaderExpectedLength, tensor_alignment);
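
  // For reference, the on-disk layout produced by the writes below:
  //   [header: kHeaderExpectedLength bytes, padded to tensor_alignment]
  //   [flatbuffer: builder.GetSize() bytes, padded to tensor_alignment]
  //   [segment data: total_segment_size bytes]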

  // Write the header. The fields are written individually (rather than dumping
  // the FlatTensorHeader struct in one shot) so the on-disk layout does not
  // depend on struct padding.
  out.write(FlatTensorHeader::kMagic, sizeof(FlatTensorHeader::kMagic));
  out.write(
      reinterpret_cast<const char*>(&FlatTensorHeader::kHeaderExpectedLength),
      sizeof(FlatTensorHeader::kHeaderExpectedLength));

  FlatTensorHeader header = {
      padded_header_size, // offset to the flatbuffer
      builder.GetSize(), // flatbuffer size
      padded_header_size + padded_flatbuffer_size, // offset to the segments
      total_segment_size // segment data size
  };

  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_offset),
      sizeof(header.flatbuffer_offset));
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_size),
      sizeof(header.flatbuffer_size));
  out.write(
      reinterpret_cast<const char*>(&header.segment_base_offset),
      sizeof(header.segment_base_offset));
  out.write(
      reinterpret_cast<const char*>(&header.segment_data_size),
      sizeof(header.segment_data_size));

  // Pad the header out to tensor_alignment.
  write_nulls(
      out,
      padding_required(
          FlatTensorHeader::kHeaderExpectedLength, tensor_alignment));

  // Write the flatbuffer itself, then pad it out to tensor_alignment.
  out.write(
      reinterpret_cast<const char*>(builder.GetBufferPointer()),
      builder.GetSize());
  write_nulls(out, padding_required(builder.GetSize(), tensor_alignment));

  // Write the segment: each tensor's bytes followed by its padding.
  remaining = tensor_map.size();
  for (const auto& [name, tensor] : tensor_map) {
    (void)name; // Only the tensor data is needed here.
    out.write(
        reinterpret_cast<const char*>(tensor.data_ptr()), tensor.nbytes());
    // Don't pad the last entry, matching the metadata offsets computed above.
    if (remaining != 1) {
      write_nulls(out, padding_required(tensor.nbytes(), tensor_alignment));
    }
    remaining--;
  }
  return runtime::Error::Ok;
}

} // namespace flat_tensor
} // namespace extension
} // namespace executorch