Commit 97e0417

pytorchbot and lucylq authored
[executorch][serialization] Format program.fbs with consistent whitespace (#6809)
Pull Request resolved: #6680

ghstack-source-id: 253177479
@exported-using-ghexport

Differential Revision: [D65495297](https://our.internmc.facebook.com/intern/diff/D65495297/)

Co-authored-by: lucylq <[email protected]>
1 parent 7b85117 · commit 97e0417

File tree

1 file changed (+56, -56 lines)


schema/program.fbs

Lines changed: 56 additions & 56 deletions
@@ -16,8 +16,8 @@ file_extension "pte";
 // Table that contains the metadata about how
 // to unflatten the flattened input/output from compiler
 table ContainerMetadata {
-  encoded_inp_str:string;
-  encoded_out_str:string;
+  encoded_inp_str: string;
+  encoded_out_str: string;
 }

 table Null {}
@@ -27,7 +27,7 @@ table Null {}
 // This refers to where the buffer needs to be placed in an existing
 // memory and at what offset from its base address.
 table AllocationDetails {
-  memory_id:uint; // ID of the memory where this data needs to be placed.
+  memory_id: uint; // ID of the memory where this data needs to be placed.

   // Offset in bytes relative to the start of the memory area indicated by
   // memory_id.
@@ -36,8 +36,8 @@ table AllocationDetails {
   // larger models. To preserve backwards compatibility, the high bits are
   // managed in a separate 32-bit field. Users should combine the two fields
   // to get the full 64-bit offset.
-  memory_offset_low:uint; // Least significant 32 bits
-  memory_offset_high:uint; // Most significant 32 bits. Defaults to zero.
+  memory_offset_low: uint; // Least significant 32 bits
+  memory_offset_high: uint; // Most significant 32 bits. Defaults to zero.
 }

 // Indicates the types of shape a Tensor may have, from the point
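
The comment above on memory_offset_low/memory_offset_high implies a simple reconstruction rule for the full 64-bit offset. A minimal Python sketch of that arithmetic (the helper name is illustrative, not part of the schema or runtime):

```python
def full_memory_offset(memory_offset_low: int, memory_offset_high: int) -> int:
    """Combine the two 32-bit schema fields into the full 64-bit byte offset."""
    return (memory_offset_high << 32) | memory_offset_low

# Example: an offset just past the 4 GiB boundary.
assert full_memory_offset(memory_offset_low=8, memory_offset_high=1) == (1 << 32) + 8
```
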
@@ -61,21 +61,21 @@ table ExtraTensorInfo {
   // program.mutable_data_segments that specifies where the data is located in.
   // If not present and the data is located in a segment, then the data is in
   // the first index.
-  mutable_data_segments_idx:uint64;
+  mutable_data_segments_idx: uint64;

   // [Optional] The unique name of the tensor. e.g. 'mod.linear.weight'
-  fully_qualified_name:string;
+  fully_qualified_name: string;
 }

 table Tensor {
-  scalar_type:ScalarType;
+  scalar_type: ScalarType;

   // Offset in scalar_type elements (e.g., multiples of 4 bytes for an int
   // scalar type) from the beginning of the tensor buffer to the beginning of
   // the actual data. Currently, the runtime only supports a value of zero.
-  storage_offset:int;
+  storage_offset: int;

-  sizes:[int];
+  sizes: [int];

   // Specifies in what order the dimensions are laid out in memory (from outer
   // to inner).
@@ -88,10 +88,10 @@ table Tensor {
   // - (0, 2, 1) represents a [row, batch, column] ordering where "column" is
   // the innermost dimension, then comes "batch", and the outermost dimension
   // is "row".
-  dim_order:[ubyte];
+  dim_order: [ubyte];

   // out of scope M1
-  requires_grad:bool;
+  requires_grad: bool;

   // Overall, a Tensor is either constant or mutable. At method load time
   // constant tensors receive a dataptr into the serialized program. Mutable
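
The dim_order comments above describe a permutation from memory position (outer to inner) to logical dimension. A small Python sketch that derives contiguous strides from sizes and dim_order under that reading (the helper is illustrative, not a runtime API):

```python
def strides_from_dim_order(sizes, dim_order):
    """Contiguous strides for a tensor whose logical dimension dim_order[i]
    sits at memory position i (position 0 = outermost)."""
    strides = [0] * len(sizes)
    acc = 1
    for pos in reversed(range(len(dim_order))):  # innermost memory position first
        dim = dim_order[pos]
        strides[dim] = acc
        acc *= sizes[dim]
    return strides

# dim_order (0, 1, 2) is the standard contiguous layout.
assert strides_from_dim_order([2, 3, 4], [0, 1, 2]) == [12, 4, 1]
# With dim_order (0, 2, 1), logical dimension 1 becomes the innermost in memory.
assert strides_from_dim_order([2, 3, 4], [0, 2, 1]) == [12, 1, 3]
```
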
@@ -117,13 +117,13 @@ table Tensor {
   // in program.mutable_data_segments[0] otherwise if tensor_info is non-null
   // then the mutable_data_segment index is specified by
   // tensor_info.mutable_data_segments_index.
-  data_buffer_idx:uint;
+  data_buffer_idx: uint;

   // [Optional] preallocation details for non-constants (null otherwise).
-  allocation_info:AllocationDetails;
+  allocation_info: AllocationDetails;

   // May not be needed.
-  layout:byte;
+  layout: byte;

   // Determines the type of the tensor's shape, from the point of view of its
   // dynamic or not behavior, and consequently how the allocation of the
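
The comments above spell out a default: mutable tensor data lives in program.mutable_data_segments[0] unless extra_tensor_info overrides the index. A tiny Python sketch of just that rule (the helper name and dict shape are illustrative):

```python
def mutable_segment_index(extra_tensor_info):
    """Per the comments above: default to the first mutable data segment
    unless extra_tensor_info supplies mutable_data_segments_idx."""
    if extra_tensor_info is None:
        return 0
    return extra_tensor_info.get("mutable_data_segments_idx", 0)

assert mutable_segment_index(None) == 0
assert mutable_segment_index({"mutable_data_segments_idx": 2}) == 2
```
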
@@ -137,52 +137,52 @@ table Tensor {
   //
   // 3. dynamism == DYNAMIC_UNBOUND: the stored sizes field can be ignored since
   // shape is fully dynamic.
-  shape_dynamism:TensorShapeDynamism;
+  shape_dynamism: TensorShapeDynamism;

   // [Optional] Additional information about the Tensor that is not applicable
   // to most tensors.
-  extra_tensor_info:ExtraTensorInfo;
+  extra_tensor_info: ExtraTensorInfo;
 }

 table Int {
-  int_val:long;
+  int_val: long;
 }

 table Bool {
-  bool_val:bool;
+  bool_val: bool;
 }

 table Double {
-  double_val:double;
+  double_val: double;
 }

 table String {
-  string_val:string;
+  string_val: string;
 }

 table IntList {
-  items:[long];
+  items: [long];
 }

 table DoubleList {
-  items:[double];
+  items: [double];
 }

 table BoolList {
-  items:[bool];
+  items: [bool];
 }

 // Unlike primitive lists, tensor lists have mutable members and aliasing behavior when
 // elements are added to them. To match this aliasing behavior, the runtime tensor list is
 // serialized by serializing its elements into the ExecutionPlan.values array, and then
 // serializing their corresponding indices into TensorList.items.
 table TensorList {
-  items:[int]; // EValue indices.
+  items: [int]; // EValue indices.
 }

 // Similar to TensorList except the indices can also point to None.
 table OptionalTensorList {
-  items:[int];
+  items: [int];
 }

 // Supported values in Executorch kernels, Enums are serialized as ints.
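
The TensorList comment above explains that tensor lists are serialized by index into ExecutionPlan.values rather than by value, so aliased tensors share a single entry. A hedged Python sketch of that scheme (plan_values and serialize_tensor_list are illustrative names, not the exir API):

```python
def serialize_tensor_list(tensors, plan_values):
    """Record only indices into the plan-level values list, reusing an
    existing slot when a tensor is already present (aliasing)."""
    indices = []
    for t in tensors:
        if t in plan_values:
            indices.append(plan_values.index(t))
        else:
            plan_values.append(t)
            indices.append(len(plan_values) - 1)
    return {"items": indices}  # mirrors TensorList.items

values = ["tensor_a", "tensor_b"]
print(serialize_tensor_list(["tensor_b", "tensor_c"], values))
# {'items': [1, 2]}; values is now ['tensor_a', 'tensor_b', 'tensor_c']
```
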
@@ -202,30 +202,30 @@ union KernelTypes {

 // Abstraction for program values. A subset of types supported in core pytorch kernels.
 table EValue {
-  val:KernelTypes;
+  val: KernelTypes;
 }

 table Operator {
   // Operator registry and lookup is uniquely identified by its name, and overload name.
   // TODO(larryliu): is there a more efficient way to represent this
-  name:string;
-  overload:string;
+  name: string;
+  overload: string;
 }

 table KernelCall {
   // Index to the operators table in the program.
-  op_index:int;
+  op_index: int;

   // Indexes to the (values) required by the operation (in and out).
-  args:[int];
+  args: [int];
 }

 table DelegateCall {
   // Index to the delegates table in the program.
-  delegate_index:int;
+  delegate_index: int;

   // Indexes to the (values) required by the delegates (in and out).
-  args:[int];
+  args: [int];
 }

 table MoveCall {
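
Operator and KernelCall above are linked purely by indices. A small Python sketch of how a consumer might resolve a KernelCall against the operators table (the registry dict, key format, and dispatch helper are assumptions for illustration, not the ExecuTorch runtime API):

```python
# Hypothetical kernel registry keyed by "name.overload".
kernel_registry = {
    "aten::add.out": lambda args: f"add called with EValue indices {args}",
}

operators = [{"name": "aten::add", "overload": "out"}]  # program-level operators table
kernel_call = {"op_index": 0, "args": [3, 4, 5]}        # one KernelCall instruction

def dispatch(call, operators, registry):
    op = operators[call["op_index"]]
    key = f"{op['name']}.{op['overload']}" if op["overload"] else op["name"]
    return registry[key](call["args"])

print(dispatch(kernel_call, operators, kernel_registry))
```
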
@@ -259,20 +259,20 @@ union InstructionArguments {

 // Basic unit of execution
 table Instruction {
-  instr_args:InstructionArguments;
+  instr_args: InstructionArguments;
 }

 table Frame {
   // For storing the frame to print stacktraces
-  filename:string; // Name of the file in which the instruction exists
-  lineno:int; // Line number at which the instruction was called
-  name:string; // Name of the function the instruction was called from
-  context:string; // Source code of the instruction
+  filename: string; // Name of the file in which the instruction exists
+  lineno: int; // Line number at which the instruction was called
+  name: string; // Name of the function the instruction was called from
+  context: string; // Source code of the instruction
 }

 table FrameList {
   // For storing the frames to print stacktraces
-  items:[Frame];
+  items: [Frame];
 }

 // Indicates where a piece of data is stored.
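
Frame and FrameList above exist only to reconstruct stacktraces for debugging. A hedged Python sketch of how those fields could be rendered (the traceback-style formatting is an assumption, not what the runtime prints):

```python
def format_stacktrace(frames):
    """Render a FrameList's frames one per instruction, Python-traceback style."""
    lines = []
    for f in frames:
        lines.append(f'  File "{f["filename"]}", line {f["lineno"]}, in {f["name"]}')
        lines.append(f'    {f["context"]}')
    return "\n".join(lines)

print(format_stacktrace([
    {"filename": "model.py", "lineno": 12, "name": "forward", "context": "x = self.linear(x)"},
]))
```
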
@@ -322,17 +322,17 @@ table BackendDelegate {
 // seperate chains.
 table Chain {
   // Indices of the values that are (non-static) inputs into this Chain.
-  inputs:[int];
+  inputs: [int];

   // Indices of the values that are outputs out of this Chain.
-  outputs:[int];
+  outputs: [int];

   // List of instructions to be executed in order.
-  instructions:[Instruction];
+  instructions: [Instruction];

   // Optional list of frames for each instruction.
   // The backend config must have 'emit_stacktrace' set to true to emit
-  stacktrace:[FrameList];
+  stacktrace: [FrameList];
 }

 table ExecutionPlan {
@@ -344,22 +344,22 @@ table ExecutionPlan {
   container_meta_type: ContainerMetadata;

   // A list of all values used in this execution plan.
-  values:[EValue];
+  values: [EValue];

   // Indices to the 'Evalues' that are inputs to this execution plan.
   // This list contains only the non-constant tensors (i.e. not part of
   // the saved program).
-  inputs:[int];
+  inputs: [int];

   // Indices to the 'Evalues' that are outputs of this execution plan.
   // This signals a lifespan that goes beyond the execution.
-  outputs:[int];
+  outputs: [int];

   // List of Chains of kernels.
-  chains:[Chain];
+  chains: [Chain];

   // Operators used in this execution plan
-  operators:[Operator];
+  operators: [Operator];

   // A list of delegates and each is a special instance of execution, the same level of chains.
   delegates: [BackendDelegate];
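
The ExecutionPlan fields above (values, inputs, outputs, chains) suggest a straightforward interpreter shape: bind inputs into the value table, run each chain's instructions in order, then read outputs back out by index. A minimal Python sketch of that loop (run_plan and execute_instruction are illustrative, not the real runtime):

```python
def run_plan(plan, user_inputs, execute_instruction):
    """Toy interpreter over an ExecutionPlan-shaped dict."""
    values = list(plan["values"])                      # working copy of the EValues
    for idx, inp in zip(plan["inputs"], user_inputs):  # bind user inputs by index
        values[idx] = inp
    for chain in plan["chains"]:                       # execute chains in order
        for instruction in chain["instructions"]:
            execute_instruction(instruction, values)
    return [values[idx] for idx in plan["outputs"]]    # gather outputs by index
```
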
@@ -379,7 +379,7 @@ table Buffer {
   // During serialization, this alignment may be rewritten to a larger value.
   // The magic "@executorch-tensor-alignment" comment tells EXIR which lines to
   // patch.
-  storage:[ubyte] (force_align: 16); // @executorch-tensor-alignment
+  storage: [ubyte] (force_align: 16); // @executorch-tensor-alignment
 }

 // Delegate data stored directly in the flatbuffer. This is a different type
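
The force_align comment above says EXIR may rewrite the alignment on lines tagged with the magic @executorch-tensor-alignment marker. A simplified sketch of what such a rewrite could look like, assuming a plain regex pass over the schema text (not necessarily how EXIR implements it):

```python
import re

def patch_tensor_alignment(schema_text: str, new_align: int) -> str:
    """Bump force_align on lines carrying the @executorch-tensor-alignment tag."""
    pattern = r"\(force_align: \d+\)(.*@executorch-tensor-alignment)"
    return re.sub(pattern, rf"(force_align: {new_align})\1", schema_text)

line = "  storage: [ubyte] (force_align: 16); // @executorch-tensor-alignment"
print(patch_tensor_alignment(line, 64))
# "  storage: [ubyte] (force_align: 64); // @executorch-tensor-alignment"
```
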
@@ -419,31 +419,31 @@ table SubsegmentOffsets {

 table Program {
   // Schema version.
-  version:uint;
+  version: uint;

   // List of ExecutionPlans that make up the program. Each ExecutionPlan corresponds with a
   // different entry point into the model.
-  execution_plan:[ExecutionPlan];
+  execution_plan: [ExecutionPlan];

   // Tables of constant data, used for constant Values (e.g.data field of weight tensors).
   // Each constant is assigned an index into the table which are each individually aligned.
   // 0 index is reserved to be pointed to by non-constant Tensors.
   // If this field is non-empty, constant_segment.offsets must be empty.
   // DEPRECATED: After D61996249 on 2024-09-05, no new PTE files will use this field.
-  constant_buffer:[Buffer];
+  constant_buffer: [Buffer];

   // List of delegate data. Pointed to by BackendDelegateDataReference.
-  backend_delegate_data:[BackendDelegateInlineData];
+  backend_delegate_data: [BackendDelegateInlineData];

   // List of data segments that follow the Program data in this file, sorted by
   // offset. Elements in this schema can refer to these segments by index.
-  segments:[DataSegment];
+  segments: [DataSegment];

   // Describes the offsets of each constant tensor, relative to the segment
   // offset. If constant_segment.offsets field is non-empty, constant_buffer
   // must be empty. constant_segment.offsets[0] is reserved to be pointed to by
   // non-constant Tensors.
-  constant_segment:SubsegmentOffsets;
+  constant_segment: SubsegmentOffsets;

   // [Optional] Describes the offsets into various segments for each mutable
   // tensor. Only mutable tensors with a meaningful initial state are
@@ -453,7 +453,7 @@ table Program {
   // into the mutable tensor, as opposed to loading the .pte data into
   // constant memory, copying it over, and then being unable to release the
   // constant segment. No two elements should point to the same segment.
-  mutable_data_segments:[SubsegmentOffsets];
+  mutable_data_segments: [SubsegmentOffsets];
 }

 root_type Program;
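
The Program table above allows constant data to live in either the deprecated inline constant_buffer or the newer constant_segment, never both, with index 0 reserved for non-constant tensors. A hedged Python sketch of that lookup rule (the dict shapes, the end-offset convention, and the helper name are illustrative assumptions):

```python
def constant_data(program, segment_bytes, data_buffer_idx):
    """Resolve the bytes backing a constant tensor; returns None for index 0,
    which the schema reserves for non-constant tensors."""
    if data_buffer_idx == 0:
        return None
    if program.get("constant_buffer"):                  # deprecated inline scheme
        return program["constant_buffer"][data_buffer_idx]["storage"]
    offsets = program["constant_segment"]["offsets"]    # segment-based scheme
    start = offsets[data_buffer_idx]
    end = (offsets[data_buffer_idx + 1]
           if data_buffer_idx + 1 < len(offsets) else len(segment_bytes))
    return segment_bytes[start:end]

program = {"constant_buffer": [], "constant_segment": {"offsets": [0, 0, 4]}}
print(constant_data(program, b"\x01\x02\x03\x04\x05\x06", 1))  # b'\x01\x02\x03\x04'
```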
