@@ -1202,6 +1202,99 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version,
}
}

+Error IndexedMemProfReader::deserialize(const unsigned char *Start,
+                                        uint64_t MemProfOffset) {
+  const unsigned char *Ptr = Start + MemProfOffset;
+
+  // Read the first 64-bit word, which may be RecordTableOffset in
+  // memprof::MemProfVersion0 or the MemProf version number in
+  // memprof::MemProfVersion1 and above.
+  const uint64_t FirstWord =
+      support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
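+  // Note that readNext takes Ptr by reference and advances it past each
+  // field it reads, so each read below continues where the previous one
+  // left off.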
+
+  memprof::IndexedVersion Version = memprof::Version0;
+  if (FirstWord == memprof::Version1 || FirstWord == memprof::Version2) {
+    // Everything is good. We can proceed to deserialize the rest.
+    Version = static_cast<memprof::IndexedVersion>(FirstWord);
+  } else if (FirstWord >= 24) {
+    // This is a heuristic/hack to detect memprof::MemProfVersion0,
+    // which does not have a version field in the header.
+    // In memprof::MemProfVersion0, FirstWord will be RecordTableOffset,
+    // which should be at least 24 because of the MemProf header size.
+    Version = memprof::Version0;
+  } else {
+    return make_error<InstrProfError>(
+        instrprof_error::unsupported_version,
+        formatv("MemProf version {} not supported; "
+                "requires version between {} and {}, inclusive",
+                FirstWord, memprof::MinimumSupportedVersion,
+                memprof::MaximumSupportedVersion));
+  }
+
+  // The value returned from RecordTableGenerator.Emit.
+  const uint64_t RecordTableOffset =
+      Version == memprof::Version0
+          ? FirstWord
+          : support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+  // The offset in the stream right before invoking
+  // FrameTableGenerator.Emit.
+  const uint64_t FramePayloadOffset =
+      support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+  // The value returned from FrameTableGenerator.Emit.
+  const uint64_t FrameTableOffset =
+      support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+
+  // The offset in the stream right before invoking
+  // CallStackTableGenerator.Emit.
+  uint64_t CallStackPayloadOffset = 0;
+  // The value returned from CallStackTableGenerator.Emit.
+  uint64_t CallStackTableOffset = 0;
+  if (Version >= memprof::Version2) {
+    CallStackPayloadOffset =
+        support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+    CallStackTableOffset =
+        support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+  }
+
+  // Read the schema.
+  auto SchemaOr = memprof::readMemProfSchema(Ptr);
+  if (!SchemaOr)
+    return SchemaOr.takeError();
+  Schema = SchemaOr.get();
+
+  // Now initialize the table reader with a pointer into data buffer.
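+  // At this point readMemProfSchema has advanced Ptr past the schema, so it
+  // marks the start of the serialized records and is passed as Payload.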
+  MemProfRecordTable.reset(MemProfRecordHashTable::Create(
+      /*Buckets=*/Start + RecordTableOffset,
+      /*Payload=*/Ptr,
+      /*Base=*/Start, memprof::RecordLookupTrait(Version, Schema)));
+
+  // Initialize the frame table reader with the payload and bucket offsets.
+  MemProfFrameTable.reset(MemProfFrameHashTable::Create(
+      /*Buckets=*/Start + FrameTableOffset,
+      /*Payload=*/Start + FramePayloadOffset,
+      /*Base=*/Start, memprof::FrameLookupTrait()));
+
+  if (Version >= memprof::Version2)
+    MemProfCallStackTable.reset(MemProfCallStackHashTable::Create(
+        /*Buckets=*/Start + CallStackTableOffset,
+        /*Payload=*/Start + CallStackPayloadOffset,
+        /*Base=*/Start, memprof::CallStackLookupTrait()));
+
+#ifdef EXPENSIVE_CHECKS
+  // Go through all the records and verify that CSId has been correctly
+  // populated. Do this only under EXPENSIVE_CHECKS. Otherwise, we
+  // would defeat the purpose of OnDiskIterableChainedHashTable.
+  // Note that we can compare CSId against actual call stacks only for
+  // Version0 and Version1 because IndexedAllocationInfo::CallStack and
+  // IndexedMemProfRecord::CallSites are not populated in Version2.
+  if (Version <= memprof::Version1)
+    for (const auto &Record : MemProfRecordTable->data())
+      verifyIndexedMemProfRecord(Record);
+#endif
+
+  return Error::success();
+}
+
Error IndexedInstrProfReader::readHeader() {
  using namespace support;
@@ -1244,95 +1337,8 @@ Error IndexedInstrProfReader::readHeader() {
    uint64_t MemProfOffset =
        endian::byte_swap<uint64_t, llvm::endianness::little>(
            Header->MemProfOffset);
-
-    const unsigned char *Ptr = Start + MemProfOffset;
-
-    // Read the first 64-bit word, which may be RecordTableOffset in
-    // memprof::MemProfVersion0 or the MemProf version number in
-    // memprof::MemProfVersion1 and above.
-    const uint64_t FirstWord =
-        support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
-
-    memprof::IndexedVersion Version = memprof::Version0;
-    if (FirstWord == memprof::Version1 || FirstWord == memprof::Version2) {
-      // Everything is good. We can proceed to deserialize the rest.
-      Version = static_cast<memprof::IndexedVersion>(FirstWord);
-    } else if (FirstWord >= 24) {
-      // This is a heuristic/hack to detect memprof::MemProfVersion0,
-      // which does not have a version field in the header.
-      // In memprof::MemProfVersion0, FirstWord will be RecordTableOffset,
-      // which should be at least 24 because of the MemProf header size.
-      Version = memprof::Version0;
-    } else {
-      return make_error<InstrProfError>(
-          instrprof_error::unsupported_version,
-          formatv("MemProf version {} not supported; "
-                  "requires version between {} and {}, inclusive",
-                  FirstWord, memprof::MinimumSupportedVersion,
-                  memprof::MaximumSupportedVersion));
-    }
-
-    // The value returned from RecordTableGenerator.Emit.
-    const uint64_t RecordTableOffset =
-        Version == memprof::Version0
-            ? FirstWord
-            : support::endian::readNext<uint64_t, llvm::endianness::little>(
-                  Ptr);
-    // The offset in the stream right before invoking
-    // FrameTableGenerator.Emit.
-    const uint64_t FramePayloadOffset =
-        support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
-    // The value returned from FrameTableGenerator.Emit.
-    const uint64_t FrameTableOffset =
-        support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
-
-    // The offset in the stream right before invoking
-    // CallStackTableGenerator.Emit.
-    uint64_t CallStackPayloadOffset = 0;
-    // The value returned from CallStackTableGenerator.Emit.
-    uint64_t CallStackTableOffset = 0;
-    if (Version >= memprof::Version2) {
-      CallStackPayloadOffset =
-          support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
-      CallStackTableOffset =
-          support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
-    }
-
-    // Read the schema.
-    auto SchemaOr = memprof::readMemProfSchema(Ptr);
-    if (!SchemaOr)
-      return SchemaOr.takeError();
-    Schema = SchemaOr.get();
-
-    // Now initialize the table reader with a pointer into data buffer.
-    MemProfRecordTable.reset(MemProfRecordHashTable::Create(
-        /*Buckets=*/Start + RecordTableOffset,
-        /*Payload=*/Ptr,
-        /*Base=*/Start, memprof::RecordLookupTrait(Version, Schema)));
-
-    // Initialize the frame table reader with the payload and bucket offsets.
-    MemProfFrameTable.reset(MemProfFrameHashTable::Create(
-        /*Buckets=*/Start + FrameTableOffset,
-        /*Payload=*/Start + FramePayloadOffset,
-        /*Base=*/Start, memprof::FrameLookupTrait()));
-
-    if (Version >= memprof::Version2)
-      MemProfCallStackTable.reset(MemProfCallStackHashTable::Create(
-          /*Buckets=*/Start + CallStackTableOffset,
-          /*Payload=*/Start + CallStackPayloadOffset,
-          /*Base=*/Start, memprof::CallStackLookupTrait()));
-
-#ifdef EXPENSIVE_CHECKS
-    // Go through all the records and verify that CSId has been correctly
-    // populated. Do this only under EXPENSIVE_CHECKS. Otherwise, we
-    // would defeat the purpose of OnDiskIterableChainedHashTable.
-    // Note that we can compare CSId against actual call stacks only for
-    // Version0 and Version1 because IndexedAllocationInfo::CallStack and
-    // IndexedMemProfRecord::CallSites are not populated in Version2.
-    if (Version <= memprof::Version1)
-      for (const auto &Record : MemProfRecordTable->data())
-        verifyIndexedMemProfRecord(Record);
-#endif
+    if (Error E = MemProfReader.deserialize(Start, MemProfOffset))
+      return E;
  }

  // BinaryIdOffset field in the header is only valid when the format version
@@ -1501,7 +1507,7 @@ Expected<InstrProfRecord> IndexedInstrProfReader::getInstrProfRecord(
}

Expected<memprof::MemProfRecord>
-IndexedInstrProfReader::getMemProfRecord(const uint64_t FuncNameHash) {
+IndexedMemProfReader::getMemProfRecord(const uint64_t FuncNameHash) const {
  // TODO: Add memprof specific errors.
  if (MemProfRecordTable == nullptr)
    return make_error<InstrProfError>(instrprof_error::invalid_prof,