 *
 *===----------------------------------------------------------------------===*/
-
#ifdef _MSC_VER
- #define PACKED(__decl__) __pragma(pack(push,1)) __decl__ __pragma(pack(pop))
+ #define PACKED(...) __pragma(pack(push,1)) __VA_ARGS__ __pragma(pack(pop))
#else
- #define PACKED(__decl__) __decl__ __attribute__((__packed__))
+ #define PACKED(...) __VA_ARGS__ __attribute__((__packed__))
#endif

// A 64-bit magic number to uniquely identify the raw binary memprof profile file.
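The PACKED macro is switched to a variadic form because the structs added below contain top-level commas (multi-declarator members, constructor initializer lists), and the preprocessor would split a single-parameter macro's argument at each of them. A minimal sketch of the failure mode and the fix, with the hypothetical names PACKED_OLD and PACKED_NEW chosen only for illustration:

// Illustration only, not part of the patch. With the old single-parameter
// form, a top-level comma inside the struct body splits the macro argument
// list (the preprocessor groups arguments on parentheses, not braces):
//
//   #define PACKED_OLD(__decl__) __decl__ __attribute__((__packed__))
//   PACKED_OLD(struct S { int a, b; });  // preprocessor sees two arguments
//
// The variadic form re-joins everything between the outer parentheses:
#define PACKED_NEW(...) __VA_ARGS__ __attribute__((__packed__))

PACKED_NEW(struct S {
  int a, b;  // commas in member lists and ctor initializers are now fine
});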
@@ -47,14 +46,106 @@ PACKED(struct Header {
  uint64_t StackOffset;
});

+
// A struct describing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
  uint64_t Start;
  uint64_t End;
  uint64_t Offset;
-   uint8_t BuildId[32];
+   // This field is unused until sanitizer procmaps support for build ids for
+   // Linux-Elf is implemented.
+   uint8_t BuildId[32] = {0};
+
+   SegmentEntry(uint64_t S, uint64_t E, uint64_t O)
+       : Start(S), End(E), Offset(O) {}
+
+   SegmentEntry(const SegmentEntry& S) {
+     Start = S.Start;
+     End = S.End;
+     Offset = S.Offset;
+   }
+
+   SegmentEntry& operator=(const SegmentEntry& S) {
+     Start = S.Start;
+     End = S.End;
+     Offset = S.Offset;
+     return *this;
+   }
+
+   bool operator==(const SegmentEntry& S) const {
+     return Start == S.Start &&
+            End == S.End &&
+            Offset == S.Offset;
+   }
});
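One detail worth calling out before the next struct: the copy constructor, copy assignment, and operator== added above intentionally leave BuildId out, since the field is zero-initialized and unused until build-id support lands in the sanitizer procmaps code. A small sketch of the resulting behavior (illustrative only, not part of the patch; the function and variable names are made up):

#include <cassert>
// Assumes the header above has been included so SegmentEntry is visible.
using llvm::memprof::SegmentEntry;

void SegmentEntryExample() {
  SegmentEntry Text(/*Start=*/0x400000, /*End=*/0x401000, /*Offset=*/0x0);
  SegmentEntry Copy = Text;  // copy constructor copies Start/End/Offset only
  assert(Copy == Text);      // operator== also compares only those three fields
  Copy.BuildId[0] = 0xab;    // BuildId is ignored by copy and comparison for now
  assert(Copy == Text);
}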
+
+ // A struct representing the heap allocation characteristics of a particular
+ // runtime context. This struct is shared between the compiler-rt runtime and
+ // the raw profile reader. The indexed format uses a separate, self-describing
+ // backwards compatible format.
+ PACKED(struct MemInfoBlock {
+   uint32_t alloc_count;
+   uint64_t total_access_count, min_access_count, max_access_count;
+   uint64_t total_size;
+   uint32_t min_size, max_size;
+   uint32_t alloc_timestamp, dealloc_timestamp;
+   uint64_t total_lifetime;
+   uint32_t min_lifetime, max_lifetime;
+   uint32_t alloc_cpu_id, dealloc_cpu_id;
+   uint32_t num_migrated_cpu;
+
+   // Only compared to prior deallocated object currently.
+   uint32_t num_lifetime_overlaps;
+   uint32_t num_same_alloc_cpu;
+   uint32_t num_same_dealloc_cpu;
+
+   uint64_t data_type_id; // TODO: hash of type name
+
+   MemInfoBlock() : alloc_count(0) {}
+
+   MemInfoBlock(uint32_t size, uint64_t access_count, uint32_t alloc_timestamp,
+                uint32_t dealloc_timestamp, uint32_t alloc_cpu,
+                uint32_t dealloc_cpu)
+       : alloc_count(1), total_access_count(access_count),
+         min_access_count(access_count), max_access_count(access_count),
+         total_size(size), min_size(size), max_size(size),
+         alloc_timestamp(alloc_timestamp), dealloc_timestamp(dealloc_timestamp),
+         total_lifetime(dealloc_timestamp - alloc_timestamp),
+         min_lifetime(total_lifetime), max_lifetime(total_lifetime),
+         alloc_cpu_id(alloc_cpu), dealloc_cpu_id(dealloc_cpu),
+         num_lifetime_overlaps(0), num_same_alloc_cpu(0),
+         num_same_dealloc_cpu(0) {
+     num_migrated_cpu = alloc_cpu_id != dealloc_cpu_id;
+   }
+
+   void Merge(const MemInfoBlock &newMIB) {
+     alloc_count += newMIB.alloc_count;
+
+     total_access_count += newMIB.total_access_count;
+     min_access_count = newMIB.min_access_count < min_access_count ? newMIB.min_access_count : min_access_count;
+     max_access_count = newMIB.max_access_count > max_access_count ? newMIB.max_access_count : max_access_count;
+
+     total_size += newMIB.total_size;
+     min_size = newMIB.min_size < min_size ? newMIB.min_size : min_size;
+     max_size = newMIB.max_size > max_size ? newMIB.max_size : max_size;
+
+     total_lifetime += newMIB.total_lifetime;
+     min_lifetime = newMIB.min_lifetime < min_lifetime ? newMIB.min_lifetime : min_lifetime;
+     max_lifetime = newMIB.max_lifetime > max_lifetime ? newMIB.max_lifetime : max_lifetime;
+
+     // We know newMIB was deallocated later, so we only need to check whether
+     // it was allocated before the previous one was deallocated.
+     num_lifetime_overlaps += newMIB.alloc_timestamp < dealloc_timestamp;
+     alloc_timestamp = newMIB.alloc_timestamp;
+     dealloc_timestamp = newMIB.dealloc_timestamp;
+
+     num_same_alloc_cpu += alloc_cpu_id == newMIB.alloc_cpu_id;
+     num_same_dealloc_cpu += dealloc_cpu_id == newMIB.dealloc_cpu_id;
+     alloc_cpu_id = newMIB.alloc_cpu_id;
+     dealloc_cpu_id = newMIB.dealloc_cpu_id;
+   }
+ });
+

} // namespace memprof
} // namespace llvm
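To make the Merge semantics concrete, here is a small sketch of folding two allocations from the same calling context into a single record. It is not part of the commit; it assumes the header above has been included, and the numeric values are invented for illustration:

#include <cassert>
using llvm::memprof::MemInfoBlock;

void MergeExample() {
  // First allocation: 32 bytes, 4 accesses, lived from t=100 to t=200 on CPU 0.
  MemInfoBlock First(/*size=*/32, /*access_count=*/4, /*alloc_timestamp=*/100,
                     /*dealloc_timestamp=*/200, /*alloc_cpu=*/0, /*dealloc_cpu=*/0);
  // Second allocation: 64 bytes, 2 accesses, lived from t=150 to t=300 on CPU 1.
  MemInfoBlock Second(/*size=*/64, /*access_count=*/2, /*alloc_timestamp=*/150,
                      /*dealloc_timestamp=*/300, /*alloc_cpu=*/1, /*dealloc_cpu=*/1);

  First.Merge(Second);
  assert(First.alloc_count == 2);
  assert(First.total_size == 96 && First.min_size == 32 && First.max_size == 64);
  assert(First.total_lifetime == 100 + 150);
  // Second was allocated (t=150) before First was deallocated (t=200), so the
  // lifetimes overlapped once.
  assert(First.num_lifetime_overlaps == 1);
  // Allocation and deallocation CPUs differ between the two blocks, so no
  // "same CPU" hits are recorded.
  assert(First.num_same_alloc_cpu == 0 && First.num_same_dealloc_cpu == 0);
}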