@@ -88,31 +88,33 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
 #endif // SCUDO_LINUX
 
 inline void disableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #1");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
 }
 
 inline void enableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #0");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #0");
 }
 
 class ScopedDisableMemoryTagChecks {
   size_t PrevTCO;
 
 public:
   ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
+    __asm__ __volatile__(".arch_extension memtag; mrs %0, tco; msr tco, #1"
                          : "=r"(PrevTCO));
   }
 
   ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
+    __asm__ __volatile__(".arch_extension memtag; msr tco, %0"
+                         :
+                         : "r"(PrevTCO));
   }
 };
 
 inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
   uptr TaggedPtr;
   __asm__ __volatile__(
-      ".arch_extension mte; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+      ".arch_extension memtag; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
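For context: the functions in this hunk toggle PSTATE.TCO (Tag Check Override). msr tco, #1 suppresses tag-check faults on loads and stores, msr tco, #0 re-enables them, and ScopedDisableMemoryTagChecks reads the previous value with mrs so the suppression nests and unwinds correctly. The irg in selectRandomTag inserts a random tag into Ptr, excluding any tag whose bit is set in ExcludeMask. A minimal usage sketch of the RAII guard; the surrounding function is illustrative only and not code from this patch:

    // Read through a pointer whose logical tag may no longer match the memory's
    // allocation tag (e.g. while inspecting a freed chunk), without faulting.
    inline uptr readWordIgnoringTagChecks(const uptr *Ptr) {
      ScopedDisableMemoryTagChecks NoTagChecks; // sets PSTATE.TCO = 1
      return *Ptr;
    } // destructor restores the TCO value captured in the constructor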
@@ -123,7 +125,7 @@ inline uptr storeTags(uptr Begin, uptr End) {
   if (Begin != End) {
     __asm__ __volatile__(
         R"(
-      .arch_extension mte
+      .arch_extension memtag
 
     1:
       stzg %[Cur], [%[Cur]], #16
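The loop this hunk touches walks [Begin, End) with stzg, which writes the tag carried in the cursor register into the allocation tag of the addressed 16-byte granule and zeroes that granule; the post-index form then advances the cursor by 16. Roughly what the assembly computes, expressed in C++ with a made-up helper (setGranuleTagAndZero is not a real function in this file):

    for (uptr Cur = Begin; Cur != End; Cur += 16)
      setGranuleTagAndZero(Cur); // allocation tag := tag of Cur; 16 bytes zeroed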
@@ -144,7 +146,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // chunk holding a low alignment allocation is reused for a higher alignment
   // allocation, the chunk may already have a non-zero tag from the previous
   // allocation.
-  __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]"
+  __asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
                       :
                       : "r"(Ptr)
                       : "memory");
@@ -161,7 +163,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // purpose of catching linear overflows in this case.
   uptr UntaggedEnd = untagPointer(TaggedEnd);
   if (UntaggedEnd != BlockEnd)
-    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                         :
                         : "r"(UntaggedEnd)
                         : "memory");
@@ -175,15 +177,15 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
     // of the allocation to 0. See explanation in prepareTaggedChunk above.
     uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
     if (RoundNewPtr != BlockEnd)
-      __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+      __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                           :
                           : "r"(RoundNewPtr)
                           : "memory");
     return;
   }
 
   __asm__ __volatile__(R"(
-    .arch_extension mte
+    .arch_extension memtag
 
     // Set the memory tag of the region
     // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
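In the shrinking case shown first, only the granule just past the new end of the allocation has its tag reset (unless that granule coincides with the block end), so linear overflows past the shrunken allocation still fault; the growing case below retags the region [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16)) inside the raw-string assembly block. The shrink path, condensed using the hypothetical storeTagOfGranule helper sketched earlier:

    uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
    if (RoundNewPtr != BlockEnd)
      storeTagOfGranule(RoundNewPtr); // RoundNewPtr is untagged, so this clears the tag
    return;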
@@ -208,7 +210,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
 
 inline uptr loadTag(uptr Ptr) {
   uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]"
+  __asm__ __volatile__(".arch_extension memtag; ldg %0, [%0]"
                       : "+r"(TaggedPtr)
                       :
                       : "memory");