@@ -107,12 +107,12 @@ static bool try_read_write_buf(char *ptr)
 	return try_read_buf(ptr) && try_write_buf(ptr);
 }
 
-FIXTURE(guard_pages)
+FIXTURE(guard_regions)
 {
 	unsigned long page_size;
 };
 
-FIXTURE_SETUP(guard_pages)
+FIXTURE_SETUP(guard_regions)
 {
 	struct sigaction act = {
 		.sa_handler = &handle_fatal,
@@ -126,7 +126,7 @@ FIXTURE_SETUP(guard_pages)
 	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
 };
 
-FIXTURE_TEARDOWN(guard_pages)
+FIXTURE_TEARDOWN(guard_regions)
 {
 	struct sigaction act = {
 		.sa_handler = SIG_DFL,
@@ -137,7 +137,7 @@ FIXTURE_TEARDOWN(guard_pages)
 	sigaction(SIGSEGV, &act, NULL);
 }
 
-TEST_F(guard_pages, basic)
+TEST_F(guard_regions, basic)
 {
 	const unsigned long NUM_PAGES = 10;
 	const unsigned long page_size = self->page_size;
@@ -231,7 +231,7 @@ TEST_F(guard_pages, basic)
 }
 
 /* Assert that operations applied across multiple VMAs work as expected. */
-TEST_F(guard_pages, multi_vma)
+TEST_F(guard_regions, multi_vma)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
@@ -367,7 +367,7 @@ TEST_F(guard_pages, multi_vma)
  * Assert that batched operations performed using process_madvise() work as
  * expected.
  */
-TEST_F(guard_pages, process_madvise)
+TEST_F(guard_regions, process_madvise)
 {
 	const unsigned long page_size = self->page_size;
 	pid_t pid = getpid();
@@ -467,7 +467,7 @@ TEST_F(guard_pages, process_madvise)
 }
 
 /* Assert that unmapping ranges does not leave guard markers behind. */
-TEST_F(guard_pages, munmap)
+TEST_F(guard_regions, munmap)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr, *ptr_new1, *ptr_new2;
@@ -505,7 +505,7 @@ TEST_F(guard_pages, munmap)
 }
 
 /* Assert that mprotect() operations have no bearing on guard markers. */
-TEST_F(guard_pages, mprotect)
+TEST_F(guard_regions, mprotect)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -553,7 +553,7 @@ TEST_F(guard_pages, mprotect)
 }
 
 /* Split and merge VMAs and make sure guard pages still behave. */
-TEST_F(guard_pages, split_merge)
+TEST_F(guard_regions, split_merge)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr, *ptr_new;
@@ -684,7 +684,7 @@ TEST_F(guard_pages, split_merge)
 }
 
 /* Assert that MADV_DONTNEED does not remove guard markers. */
-TEST_F(guard_pages, dontneed)
+TEST_F(guard_regions, dontneed)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -737,7 +737,7 @@ TEST_F(guard_pages, dontneed)
 }
 
 /* Assert that mlock()'ed pages work correctly with guard markers. */
-TEST_F(guard_pages, mlock)
+TEST_F(guard_regions, mlock)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -810,7 +810,7 @@ TEST_F(guard_pages, mlock)
  *
  * - Moving a mapping alone should retain markers as they are.
  */
-TEST_F(guard_pages, mremap_move)
+TEST_F(guard_regions, mremap_move)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr, *ptr_new;
@@ -857,7 +857,7 @@ TEST_F(guard_pages, mremap_move)
  * will have to remove guard pages manually to fix up (they'd have to do the
  * same if it were a PROT_NONE mapping).
  */
-TEST_F(guard_pages, mremap_expand)
+TEST_F(guard_regions, mremap_expand)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr, *ptr_new;
@@ -920,7 +920,7 @@ TEST_F(guard_pages, mremap_expand)
  * if the user were using a PROT_NONE mapping they'd have to manually fix this
  * up also so this is OK.
  */
-TEST_F(guard_pages, mremap_shrink)
+TEST_F(guard_regions, mremap_shrink)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -984,7 +984,7 @@ TEST_F(guard_pages, mremap_shrink)
  * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
  * retain guard pages.
  */
-TEST_F(guard_pages, fork)
+TEST_F(guard_regions, fork)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1039,7 +1039,7 @@ TEST_F(guard_pages, fork)
  * Assert expected behaviour after we fork populated ranges of anonymous memory
  * and then guard and unguard the range.
  */
-TEST_F(guard_pages, fork_cow)
+TEST_F(guard_regions, fork_cow)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1110,7 +1110,7 @@ TEST_F(guard_pages, fork_cow)
  * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
  * behave as expected.
  */
-TEST_F(guard_pages, fork_wipeonfork)
+TEST_F(guard_regions, fork_wipeonfork)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1160,7 +1160,7 @@ TEST_F(guard_pages, fork_wipeonfork)
 }
 
 /* Ensure that MADV_FREE retains guard entries as expected. */
-TEST_F(guard_pages, lazyfree)
+TEST_F(guard_regions, lazyfree)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1196,7 +1196,7 @@ TEST_F(guard_pages, lazyfree)
 }
 
 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
-TEST_F(guard_pages, populate)
+TEST_F(guard_regions, populate)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1222,7 +1222,7 @@ TEST_F(guard_pages, populate)
 }
 
 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
-TEST_F(guard_pages, cold_pageout)
+TEST_F(guard_regions, cold_pageout)
 {
 	const unsigned long page_size = self->page_size;
 	char *ptr;
@@ -1268,7 +1268,7 @@ TEST_F(guard_pages, cold_pageout)
 }
 
 /* Ensure that guard pages do not break userfaultd. */
-TEST_F(guard_pages, uffd)
+TEST_F(guard_regions, uffd)
 {
 	const unsigned long page_size = self->page_size;
 	int uffd;
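
For context, the fixture being renamed here is a kselftest-harness fixture, so every TEST_F(guard_regions, ...) in the hunks above is bound to the FIXTURE / FIXTURE_SETUP / FIXTURE_TEARDOWN trio from the first hunks. The following is a minimal sketch, not part of this commit, of what one more case against the renamed fixture could look like; it reuses self->page_size and try_read_write_buf() as shown above, and it assumes the MADV_GUARD_INSTALL / MADV_GUARD_REMOVE advice values (and the test file's existing includes) are available on the running kernel.

/*
 * Sketch only, not part of the commit: a hypothetical extra case against
 * the renamed guard_regions fixture. Assumes MADV_GUARD_INSTALL and
 * MADV_GUARD_REMOVE are supported by the kernel under test.
 */
TEST_F(guard_regions, sketch_install_remove)
{
	const unsigned long page_size = self->page_size;
	char *ptr;

	/* Map one anonymous, writable page. */
	ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Installing a guard marker should make any access fault. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_FALSE(try_read_write_buf(ptr));

	/* Removing the marker should make the page usable again. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(try_read_write_buf(ptr));

	ASSERT_EQ(munmap(ptr, page_size), 0);
}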