@@ -8,7 +8,7 @@
 #include <linux/srcu.h>
 #include <linux/interval_tree.h>
 
-struct mmu_notifier_mm;
+struct mmu_notifier_subscriptions;
 struct mmu_notifier;
 struct mmu_notifier_range;
 struct mmu_interval_notifier;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
	 * through the gart alias address, so leading to memory
	 * corruption.
	 */
-	void (*release)(struct mmu_notifier *mn,
+	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);
 
	/*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
-	int (*clear_flush_young)(struct mmu_notifier *mn,
+	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
-	int (*clear_young)(struct mmu_notifier *mn,
+	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);
@@ -106,15 +106,15 @@ struct mmu_notifier_ops {
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
-	int (*test_young)(struct mmu_notifier *mn,
+	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);
 
	/*
	 * change_pte is called in cases that pte mapping to page is changed:
	 * for example, when ksm remaps pte to point to a new shared page.
	 */
-	void (*change_pte)(struct mmu_notifier *mn,
+	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
	 * invalidate_range_end.
	 *
	 */
-	int (*invalidate_range_start)(struct mmu_notifier *mn,
+	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
-	void (*invalidate_range_end)(struct mmu_notifier *mn,
+	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);
 
	/*
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
-	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
-				 unsigned long start, unsigned long end);
+	void (*invalidate_range)(struct mmu_notifier *subscription,
+				 struct mm_struct *mm,
+				 unsigned long start,
+				 unsigned long end);
 
	/*
	 * These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
-	void (*free_notifier)(struct mmu_notifier *mn);
+	void (*free_notifier)(struct mmu_notifier *subscription);
 };
 
 /*
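For reference, the renamed "subscription" argument in the ops above is the driver's own embedded struct mmu_notifier. A minimal driver-side sketch of how these callbacks are typically supplied, assuming a hypothetical struct my_notifier wrapper (everything except the mmu_notifier types is illustrative, not part of this patch):

	/* Assumes <linux/mmu_notifier.h>; struct my_notifier is hypothetical. */
	struct my_notifier {
		struct mmu_notifier subscription;	/* embedded, registered per mm */
		/* ... driver state ... */
	};

	static void my_release(struct mmu_notifier *subscription,
			       struct mm_struct *mm)
	{
		/* Recover the containing driver object from the subscription. */
		struct my_notifier *mine =
			container_of(subscription, struct my_notifier, subscription);

		/* ... tear down all secondary-MMU mappings for this mm ... */
	}

	static const struct mmu_notifier_ops my_notifier_ops = {
		.release = my_release,
	};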
@@ -235,7 +237,7 @@ struct mmu_notifier {
  * was required but mmu_notifier_range_blockable(range) is false.
  */
 struct mmu_interval_notifier_ops {
-	bool (*invalidate)(struct mmu_interval_notifier *mni,
+	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
 };
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
 
 static inline int mm_has_notifiers(struct mm_struct *mm)
 {
-	return unlikely(mm->mmu_notifier_mm);
+	return unlikely(mm->notifier_subscriptions);
 }
 
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
	up_write(&mm->mmap_sem);
	return ret;
 }
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
 void mmu_notifier_synchronize(void);
 
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);
 
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
 int mmu_interval_notifier_insert_locked(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
 
 /**
  * mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
  * @cur_seq - The cur_seq passed to the invalidate() callback
  *
  * This must be called unconditionally from the invalidate callback of a
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
  * If the caller does not call mmu_interval_read_begin() or
  * mmu_interval_read_retry() then this call is not required.
  */
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
-					unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+		     unsigned long cur_seq)
 {
-	WRITE_ONCE(mni->invalidate_seq, cur_seq);
+	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
 }
 
 /**
  * mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the paired mmu_interval_read_begin()
  *
  * This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
  * Returns true if an invalidation collided with this critical section, and
  * the caller should retry.
  */
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
-					   unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+			unsigned long seq)
 {
-	return mni->invalidate_seq != seq;
+	return interval_sub->invalidate_seq != seq;
 }
 
 /**
  * mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the matching mmu_interval_read_begin()
  *
  * This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
  * This call can be used as part of loops and other expensive operations to
  * expedite a retry.
  */
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
-					    unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+			 unsigned long seq)
 {
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-	return READ_ONCE(mni->invalidate_seq) != seq;
+	return READ_ONCE(interval_sub->invalidate_seq) != seq;
 }
 
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
	__mmu_notifier_invalidate_range(mm, start, end);
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-	mm->mmu_notifier_mm = NULL;
+	mm->notifier_subscriptions = NULL;
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
	if (mm_has_notifiers(mm))
-		__mmu_notifier_mm_destroy(mm);
+		__mmu_notifier_subscriptions_destroy(mm);
 }
 
 
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
 }
 
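Taken together, the mmu_interval_* declarations above form a seqcount-style collision-retry protocol: the invalidate() callback publishes a new sequence with mmu_interval_set_seq() under a driver-provided lock, and readers detect the collision with mmu_interval_read_retry() under that same lock. A minimal sketch of both sides under the new naming, modelled on the contract in the kernel-doc comments; struct my_interval, driver_lock and the zap/commit steps are hypothetical, only the mmu_interval_* and mmu_notifier_range_blockable() calls come from this header:

	/* Assumes <linux/mmu_notifier.h> and <linux/mutex.h>. */
	struct my_interval {
		struct mmu_interval_notifier notifier;	/* the subscription */
		struct mutex driver_lock;	/* also taken by invalidate() */
	};

	/* Invalidation side: publish the sequence, then unmap. */
	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
				  const struct mmu_notifier_range *range,
				  unsigned long cur_seq)
	{
		struct my_interval *mi =
			container_of(interval_sub, struct my_interval, notifier);

		if (mmu_notifier_range_blockable(range))
			mutex_lock(&mi->driver_lock);
		else if (!mutex_trylock(&mi->driver_lock))
			return false;	/* core code retries blockable */

		mmu_interval_set_seq(interval_sub, cur_seq);
		/* ... zap device PTEs covering range->start .. range->end ... */
		mutex_unlock(&mi->driver_lock);
		return true;
	}

	static const struct mmu_interval_notifier_ops my_interval_ops = {
		.invalidate = my_invalidate,
	};

	/* Read side: prepare outside the lock, commit under it, retry on
	 * collision. */
	static void my_map(struct my_interval *mi)
	{
		unsigned long seq;

	again:
		seq = mmu_interval_read_begin(&mi->notifier);
		/* ... fault in pages, build device PTEs (may sleep) ... */
		mutex_lock(&mi->driver_lock);
		if (mmu_interval_read_retry(&mi->notifier, seq)) {
			mutex_unlock(&mi->driver_lock);
			goto again;	/* an invalidation raced with us */
		}
		/* ... commit the device mapping ... */
		mutex_unlock(&mi->driver_lock);
	}

Registration would pair this with mmu_interval_notifier_insert(&mi->notifier, mm, start, length, &my_interval_ops) and a matching mmu_interval_notifier_remove() on teardown, per the declarations above.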