Skip to content

Commit 5d6527a

Browse files
Jérôme Glisse authored and torvalds committed
mm/mmu_notifier: use structure for invalidate_range_start/end callback
Patch series "mmu notifier contextual informations", v2. This patchset adds contextual information, why an invalidation is happening, to mmu notifier callback. This is necessary for user of mmu notifier that wish to maintains their own data structure without having to add new fields to struct vm_area_struct (vma). For instance device can have they own page table that mirror the process address space. When a vma is unmap (munmap() syscall) the device driver can free the device page table for the range. Today we do not have any information on why a mmu notifier call back is happening and thus device driver have to assume that it is always an munmap(). This is inefficient at it means that it needs to re-allocate device page table on next page fault and rebuild the whole device driver data structure for the range. Other use case beside munmap() also exist, for instance it is pointless for device driver to invalidate the device page table when the invalidation is for the soft dirtyness tracking. Or device driver can optimize away mprotect() that change the page table permission access for the range. This patchset enables all this optimizations for device drivers. I do not include any of those in this series but another patchset I am posting will leverage this. The patchset is pretty simple from a code point of view. The first two patches consolidate all mmu notifier arguments into a struct so that it is easier to add/change arguments. The last patch adds the contextual information (munmap, protection, soft dirty, clear, ...). This patch (of 3): To avoid having to change many callback definition everytime we want to add a parameter use a structure to group all parameters for the mmu_notifier invalidate_range_start/end callback. No functional changes with this patch. 
[[email protected]: fix drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c kerneldoc] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jérôme Glisse <[email protected]> Acked-by: Jan Kara <[email protected]> Acked-by: Jason Gunthorpe <[email protected]> [infiniband] Cc: Matthew Wilcox <[email protected]> Cc: Ross Zwisler <[email protected]> Cc: Dan Williams <[email protected]> Cc: Paolo Bonzini <[email protected]> Cc: Radim Krcmar <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Christian Koenig <[email protected]> Cc: Felix Kuehling <[email protected]> Cc: Ralph Campbell <[email protected]> Cc: John Hubbard <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent b15c872 commit 5d6527a

File tree

12 files changed

+103
-116
lines changed

12 files changed

+103
-116
lines changed

drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

Lines changed: 20 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
238238
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
239239
*
240240
* @mn: our notifier
241-
* @mm: the mm this callback is about
242-
* @start: start of updated range
243-
* @end: end of updated range
241+
* @range: mmu notifier context
244242
*
245243
* Block for operations on BOs to finish and mark pages as accessed and
246244
* potentially dirty.
247245
*/
248246
static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
249-
struct mm_struct *mm,
250-
unsigned long start,
251-
unsigned long end,
252-
bool blockable)
247+
const struct mmu_notifier_range *range)
253248
{
254249
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
255250
struct interval_tree_node *it;
251+
unsigned long end;
256252

257253
/* notification is exclusive, but interval is inclusive */
258-
end -= 1;
254+
end = range->end - 1;
259255

260256
/* TODO we should be able to split locking for interval tree and
261257
* amdgpu_mn_invalidate_node
262258
*/
263-
if (amdgpu_mn_read_lock(amn, blockable))
259+
if (amdgpu_mn_read_lock(amn, range->blockable))
264260
return -EAGAIN;
265261

266-
it = interval_tree_iter_first(&amn->objects, start, end);
262+
it = interval_tree_iter_first(&amn->objects, range->start, end);
267263
while (it) {
268264
struct amdgpu_mn_node *node;
269265

270-
if (!blockable) {
266+
if (!range->blockable) {
271267
amdgpu_mn_read_unlock(amn);
272268
return -EAGAIN;
273269
}
274270

275271
node = container_of(it, struct amdgpu_mn_node, it);
276-
it = interval_tree_iter_next(it, start, end);
272+
it = interval_tree_iter_next(it, range->start, end);
277273

278-
amdgpu_mn_invalidate_node(node, start, end);
274+
amdgpu_mn_invalidate_node(node, range->start, end);
279275
}
280276

281277
return 0;
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
294290
* are restorted in amdgpu_mn_invalidate_range_end_hsa.
295291
*/
296292
static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
297-
struct mm_struct *mm,
298-
unsigned long start,
299-
unsigned long end,
300-
bool blockable)
293+
const struct mmu_notifier_range *range)
301294
{
302295
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
303296
struct interval_tree_node *it;
297+
unsigned long end;
304298

305299
/* notification is exclusive, but interval is inclusive */
306-
end -= 1;
300+
end = range->end - 1;
307301

308-
if (amdgpu_mn_read_lock(amn, blockable))
302+
if (amdgpu_mn_read_lock(amn, range->blockable))
309303
return -EAGAIN;
310304

311-
it = interval_tree_iter_first(&amn->objects, start, end);
305+
it = interval_tree_iter_first(&amn->objects, range->start, end);
312306
while (it) {
313307
struct amdgpu_mn_node *node;
314308
struct amdgpu_bo *bo;
315309

316-
if (!blockable) {
310+
if (!range->blockable) {
317311
amdgpu_mn_read_unlock(amn);
318312
return -EAGAIN;
319313
}
320314

321315
node = container_of(it, struct amdgpu_mn_node, it);
322-
it = interval_tree_iter_next(it, start, end);
316+
it = interval_tree_iter_next(it, range->start, end);
323317

324318
list_for_each_entry(bo, &node->bos, mn_list) {
325319
struct kgd_mem *mem = bo->kfd_bo;
326320

327321
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
328-
start, end))
329-
amdgpu_amdkfd_evict_userptr(mem, mm);
322+
range->start,
323+
end))
324+
amdgpu_amdkfd_evict_userptr(mem, range->mm);
330325
}
331326
}
332327

@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
344339
* Release the lock again to allow new command submissions.
345340
*/
346341
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
347-
struct mm_struct *mm,
348-
unsigned long start,
349-
unsigned long end)
342+
const struct mmu_notifier_range *range)
350343
{
351344
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
352345

drivers/gpu/drm/i915/i915_gem_userptr.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo)
113113
}
114114

115115
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
116-
struct mm_struct *mm,
117-
unsigned long start,
118-
unsigned long end,
119-
bool blockable)
116+
const struct mmu_notifier_range *range)
120117
{
121118
struct i915_mmu_notifier *mn =
122119
container_of(_mn, struct i915_mmu_notifier, mn);
123120
struct i915_mmu_object *mo;
124121
struct interval_tree_node *it;
125122
LIST_HEAD(cancelled);
123+
unsigned long end;
126124

127125
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
128126
return 0;
129127

130128
/* interval ranges are inclusive, but invalidate range is exclusive */
131-
end--;
129+
end = range->end - 1;
132130

133131
spin_lock(&mn->lock);
134-
it = interval_tree_iter_first(&mn->objects, start, end);
132+
it = interval_tree_iter_first(&mn->objects, range->start, end);
135133
while (it) {
136-
if (!blockable) {
134+
if (!range->blockable) {
137135
spin_unlock(&mn->lock);
138136
return -EAGAIN;
139137
}
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
151149
queue_work(mn->wq, &mo->work);
152150

153151
list_add(&mo->link, &cancelled);
154-
it = interval_tree_iter_next(it, start, end);
152+
it = interval_tree_iter_next(it, range->start, end);
155153
}
156154
list_for_each_entry(mo, &cancelled, link)
157155
del_object(mo);

drivers/gpu/drm/radeon/radeon_mn.c

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
119119
* unmap them by move them into system domain again.
120120
*/
121121
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
122-
struct mm_struct *mm,
123-
unsigned long start,
124-
unsigned long end,
125-
bool blockable)
122+
const struct mmu_notifier_range *range)
126123
{
127124
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
128125
struct ttm_operation_ctx ctx = { false, false };
129126
struct interval_tree_node *it;
127+
unsigned long end;
130128
int ret = 0;
131129

132130
/* notification is exclusive, but interval is inclusive */
133-
end -= 1;
131+
end = range->end - 1;
134132

135133
/* TODO we should be able to split locking for interval tree and
136134
* the tear down.
137135
*/
138-
if (blockable)
136+
if (range->blockable)
139137
mutex_lock(&rmn->lock);
140138
else if (!mutex_trylock(&rmn->lock))
141139
return -EAGAIN;
142140

143-
it = interval_tree_iter_first(&rmn->objects, start, end);
141+
it = interval_tree_iter_first(&rmn->objects, range->start, end);
144142
while (it) {
145143
struct radeon_mn_node *node;
146144
struct radeon_bo *bo;
147145
long r;
148146

149-
if (!blockable) {
147+
if (!range->blockable) {
150148
ret = -EAGAIN;
151149
goto out_unlock;
152150
}
153151

154152
node = container_of(it, struct radeon_mn_node, it);
155-
it = interval_tree_iter_next(it, start, end);
153+
it = interval_tree_iter_next(it, range->start, end);
156154

157155
list_for_each_entry(bo, &node->bos, mn_list) {
158156

drivers/infiniband/core/umem_odp.c

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -146,15 +146,12 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
146146
}
147147

148148
static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
149-
struct mm_struct *mm,
150-
unsigned long start,
151-
unsigned long end,
152-
bool blockable)
149+
const struct mmu_notifier_range *range)
153150
{
154151
struct ib_ucontext_per_mm *per_mm =
155152
container_of(mn, struct ib_ucontext_per_mm, mn);
156153

157-
if (blockable)
154+
if (range->blockable)
158155
down_read(&per_mm->umem_rwsem);
159156
else if (!down_read_trylock(&per_mm->umem_rwsem))
160157
return -EAGAIN;
@@ -169,9 +166,10 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
169166
return 0;
170167
}
171168

172-
return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
169+
return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
170+
range->end,
173171
invalidate_range_start_trampoline,
174-
blockable, NULL);
172+
range->blockable, NULL);
175173
}
176174

177175
static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -182,18 +180,16 @@ static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
182180
}
183181

184182
static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
185-
struct mm_struct *mm,
186-
unsigned long start,
187-
unsigned long end)
183+
const struct mmu_notifier_range *range)
188184
{
189185
struct ib_ucontext_per_mm *per_mm =
190186
container_of(mn, struct ib_ucontext_per_mm, mn);
191187

192188
if (unlikely(!per_mm->active))
193189
return;
194190

195-
rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
196-
end,
191+
rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
192+
range->end,
197193
invalidate_range_end_trampoline, true, NULL);
198194
up_read(&per_mm->umem_rwsem);
199195
}

drivers/infiniband/hw/hfi1/mmu_rb.c

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,7 @@ struct mmu_rb_handler {
6868
static unsigned long mmu_node_start(struct mmu_rb_node *);
6969
static unsigned long mmu_node_last(struct mmu_rb_node *);
7070
static int mmu_notifier_range_start(struct mmu_notifier *,
71-
struct mm_struct *,
72-
unsigned long, unsigned long, bool);
71+
const struct mmu_notifier_range *);
7372
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
7473
unsigned long, unsigned long);
7574
static void do_remove(struct mmu_rb_handler *handler,
@@ -284,10 +283,7 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
284283
}
285284

286285
static int mmu_notifier_range_start(struct mmu_notifier *mn,
287-
struct mm_struct *mm,
288-
unsigned long start,
289-
unsigned long end,
290-
bool blockable)
286+
const struct mmu_notifier_range *range)
291287
{
292288
struct mmu_rb_handler *handler =
293289
container_of(mn, struct mmu_rb_handler, mn);
@@ -297,10 +293,11 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
297293
bool added = false;
298294

299295
spin_lock_irqsave(&handler->lock, flags);
300-
for (node = __mmu_int_rb_iter_first(root, start, end - 1);
296+
for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
301297
node; node = ptr) {
302298
/* Guard against node removal. */
303-
ptr = __mmu_int_rb_iter_next(node, start, end - 1);
299+
ptr = __mmu_int_rb_iter_next(node, range->start,
300+
range->end - 1);
304301
trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
305302
if (handler->ops->invalidate(handler->ops_arg, node)) {
306303
__mmu_int_rb_remove(node, root);

drivers/misc/mic/scif/scif_dma.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -201,23 +201,18 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
201201
}
202202

203203
static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
204-
struct mm_struct *mm,
205-
unsigned long start,
206-
unsigned long end,
207-
bool blockable)
204+
const struct mmu_notifier_range *range)
208205
{
209206
struct scif_mmu_notif *mmn;
210207

211208
mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
212-
scif_rma_destroy_tcw(mmn, start, end - start);
209+
scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);
213210

214211
return 0;
215212
}
216213

217214
static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
218-
struct mm_struct *mm,
219-
unsigned long start,
220-
unsigned long end)
215+
const struct mmu_notifier_range *range)
221216
{
222217
/*
223218
* Nothing to do here, everything needed was done in

drivers/misc/sgi-gru/grutlbpurge.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -220,25 +220,22 @@ void gru_flush_all_tlb(struct gru_state *gru)
220220
* MMUOPS notifier callout functions
221221
*/
222222
static int gru_invalidate_range_start(struct mmu_notifier *mn,
223-
struct mm_struct *mm,
224-
unsigned long start, unsigned long end,
225-
bool blockable)
223+
const struct mmu_notifier_range *range)
226224
{
227225
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
228226
ms_notifier);
229227

230228
STAT(mmu_invalidate_range);
231229
atomic_inc(&gms->ms_range_active);
232230
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
233-
start, end, atomic_read(&gms->ms_range_active));
234-
gru_flush_tlb_range(gms, start, end - start);
231+
range->start, range->end, atomic_read(&gms->ms_range_active));
232+
gru_flush_tlb_range(gms, range->start, range->end - range->start);
235233

236234
return 0;
237235
}
238236

239237
static void gru_invalidate_range_end(struct mmu_notifier *mn,
240-
struct mm_struct *mm, unsigned long start,
241-
unsigned long end)
238+
const struct mmu_notifier_range *range)
242239
{
243240
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
244241
ms_notifier);
@@ -247,7 +244,8 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
247244
(void)atomic_dec_and_test(&gms->ms_range_active);
248245

249246
wake_up_all(&gms->ms_wait_queue);
250-
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
247+
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
248+
gms, range->start, range->end);
251249
}
252250

253251
static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)

drivers/xen/gntdev.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -520,26 +520,26 @@ static int unmap_if_in_range(struct gntdev_grant_map *map,
520520
}
521521

522522
static int mn_invl_range_start(struct mmu_notifier *mn,
523-
struct mm_struct *mm,
524-
unsigned long start, unsigned long end,
525-
bool blockable)
523+
const struct mmu_notifier_range *range)
526524
{
527525
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
528526
struct gntdev_grant_map *map;
529527
int ret = 0;
530528

531-
if (blockable)
529+
if (range->blockable)
532530
mutex_lock(&priv->lock);
533531
else if (!mutex_trylock(&priv->lock))
534532
return -EAGAIN;
535533

536534
list_for_each_entry(map, &priv->maps, next) {
537-
ret = unmap_if_in_range(map, start, end, blockable);
535+
ret = unmap_if_in_range(map, range->start, range->end,
536+
range->blockable);
538537
if (ret)
539538
goto out_unlock;
540539
}
541540
list_for_each_entry(map, &priv->freeable_maps, next) {
542-
ret = unmap_if_in_range(map, start, end, blockable);
541+
ret = unmap_if_in_range(map, range->start, range->end,
542+
range->blockable);
543543
if (ret)
544544
goto out_unlock;
545545
}

0 commit comments

Comments
 (0)