@@ -25,18 +25,35 @@ struct arm_smmu_mmu_notifier {
 #define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
 
 struct arm_smmu_bond {
-	struct iommu_sva		sva;
 	struct mm_struct		*mm;
 	struct arm_smmu_mmu_notifier	*smmu_mn;
 	struct list_head		list;
-	refcount_t			refs;
 };
 
 #define sva_to_bond(handle) \
 	container_of(handle, struct arm_smmu_bond, sva)
 
 static DEFINE_MUTEX(sva_lock);
 
+/*
+ * Write the CD to the CD tables for all masters that this domain is attached
+ * to. Note that this is only used to update existing CD entries in the target
+ * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
+ */
+static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
+					     int ssid,
+					     struct arm_smmu_ctx_desc *cd)
+{
+	struct arm_smmu_master *master;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		arm_smmu_write_ctx_desc(master, ssid, cd);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 		return cd;
 	}
 
-	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+	smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
 	smmu = smmu_domain->smmu;
 
 	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
@@ -186,6 +203,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
 	}
 }
 
+/*
+ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
+ * is used as a threshold to replace per-page TLBI commands to issue in the
+ * command queue with an address-space TLBI command, when SMMU w/o a range
+ * invalidation feature handles too many per-page TLBI commands, which will
+ * otherwise result in a soft lockup.
+ */
+#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
+
 static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 						struct mm_struct *mm,
 						unsigned long start,
@@ -201,8 +227,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 	 * range. So do a simple translation here by calculating size correctly.
 	 */
 	size = end - start;
-	if (size == ULONG_MAX)
-		size = 0;
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+			size = 0;
+	} else {
+		if (size == ULONG_MAX)
+			size = 0;
+	}
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
 		if (!size)
@@ -233,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
-	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+	arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
 
 	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -259,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 			  struct mm_struct *mm)
 {
 	int ret;
+	unsigned long flags;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_mmu_notifier *smmu_mn;
+	struct arm_smmu_master *master;
 
 	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
 		if (smmu_mn->mn.mm == mm) {
@@ -290,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 		goto err_free_cd;
 	}
 
-	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
+		if (ret) {
+			list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
+				arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 	if (ret)
 		goto err_put_notifier;
 
@@ -315,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 		return;
 
 	list_del(&smmu_mn->list);
-	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+	arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
 
 	/*
 	 * If we went through clear(), we've already invalidated, and no
@@ -331,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 	arm_smmu_free_shared_cd(cd);
 }
 
-static struct iommu_sva *
-__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 {
 	int ret;
 	struct arm_smmu_bond *bond;
@@ -341,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	if (!master || !master->sva_enabled)
-		return ERR_PTR(-ENODEV);
-
-	/* If bind() was already called for this {dev, mm} pair, reuse it. */
-	list_for_each_entry(bond, &master->bonds, list) {
-		if (bond->mm == mm) {
-			refcount_inc(&bond->refs);
-			return &bond->sva;
-		}
-	}
+		return -ENODEV;
 
 	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
 	if (!bond)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	bond->mm = mm;
-	bond->sva.dev = dev;
-	refcount_set(&bond->refs, 1);
 
 	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
 	if (IS_ERR(bond->smmu_mn)) {
@@ -366,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 	}
 
 	list_add(&bond->list, &master->bonds);
-	return &bond->sva;
+	return 0;
 
 err_free_bond:
 	kfree(bond);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -536,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 		}
 	}
 
-	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+	if (!WARN_ON(!bond)) {
 		list_del(&bond->list);
 		arm_smmu_mmu_notifier_put(bond->smmu_mn);
 		kfree(bond);
@@ -548,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 				      struct device *dev, ioasid_t id)
 {
 	int ret = 0;
-	struct iommu_sva *handle;
 	struct mm_struct *mm = domain->mm;
 
 	mutex_lock(&sva_lock);
-	handle = __arm_smmu_sva_bind(dev, mm);
-	if (IS_ERR(handle))
-		ret = PTR_ERR(handle);
+	ret = __arm_smmu_sva_bind(dev, mm);
 	mutex_unlock(&sva_lock);
 
 	return ret;
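
A note on the CMDQ_MAX_TLBI_OPS threshold introduced above: the expression (1 << (PAGE_SHIFT - 3)) is PAGE_SIZE / 8, i.e. the number of 8-byte page-table entries per page, mirroring MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. The standalone sketch below works the arithmetic through for an assumed 4 KiB page size (PAGE_SHIFT = 12); the PAGE_SHIFT/PAGE_SIZE definitions here are illustrative stand-ins for the kernel macros, not taken from the patch.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's PAGE_SHIFT/PAGE_SIZE (4 KiB pages assumed). */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
/* Same expression as the patch: PAGE_SIZE / 8, the number of 8-byte PTEs per page. */
#define CMDQ_MAX_TLBI_OPS	(1 << (PAGE_SHIFT - 3))

int main(void)
{
	/* With 4 KiB pages this is 512 per-page TLBI commands, i.e. a 2 MiB range. */
	unsigned long threshold = CMDQ_MAX_TLBI_OPS * PAGE_SIZE;

	printf("ops threshold: %d, byte threshold: %lu (%lu MiB)\n",
	       CMDQ_MAX_TLBI_OPS, threshold, threshold >> 20);
	/*
	 * Ranges at or above this threshold are collapsed to size = 0 in
	 * arm_smmu_mm_arch_invalidate_secondary_tlbs() on SMMUs without
	 * ARM_SMMU_FEAT_RANGE_INV, so a single ASID-wide invalidation is issued
	 * instead of flooding the command queue with per-page TLBIs.
	 */
	return 0;
}

With 64 KiB pages (PAGE_SHIFT = 16) the same expression gives 8192 ops, so the fallback only kicks in for ranges of 512 MiB or more.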
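The per-master loop added in arm_smmu_mmu_notifier_get() above is a write-then-unwind pattern: program the CD entry on each master attached to the domain, and on the first failure walk back over the masters already programmed and clear their entries again (the patch does the backwards walk with list_for_each_entry_from_reverse(), which also re-clears the failing master; that extra clear is harmless). Below is a minimal userspace sketch of the same pattern, with hypothetical install_entry()/remove_entry() helpers standing in for arm_smmu_write_ctx_desc(); the failure on the third element is contrived to exercise the rollback path.

#include <stdio.h>

/* Hypothetical stand-in for programming a CD entry on one master. */
static int install_entry(int master_id)
{
	/* Pretend the third master fails, to exercise the rollback path. */
	if (master_id == 2)
		return -1;
	printf("installed entry on master %d\n", master_id);
	return 0;
}

/* Hypothetical stand-in for clearing a CD entry (writing a NULL cd). */
static void remove_entry(int master_id)
{
	printf("removed entry on master %d\n", master_id);
}

int main(void)
{
	int masters[] = { 0, 1, 2, 3 };
	int n = sizeof(masters) / sizeof(masters[0]);
	int ret = 0;

	for (int i = 0; i < n; i++) {
		ret = install_entry(masters[i]);
		if (ret) {
			/* Undo only the masters that were already programmed,
			 * a simplified version of the backwards unwind in the patch. */
			for (int j = i - 1; j >= 0; j--)
				remove_entry(masters[j]);
			break;
		}
	}
	return ret ? 1 : 0;
}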