@@ -909,8 +909,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		for (i = 0; i < master->num_sids; i++) {
-			cmd.cfgi.sid = master->sids[i];
+		for (i = 0; i < master->num_streams; i++) {
+			cmd.cfgi.sid = master->streams[i].id;
 			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
 		}
 	}
@@ -1355,6 +1355,29 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 	return 0;
 }
 
+__maybe_unused
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+	struct rb_node *node;
+	struct arm_smmu_stream *stream;
+
+	lockdep_assert_held(&smmu->streams_mutex);
+
+	node = smmu->streams.rb_node;
+	while (node) {
+		stream = rb_entry(node, struct arm_smmu_stream, node);
+		if (stream->id < sid)
+			node = node->rb_right;
+		else if (stream->id > sid)
+			node = node->rb_left;
+		else
+			return stream->master;
+	}
+
+	return NULL;
+}
+
 /* IRQ and event handlers */
 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
 {
@@ -1588,8 +1611,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 
 	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
 
-	for (i = 0; i < master->num_sids; i++) {
-		cmd.atc.sid = master->sids[i];
+	for (i = 0; i < master->num_streams; i++) {
+		cmd.atc.sid = master->streams[i].id;
 		arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
 	}
 
@@ -1632,8 +1655,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 		if (!master->ats_enabled)
 			continue;
 
-		for (i = 0; i < master->num_sids; i++) {
-			cmd.atc.sid = master->sids[i];
+		for (i = 0; i < master->num_streams; i++) {
+			cmd.atc.sid = master->streams[i].id;
 			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
 		}
 	}
@@ -2065,13 +2088,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
 	int i, j;
 	struct arm_smmu_device *smmu = master->smmu;
 
-	for (i = 0; i < master->num_sids; ++i) {
-		u32 sid = master->sids[i];
+	for (i = 0; i < master->num_streams; ++i) {
+		u32 sid = master->streams[i].id;
 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
 		/* Bridged PCI devices may end up with duplicated IDs */
 		for (j = 0; j < i; j++)
-			if (master->sids[j] == sid)
+			if (master->streams[j].id == sid)
 				break;
 		if (j < i)
 			continue;
@@ -2345,11 +2368,101 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
 	return sid < limit;
 }
 
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+				  struct arm_smmu_master *master)
+{
+	int i;
+	int ret = 0;
+	struct arm_smmu_stream *new_stream, *cur_stream;
+	struct rb_node **new_node, *parent_node = NULL;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+	master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
+				  GFP_KERNEL);
+	if (!master->streams)
+		return -ENOMEM;
+	master->num_streams = fwspec->num_ids;
+
+	mutex_lock(&smmu->streams_mutex);
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u32 sid = fwspec->ids[i];
+
+		new_stream = &master->streams[i];
+		new_stream->id = sid;
+		new_stream->master = master;
+
+		/*
+		 * Check the SIDs are in range of the SMMU and our stream table
+		 */
+		if (!arm_smmu_sid_in_range(smmu, sid)) {
+			ret = -ERANGE;
+			break;
+		}
+
+		/* Ensure l2 strtab is initialised */
+		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+			ret = arm_smmu_init_l2_strtab(smmu, sid);
+			if (ret)
+				break;
+		}
+
+		/* Insert into SID tree */
+		new_node = &(smmu->streams.rb_node);
+		while (*new_node) {
+			cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+					      node);
+			parent_node = *new_node;
+			if (cur_stream->id > new_stream->id) {
+				new_node = &((*new_node)->rb_left);
+			} else if (cur_stream->id < new_stream->id) {
+				new_node = &((*new_node)->rb_right);
+			} else {
+				dev_warn(master->dev,
+					 "stream %u already in tree\n",
+					 cur_stream->id);
+				ret = -EINVAL;
+				break;
+			}
+		}
+		if (ret)
+			break;
+
+		rb_link_node(&new_stream->node, parent_node, new_node);
+		rb_insert_color(&new_stream->node, &smmu->streams);
+	}
+
+	if (ret) {
+		for (i--; i >= 0; i--)
+			rb_erase(&master->streams[i].node, &smmu->streams);
+		kfree(master->streams);
+	}
+	mutex_unlock(&smmu->streams_mutex);
+
+	return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+	int i;
+	struct arm_smmu_device *smmu = master->smmu;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+	if (!smmu || !master->streams)
+		return;
+
+	mutex_lock(&smmu->streams_mutex);
+	for (i = 0; i < fwspec->num_ids; i++)
+		rb_erase(&master->streams[i].node, &smmu->streams);
+	mutex_unlock(&smmu->streams_mutex);
+
+	kfree(master->streams);
+}
+
 static struct iommu_ops arm_smmu_ops;
 
 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 {
-	int i, ret;
+	int ret;
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master *master;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -2370,27 +2483,12 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 
 	master->dev = dev;
 	master->smmu = smmu;
-	master->sids = fwspec->ids;
-	master->num_sids = fwspec->num_ids;
 	INIT_LIST_HEAD(&master->bonds);
 	dev_iommu_priv_set(dev, master);
 
-	/* Check the SIDs are in range of the SMMU and our stream table */
-	for (i = 0; i < master->num_sids; i++) {
-		u32 sid = master->sids[i];
-
-		if (!arm_smmu_sid_in_range(smmu, sid)) {
-			ret = -ERANGE;
-			goto err_free_master;
-		}
-
-		/* Ensure l2 strtab is initialised */
-		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
-			ret = arm_smmu_init_l2_strtab(smmu, sid);
-			if (ret)
-				goto err_free_master;
-		}
-	}
+	ret = arm_smmu_insert_master(smmu, master);
+	if (ret)
+		goto err_free_master;
 
 	device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
 	master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
@@ -2429,6 +2527,7 @@ static void arm_smmu_release_device(struct device *dev)
 	WARN_ON(arm_smmu_master_sva_enabled(master));
 	arm_smmu_detach_dev(master);
 	arm_smmu_disable_pasid(master);
+	arm_smmu_remove_master(master);
 	kfree(master);
 	iommu_fwspec_free(dev);
 }
@@ -2852,6 +2951,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
 {
 	int ret;
 
+	mutex_init(&smmu->streams_mutex);
+	smmu->streams = RB_ROOT;
+
 	ret = arm_smmu_init_queues(smmu);
 	if (ret)
 		return ret;
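
Note: arm_smmu_find_master() is added here marked __maybe_unused, so nothing in this diff calls it yet. The sketch below shows one way a later consumer (for example an event-queue handler attributing a fault) might resolve a StreamID back to the owning device. The helper name and its caller are assumptions; only arm_smmu_find_master(), streams_mutex and master->dev come from this patch.

/*
 * Hypothetical helper, not part of this patch: translate a StreamID taken
 * from an event record back to the struct device that owns it. The rb-tree
 * walk must run under streams_mutex, which arm_smmu_find_master() checks
 * with lockdep_assert_held().
 */
static struct device *arm_smmu_sid_to_dev(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_master *master;
	struct device *dev = NULL;

	mutex_lock(&smmu->streams_mutex);
	master = arm_smmu_find_master(smmu, sid);
	if (master)
		dev = master->dev;
	mutex_unlock(&smmu->streams_mutex);

	return dev;
}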