@@ -347,58 +347,140 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
 	}
 }
 
-static int __show_device_domain_translation(struct device *dev, void *data)
+static int domain_translation_struct_show(struct seq_file *m,
+					  struct device_domain_info *info,
+					  ioasid_t pasid)
 {
-	struct dmar_domain *domain;
-	struct seq_file *m = data;
-	u64 path[6] = { 0 };
-
-	domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
-	if (!domain)
-		return 0;
+	bool scalable, found = false;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	u16 devfn, bus, seg;
 
-	seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
-		   (u64)virt_to_phys(domain->pgd));
-	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+	bus = info->bus;
+	devfn = info->devfn;
+	seg = info->segment;
 
-	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
-	seq_putc(m, '\n');
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		struct context_entry *context;
+		u64 pgd, path[6] = { 0 };
+		u32 sts, agaw;
 
-	/* Don't iterate */
-	return 1;
-}
+		if (seg != iommu->segment)
+			continue;
 
-static int show_device_domain_translation(struct device *dev, void *data)
-{
-	struct iommu_group *group;
+		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+		if (!(sts & DMA_GSTS_TES)) {
+			seq_printf(m, "DMA Remapping is not enabled on %s\n",
+				   iommu->name);
+			continue;
+		}
+		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
+			scalable = true;
+		else
+			scalable = false;
 
-	group = iommu_group_get(dev);
-	if (group) {
 		/*
-		 * The group->mutex is held across the callback, which will
-		 * block calls to iommu_attach/detach_group/device. Hence,
+		 * The iommu->lock is held across the callback, which will
+		 * block calls to domain_attach/domain_detach. Hence,
 		 * the domain of the device will not change during traversal.
 		 *
-		 * All devices in an iommu group share a single domain, hence
-		 * we only dump the domain of the first device. Even though,
-		 * this code still possibly races with the iommu_unmap()
+		 * Traversing page table possibly races with the iommu_unmap()
 		 * interface. This could be solved by RCU-freeing the page
 		 * table pages in the iommu_unmap() path.
 		 */
-		iommu_group_for_each_dev(group, data,
-					 __show_device_domain_translation);
-		iommu_group_put(group);
+		spin_lock(&iommu->lock);
+
+		context = iommu_context_addr(iommu, bus, devfn, 0);
+		if (!context || !context_present(context))
+			goto iommu_unlock;
+
+		if (scalable) {	/* scalable mode */
+			struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
+			struct pasid_dir_entry *dir_tbl, *dir_entry;
+			u16 dir_idx, tbl_idx, pgtt;
+			u64 pasid_dir_ptr;
+
+			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+
+			/* Dump specified device domain mappings with PASID. */
+			dir_idx = pasid >> PASID_PDE_SHIFT;
+			tbl_idx = pasid & PASID_PTE_MASK;
+
+			dir_tbl = phys_to_virt(pasid_dir_ptr);
+			dir_entry = &dir_tbl[dir_idx];
+
+			pasid_tbl = get_pasid_table_from_pde(dir_entry);
+			if (!pasid_tbl)
+				goto iommu_unlock;
+
+			pasid_tbl_entry = &pasid_tbl[tbl_idx];
+			if (!pasid_pte_is_present(pasid_tbl_entry))
+				goto iommu_unlock;
+
+			/*
+			 * According to PASID Granular Translation Type(PGTT),
+			 * get the page table pointer.
+			 */
+			pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
+			agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;
+
+			switch (pgtt) {
+			case PASID_ENTRY_PGTT_FL_ONLY:
+				pgd = pasid_tbl_entry->val[2];
+				break;
+			case PASID_ENTRY_PGTT_SL_ONLY:
+			case PASID_ENTRY_PGTT_NESTED:
+				pgd = pasid_tbl_entry->val[0];
+				break;
+			default:
+				goto iommu_unlock;
+			}
+			pgd &= VTD_PAGE_MASK;
+		} else { /* legacy mode */
+			pgd = context->lo & VTD_PAGE_MASK;
+			agaw = context->hi & 7;
+		}
+
+		seq_printf(m, "Device %04x:%02x:%02x.%x ",
+			   iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+		if (scalable)
+			seq_printf(m, "with pasid %x @0x%llx\n", pasid, pgd);
+		else
+			seq_printf(m, "@0x%llx\n", pgd);
+
+		seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
+			   "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
+		pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);
+
+		found = true;
+iommu_unlock:
+		spin_unlock(&iommu->lock);
+		if (found)
+			break;
 	}
+	rcu_read_unlock();
 
 	return 0;
 }
 
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int dev_domain_translation_struct_show(struct seq_file *m, void *unused)
 {
-	return bus_for_each_dev(&pci_bus_type, NULL, m,
-				show_device_domain_translation);
+	struct device_domain_info *info = (struct device_domain_info *)m->private;
+
+	return domain_translation_struct_show(m, info, IOMMU_NO_PASID);
 }
-DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+DEFINE_SHOW_ATTRIBUTE(dev_domain_translation_struct);
+
+static int pasid_domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+	struct dev_pasid_info *dev_pasid = (struct dev_pasid_info *)m->private;
+	struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+
+	return domain_translation_struct_show(m, info, dev_pasid->pasid);
+}
+DEFINE_SHOW_ATTRIBUTE(pasid_domain_translation_struct);
 
 static void invalidation_queue_entry_show(struct seq_file *m,
 					  struct intel_iommu *iommu)
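
Note on the scalable-mode branch above: a PASID selects a two-level lookup. The upper bits of the PASID index the PASID directory, the low six bits index the PASID table that the directory entry points to, and the low qword of the table entry carries the PASID Granular Translation Type (bits 8:6) and AGAW (bits 4:2) fields that the code extracts with GENMASK_ULL(). The userspace sketch below replays just that bit arithmetic with plain constants; the PASID_PDE_SHIFT and PASID_PTE_MASK values mirror what drivers/iommu/intel/pasid.h defines at the time of this patch, and the PASID and entry value are invented for illustration.

/* Userspace illustration only; constants are assumptions mirroring
 * drivers/iommu/intel/pasid.h (64 PASID-table entries per directory entry).
 */
#include <stdint.h>
#include <stdio.h>

#define PASID_PDE_SHIFT	6	/* assumed: log2(entries per PASID table) */
#define PASID_PTE_MASK	0x3FULL	/* assumed: low 6 bits index within a table */

int main(void)
{
	uint32_t pasid = 0x1234;			/* hypothetical PASID */
	uint32_t dir_idx = pasid >> PASID_PDE_SHIFT;	/* PASID directory slot */
	uint32_t tbl_idx = pasid & PASID_PTE_MASK;	/* slot within that table */

	/* Fabricated low qword of a PASID table entry: PGTT in bits 8:6 and
	 * AGAW in bits 4:2 (GENMASK_ULL(8, 6) == 0x1c0, GENMASK_ULL(4, 2) == 0x1c).
	 */
	uint64_t val0 = (2ULL << 6) | (2ULL << 2);
	unsigned int pgtt = (val0 & 0x1c0) >> 6;	/* 2: second-level only */
	unsigned int agaw = (val0 & 0x1c) >> 2;		/* agaw + 2 = 4-level table */

	printf("pasid 0x%x -> dir_idx %u, tbl_idx %u, pgtt %u, agaw %u\n",
	       pasid, dir_idx, tbl_idx, pgtt, agaw);
	return 0;
}

For pasid 0x1234 this prints dir_idx 72 and tbl_idx 52; the agaw + 2 term is exactly what the patch feeds to pgtable_walk_level() as the walk depth.
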
@@ -700,7 +782,7 @@ void intel_iommu_debugfs_create_dev(struct device_domain_info *info)
 	info->debugfs_dentry = debugfs_create_dir(dev_name(info->dev), intel_iommu_debug);
 
 	debugfs_create_file("domain_translation_struct", 0444, info->debugfs_dentry,
-			    NULL, &domain_translation_struct_fops);
+			    info, &dev_domain_translation_struct_fops);
 }
 
 /* Remove the device debugfs directory. */
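
This hunk is where the per-device node picks up its private data: debugfs_create_file() stores its fourth argument in the inode, and the fops generated by DEFINE_SHOW_ATTRIBUTE() hand it to the show callback as m->private via single_open(). A minimal sketch of that pattern, using hypothetical foo names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical context structure for illustration. */
struct foo_ctx {
	int value;
};

static int foo_show(struct seq_file *m, void *unused)
{
	/* The pointer passed as the 4th argument of debugfs_create_file()
	 * arrives here as m->private (single_open() on inode->i_private).
	 */
	struct foo_ctx *ctx = m->private;

	seq_printf(m, "value: %d\n", ctx->value);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* generates foo_fops around foo_show() */

static void foo_debugfs_init(struct dentry *parent, struct foo_ctx *ctx)
{
	debugfs_create_file("foo", 0444, parent, ctx, &foo_fops);
}

That plumbing is why the NULL passed by the old bus-wide walker can become info here: each device's file now carries its own struct device_domain_info instead of iterating the whole PCI bus.
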
@@ -726,6 +808,9 @@ void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid)
 
 	sprintf(dir_name, "%x", dev_pasid->pasid);
 	dev_pasid->debugfs_dentry = debugfs_create_dir(dir_name, info->debugfs_dentry);
+
+	debugfs_create_file("domain_translation_struct", 0444, dev_pasid->debugfs_dentry,
+			    dev_pasid, &pasid_domain_translation_struct_fops);
 }
 
 /* Remove the device pasid debugfs directory. */
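
With all three hunks applied, each device directory under the driver's debugfs root (normally /sys/kernel/debug/iommu/intel/<device>/, assuming the standard debugfs mount) keeps its domain_translation_struct node, and every PASID subdirectory gains one of its own at <device>/<pasid>/domain_translation_struct. Given the seq_printf() formats above, reading the per-PASID node would emit something like the following (device, PASID, and address invented for illustration), followed by one row per mapped IOVA page:

Device 0000:00:02.0 with pasid 1 @0x10d3e6000
IOVA_PFN         	PML5E             	PML4E             	PDPE              	PDE               	PTE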