10
10
#include <linux/dma-mapping.h>
11
11
#include <linux/iommu.h>
12
12
#include <linux/io.h>
13
+ #include <linux/soc/qcom/smem.h>
13
14
14
15
#include "ipa.h"
15
16
#include "ipa_reg.h"
23
24
/* "Canary" value placed between memory regions to detect overflow */
24
25
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
25
26
27
+ /* SMEM host id representing the modem. */
28
+ #define QCOM_SMEM_HOST_MODEM 1
29
+
26
30
/* Add an immediate command to a transaction that zeroes a memory region */
27
31
static void
28
32
ipa_mem_zero_region_add (struct gsi_trans * trans , const struct ipa_mem * mem )
@@ -340,6 +344,111 @@ static void ipa_imem_exit(struct ipa *ipa)
340
344
ipa -> imem_iova = 0 ;
341
345
}
342
346
347
+ /**
348
+ * ipa_smem_init() - Initialize SMEM memory used by the IPA
349
+ * @ipa: IPA pointer
350
+ * @item: Item ID of SMEM memory
351
+ * @size: Size (bytes) of SMEM memory region
352
+ *
353
+ * SMEM is a managed block of shared DRAM, from which numbered "items"
354
+ * can be allocated. One item is designated for use by the IPA.
355
+ *
356
+ * The modem accesses SMEM memory directly, but the IPA accesses it
357
+ * via the IOMMU, using the AP's credentials.
358
+ *
359
+ * If size provided is non-zero, we allocate it and map it for
360
+ * access through the IOMMU.
361
+ *
362
+ * Note: @size and the item address are is not guaranteed to be page-aligned.
363
+ */
364
+ static int ipa_smem_init (struct ipa * ipa , u32 item , size_t size )
365
+ {
366
+ struct device * dev = & ipa -> pdev -> dev ;
367
+ struct iommu_domain * domain ;
368
+ unsigned long iova ;
369
+ phys_addr_t phys ;
370
+ phys_addr_t addr ;
371
+ size_t actual ;
372
+ void * virt ;
373
+ int ret ;
374
+
375
+ if (!size )
376
+ return 0 ; /* SMEM memory not used */
377
+
378
+ /* SMEM is memory shared between the AP and another system entity
379
+ * (in this case, the modem). An allocation from SMEM is persistent
380
+ * until the AP reboots; there is no way to free an allocated SMEM
381
+ * region. Allocation only reserves the space; to use it you need
382
+ * to "get" a pointer it (this implies no reference counting).
383
+ * The item might have already been allocated, in which case we
384
+ * use it unless the size isn't what we expect.
385
+ */
386
+ ret = qcom_smem_alloc (QCOM_SMEM_HOST_MODEM , item , size );
387
+ if (ret && ret != - EEXIST ) {
388
+ dev_err (dev , "error %d allocating size %zu SMEM item %u\n" ,
389
+ ret , size , item );
390
+ return ret ;
391
+ }
392
+
393
+ /* Now get the address of the SMEM memory region */
394
+ virt = qcom_smem_get (QCOM_SMEM_HOST_MODEM , item , & actual );
395
+ if (IS_ERR (virt )) {
396
+ ret = PTR_ERR (virt );
397
+ dev_err (dev , "error %d getting SMEM item %u\n" , ret , item );
398
+ return ret ;
399
+ }
400
+
401
+ /* In case the region was already allocated, verify the size */
402
+ if (ret && actual != size ) {
403
+ dev_err (dev , "SMEM item %u has size %zu, expected %zu\n" ,
404
+ item , actual , size );
405
+ return - EINVAL ;
406
+ }
407
+
408
+ domain = iommu_get_domain_for_dev (dev );
409
+ if (!domain ) {
410
+ dev_err (dev , "no IOMMU domain found for SMEM\n" );
411
+ return - EINVAL ;
412
+ }
413
+
414
+ /* Align the address down and the size up to a page boundary */
415
+ addr = qcom_smem_virt_to_phys (virt ) & PAGE_MASK ;
416
+ phys = addr & PAGE_MASK ;
417
+ size = PAGE_ALIGN (size + addr - phys );
418
+ iova = phys ; /* We just want a direct mapping */
419
+
420
+ ret = iommu_map (domain , iova , phys , size , IOMMU_READ | IOMMU_WRITE );
421
+ if (ret )
422
+ return ret ;
423
+
424
+ ipa -> smem_iova = iova ;
425
+ ipa -> smem_size = size ;
426
+
427
+ return 0 ;
428
+ }
429
+
430
+ static void ipa_smem_exit (struct ipa * ipa )
431
+ {
432
+ struct device * dev = & ipa -> pdev -> dev ;
433
+ struct iommu_domain * domain ;
434
+
435
+ domain = iommu_get_domain_for_dev (dev );
436
+ if (domain ) {
437
+ size_t size ;
438
+
439
+ size = iommu_unmap (domain , ipa -> smem_iova , ipa -> smem_size );
440
+ if (size != ipa -> smem_size )
441
+ dev_warn (dev , "unmapped %zu SMEM bytes, expected %lu\n" ,
442
+ size , ipa -> smem_size );
443
+
444
+ } else {
445
+ dev_err (dev , "couldn't get IPA IOMMU domain for SMEM\n" );
446
+ }
447
+
448
+ ipa -> smem_size = 0 ;
449
+ ipa -> smem_iova = 0 ;
450
+ }
451
+
343
452
/* Perform memory region-related initialization */
344
453
int ipa_mem_init (struct ipa * ipa , const struct ipa_mem_data * mem_data )
345
454
{
@@ -383,8 +492,14 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
383
492
if (ret )
384
493
goto err_unmap ;
385
494
495
+ ret = ipa_smem_init (ipa , mem_data -> smem_id , mem_data -> smem_size );
496
+ if (ret )
497
+ goto err_imem_exit ;
498
+
386
499
return 0 ;
387
500
501
+ err_imem_exit :
502
+ ipa_imem_exit (ipa );
388
503
err_unmap :
389
504
memunmap (ipa -> mem_virt );
390
505
@@ -394,6 +509,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
394
509
/* Inverse of ipa_mem_init().  Teardown runs in the reverse of the
 * initialization order: SMEM first (set up last), then IMEM, then
 * the memremap of the IPA-local memory region.
 */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}
0 commit comments