
Commit 75ab153

Anumula-Murali-Mohan-Reddy authored and rleon committed
RDMA/cxgb4: use dma_mmap_coherent() for mapping non-contiguous memory
dma_alloc_coherent() allocates contiguous memory irrespective of iommu mode, but after commit f5ff79f ("dma-mapping: remove CONFIG_DMA_REMAP"), if the iommu is enabled in translate mode, dma_alloc_coherent() may allocate non-contiguous memory. An attempt to map this memory results in a panic.

This patch fixes the issue by using dma_mmap_coherent() to map each page to user space.

Signed-off-by: Anumula Murali Mohan Reddy <[email protected]>
Signed-off-by: Potnuri Bharat Teja <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Leon Romanovsky <[email protected]>
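For context, here is a minimal sketch of the pattern the fix adopts (hypothetical names such as my_queue and my_mmap_queue, error handling abbreviated; this is not the patch itself). Once an IOMMU is translating, the buffer returned by dma_alloc_coherent() need not be physically contiguous, so mapping it to user space must go through dma_mmap_coherent() rather than virt_to_phys() plus remap_pfn_range():

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct my_queue {
        void *vaddr;         /* CPU address from dma_alloc_coherent() */
        dma_addr_t dma_addr; /* device (bus/IOVA) address */
        size_t size;         /* page-aligned allocation size */
};

static int my_alloc_queue(struct device *dev, struct my_queue *q, size_t size)
{
        q->size = PAGE_ALIGN(size);
        q->vaddr = dma_alloc_coherent(dev, q->size, &q->dma_addr, GFP_KERNEL);
        return q->vaddr ? 0 : -ENOMEM;
}

static int my_mmap_queue(struct device *dev, struct my_queue *q,
                         struct vm_area_struct *vma)
{
        /*
         * dma_mmap_coherent() knows how the buffer was allocated and maps
         * every page whether or not the buffer is physically contiguous;
         * remap_pfn_range(..., virt_to_phys(q->vaddr) >> PAGE_SHIFT, ...)
         * is only valid for a contiguous allocation.
         */
        return dma_mmap_coherent(dev, vma, q->vaddr, q->dma_addr, q->size);
}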
1 parent 8400291 commit 75ab153

File tree

4 files changed (+105, -38 lines)


drivers/infiniband/hw/cxgb4/cq.c

Lines changed: 7 additions & 1 deletion
@@ -1126,13 +1126,19 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			goto err_free_mm2;

 		mm->key = uresp.key;
-		mm->addr = virt_to_phys(chp->cq.queue);
+		mm->addr = 0;
+		mm->vaddr = chp->cq.queue;
+		mm->dma_addr = chp->cq.dma_addr;
 		mm->len = chp->cq.memsize;
+		insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
 		insert_mmap(ucontext, mm);

 		mm2->key = uresp.gts_key;
 		mm2->addr = chp->cq.bar2_pa;
 		mm2->len = PAGE_SIZE;
+		mm2->vaddr = NULL;
+		mm2->dma_addr = 0;
+		insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr);
 		insert_mmap(ucontext, mm2);
 	}


drivers/infiniband/hw/cxgb4/iw_cxgb4.h

Lines changed: 36 additions & 0 deletions
@@ -532,11 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 	return container_of(c, struct c4iw_ucontext, ibucontext);
 }

+enum {
+	CXGB4_MMAP_BAR,
+	CXGB4_MMAP_BAR_WC,
+	CXGB4_MMAP_CONTIG,
+	CXGB4_MMAP_NON_CONTIG,
+};
+
 struct c4iw_mm_entry {
 	struct list_head entry;
 	u64 addr;
 	u32 key;
+	void *vaddr;
+	dma_addr_t dma_addr;
 	unsigned len;
+	u8 mmap_flag;
 };

 static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
@@ -561,6 +571,32 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
 	return NULL;
 }

+static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev,
+				       struct c4iw_mm_entry *mm, u64 addr)
+{
+	if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&
+	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+		     pci_resource_len(rdev->lldi.pdev, 0))))
+		mm->mmap_flag = CXGB4_MMAP_BAR;
+	else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) &&
+		 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+			  pci_resource_len(rdev->lldi.pdev, 2)))) {
+		if (addr >= rdev->oc_mw_pa) {
+			mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+		} else {
+			if (is_t4(rdev->lldi.adapter_type))
+				mm->mmap_flag = CXGB4_MMAP_BAR;
+			else
+				mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+		}
+	} else {
+		if (addr)
+			mm->mmap_flag = CXGB4_MMAP_CONTIG;
+		else
+			mm->mmap_flag = CXGB4_MMAP_NON_CONTIG;
+	}
+}
+
 static inline void insert_mmap(struct c4iw_ucontext *ucontext,
 			       struct c4iw_mm_entry *mm)
 {
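A note on the helper above (a summary drawn from these hunks, not part of the patch): classification keys off the cookie's addr field, with addr == 0 as the new sentinel for coherent queue memory that may be non-contiguous:

/*
 * addr inside BAR0                   -> CXGB4_MMAP_BAR (uncached; MA_SYNC)
 * addr inside BAR2, addr >= oc_mw_pa -> CXGB4_MMAP_BAR_WC (write-combined)
 * addr inside BAR2, addr <  oc_mw_pa -> CXGB4_MMAP_BAR on T4,
 *                                       CXGB4_MMAP_BAR_WC on later chips
 * addr != 0, outside both BARs       -> CXGB4_MMAP_CONTIG
 * addr == 0                          -> CXGB4_MMAP_NON_CONTIG
 */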

drivers/infiniband/hw/cxgb4/provider.c

Lines changed: 33 additions & 34 deletions
@@ -113,6 +113,9 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
 		mm->key = uresp.status_page_key;
 		mm->addr = virt_to_phys(rhp->rdev.status_page);
 		mm->len = PAGE_SIZE;
+		mm->vaddr = NULL;
+		mm->dma_addr = 0;
+		insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
 		insert_mmap(context, mm);
 	}
 	return 0;
@@ -131,6 +134,11 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct c4iw_mm_entry *mm;
 	struct c4iw_ucontext *ucontext;
 	u64 addr;
+	u8 mmap_flag;
+	size_t size;
+	void *vaddr;
+	unsigned long vm_pgoff;
+	dma_addr_t dma_addr;

 	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
 		 key, len);
@@ -145,47 +153,38 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	if (!mm)
 		return -EINVAL;
 	addr = mm->addr;
+	vaddr = mm->vaddr;
+	dma_addr = mm->dma_addr;
+	size = mm->len;
+	mmap_flag = mm->mmap_flag;
 	kfree(mm);

-	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
-	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
-		    pci_resource_len(rdev->lldi.pdev, 0)))) {
-
-		/*
-		 * MA_SYNC register...
-		 */
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	switch (mmap_flag) {
+	case CXGB4_MMAP_BAR:
+		ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+					 len,
+					 pgprot_noncached(vma->vm_page_prot));
+		break;
+	case CXGB4_MMAP_BAR_WC:
 		ret = io_remap_pfn_range(vma, vma->vm_start,
 					 addr >> PAGE_SHIFT,
-					 len, vma->vm_page_prot);
-	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
-		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
-		    pci_resource_len(rdev->lldi.pdev, 2)))) {
-
-		/*
-		 * Map user DB or OCQP memory...
-		 */
-		if (addr >= rdev->oc_mw_pa)
-			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
-		else {
-			if (!is_t4(rdev->lldi.adapter_type))
-				vma->vm_page_prot =
-					t4_pgprot_wc(vma->vm_page_prot);
-			else
-				vma->vm_page_prot =
-					pgprot_noncached(vma->vm_page_prot);
-		}
+					 len, t4_pgprot_wc(vma->vm_page_prot));
+		break;
+	case CXGB4_MMAP_CONTIG:
 		ret = io_remap_pfn_range(vma, vma->vm_start,
 					 addr >> PAGE_SHIFT,
 					 len, vma->vm_page_prot);
-	} else {
-
-		/*
-		 * Map WQ or CQ contig dma memory...
-		 */
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      addr >> PAGE_SHIFT,
-				      len, vma->vm_page_prot);
+		break;
+	case CXGB4_MMAP_NON_CONTIG:
+		vm_pgoff = vma->vm_pgoff;
+		vma->vm_pgoff = 0;
+		ret = dma_mmap_coherent(&rdev->lldi.pdev->dev, vma,
+					vaddr, dma_addr, size);
+		vma->vm_pgoff = vm_pgoff;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
 	}

 	return ret;
drivers/infiniband/hw/cxgb4/qp.c

Lines changed: 29 additions & 3 deletions
@@ -2281,24 +2281,39 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
 		if (ret)
 			goto err_free_ma_sync_key;
 		sq_key_mm->key = uresp.sq_key;
-		sq_key_mm->addr = qhp->wq.sq.phys_addr;
+		sq_key_mm->addr = 0;
+		sq_key_mm->vaddr = qhp->wq.sq.queue;
+		sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
 		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+		insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
 		insert_mmap(ucontext, sq_key_mm);
 		if (!attrs->srq) {
 			rq_key_mm->key = uresp.rq_key;
-			rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+			rq_key_mm->addr = 0;
+			rq_key_mm->vaddr = qhp->wq.rq.queue;
+			rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
 			rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+			insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
+					    rq_key_mm->addr);
 			insert_mmap(ucontext, rq_key_mm);
 		}
 		sq_db_key_mm->key = uresp.sq_db_gts_key;
 		sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+		sq_db_key_mm->vaddr = NULL;
+		sq_db_key_mm->dma_addr = 0;
 		sq_db_key_mm->len = PAGE_SIZE;
+		insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
+				    sq_db_key_mm->addr);
 		insert_mmap(ucontext, sq_db_key_mm);
 		if (!attrs->srq) {
 			rq_db_key_mm->key = uresp.rq_db_gts_key;
 			rq_db_key_mm->addr =
 				(u64)(unsigned long)qhp->wq.rq.bar2_pa;
 			rq_db_key_mm->len = PAGE_SIZE;
+			rq_db_key_mm->vaddr = NULL;
+			rq_db_key_mm->dma_addr = 0;
+			insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
+					    rq_db_key_mm->addr);
 			insert_mmap(ucontext, rq_db_key_mm);
 		}
 		if (ma_sync_key_mm) {
@@ -2307,6 +2322,10 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
 				(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
 				 PCIE_MA_SYNC_A) & PAGE_MASK;
 			ma_sync_key_mm->len = PAGE_SIZE;
+			ma_sync_key_mm->vaddr = NULL;
+			ma_sync_key_mm->dma_addr = 0;
+			insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
+					    ma_sync_key_mm->addr);
 			insert_mmap(ucontext, ma_sync_key_mm);
 		}

@@ -2761,12 +2780,19 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
 		if (ret)
 			goto err_free_srq_db_key_mm;
 		srq_key_mm->key = uresp.srq_key;
-		srq_key_mm->addr = virt_to_phys(srq->wq.queue);
+		srq_key_mm->addr = 0;
 		srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+		srq_key_mm->vaddr = srq->wq.queue;
+		srq_key_mm->dma_addr = srq->wq.dma_addr;
+		insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
 		insert_mmap(ucontext, srq_key_mm);
 		srq_db_key_mm->key = uresp.srq_db_gts_key;
 		srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
 		srq_db_key_mm->len = PAGE_SIZE;
+		srq_db_key_mm->vaddr = NULL;
+		srq_db_key_mm->dma_addr = 0;
+		insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
+				    srq_db_key_mm->addr);
 		insert_mmap(ucontext, srq_db_key_mm);
 	}

