Skip to content

Commit ee877b8

Browse files
Sebastian Ott authored and Martin Schwidefsky committed
s390/pci_dma: improve map_sg
Our map_sg implementation mapped sg entries independently of each other. For ease of use and possible performance improvements this patch changes the implementation to try to map as many (likely physically non-contiguous) sglist entries as possible into a contiguous DMA segment. Signed-off-by: Sebastian Ott <[email protected]> Reviewed-by: Gerald Schaefer <[email protected]> Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent 8cb63b7 commit ee877b8

File tree

1 file changed

+82
-24
lines changed

1 file changed

+82
-24
lines changed

arch/s390/pci/pci_dma.c

Lines changed: 82 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -388,37 +388,94 @@ static void s390_dma_free(struct device *dev, size_t size,
388388
free_pages((unsigned long) pa, get_order(size));
389389
}
390390

/* Map a segment into a contiguous dma address area */
/*
 * __s390_dma_map_sg - map @size bytes starting at scatterlist entry @sg
 *                     into one contiguous DMA address range.
 * @dev:    the device the mapping is for
 * @sg:     first scatterlist entry of the segment
 * @size:   total byte length of the segment (page-aligned internally)
 * @handle: on success, receives the base DMA address of the range
 * @dir:    DMA data direction
 *
 * Allocates one contiguous iommu address range and installs a translation
 * for each sg entry back-to-back within it, so physically non-contiguous
 * entries appear DMA-contiguous.  Returns 0 on success or a negative
 * error code; on failure all partially installed translations are torn
 * down and the address range is released.
 */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa;
	int ret;

	/* Reserve one contiguous iommu address range for the whole segment. */
	size = PAGE_ALIGN(size);
	dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
	if (dma_addr_base == DMA_ERROR_CODE)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	/* Device must not write through a DMA_TO_DEVICE (or DMA_NONE) mapping. */
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	/*
	 * Walk the sg entries, placing each one's physical range directly
	 * after the previous one inside the reserved DMA window.
	 * NOTE(review): if this loop body never executes (size == 0 after
	 * alignment), 'ret' is returned uninitialized below — callers appear
	 * to always pass size > 0; confirm.
	 */
	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s)) + s->offset;
		ret = dma_update_trans(zdev, pa, dma_addr, s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->length;
	}
	*handle = dma_addr_base;
	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);

	return ret;

unmap:
	/* Invalidate whatever was installed so far, then free the range. */
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}
433+
434+
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
435+
int nr_elements, enum dma_data_direction dir,
436+
unsigned long attrs)
437+
{
438+
struct scatterlist *s = sg, *start = sg, *dma = sg;
439+
unsigned int max = dma_get_max_seg_size(dev);
440+
unsigned int size = s->offset + s->length;
441+
unsigned int offset = s->offset;
442+
int count = 0, i;
443+
444+
for (i = 1; i < nr_elements; i++) {
445+
s = sg_next(s);
446+
447+
s->dma_address = DMA_ERROR_CODE;
418448
s->dma_length = 0;
449+
450+
if (s->offset || (size & ~PAGE_MASK) ||
451+
size + s->length > max) {
452+
if (__s390_dma_map_sg(dev, start, size,
453+
&dma->dma_address, dir))
454+
goto unmap;
455+
456+
dma->dma_address += offset;
457+
dma->dma_length = size - offset;
458+
459+
size = offset = s->offset;
460+
start = s;
461+
dma = sg_next(dma);
462+
count++;
463+
}
464+
size += s->length;
419465
}
420-
mapped_elements = 0;
421-
goto out;
466+
if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
467+
goto unmap;
468+
469+
dma->dma_address += offset;
470+
dma->dma_length = size - offset;
471+
472+
return count + 1;
473+
unmap:
474+
for_each_sg(sg, s, count, i)
475+
s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
476+
dir, attrs);
477+
478+
return 0;
422479
}
423480

424481
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -429,8 +486,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
429486
int i;
430487

431488
for_each_sg(sg, s, nr_elements, i) {
432-
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
433-
0);
489+
if (s->dma_length)
490+
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
491+
dir, attrs);
434492
s->dma_address = 0;
435493
s->dma_length = 0;
436494
}

0 commit comments

Comments (0)