@@ -230,34 +230,36 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
 				boundary_size, 0);
 }
 
-static unsigned long dma_alloc_iommu(struct device *dev, int size)
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long offset, flags;
-	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
 		/* wrap-around */
 		offset = __dma_alloc_iommu(dev, 0, size);
-		wrap = 1;
-	}
-
-	if (offset != -1) {
-		zdev->next_bit = offset + size;
-		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
+		if (offset == -1) {
+			spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+			return DMA_ERROR_CODE;
+		}
+		if (!zdev->tlb_refresh && !s390_iommu_strict)
 			/* global flush after wrap-around with lazy unmap */
 			zpci_refresh_global(zdev);
 	}
+	zdev->next_bit = offset + size;
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-	return offset;
+
+	return zdev->start_dma + offset * PAGE_SIZE;
 }
 
-static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long flags;
+	unsigned long flags, offset;
+
+	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	if (!zdev->iommu_bitmap)
@@ -289,23 +291,22 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 			     unsigned long attrs)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long nr_pages, iommu_page_index;
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
+	unsigned long nr_pages;
 	dma_addr_t dma_addr;
 	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
-	iommu_page_index = dma_alloc_iommu(dev, nr_pages);
-	if (iommu_page_index == -1) {
+	dma_addr = dma_alloc_address(dev, nr_pages);
+	if (dma_addr == DMA_ERROR_CODE) {
 		ret = -ENOSPC;
 		goto out_err;
 	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
-	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
@@ -318,7 +319,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
-	dma_free_iommu(dev, iommu_page_index, nr_pages);
+	dma_free_address(dev, dma_addr, nr_pages);
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
@@ -330,7 +331,6 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 			       unsigned long attrs)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long iommu_page_index;
 	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -344,8 +344,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 	}
 
 	atomic64_add(npages, &zdev->unmapped_pages);
-	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-	dma_free_iommu(dev, iommu_page_index, npages);
+	dma_free_address(dev, dma_addr, npages);
 }
 
 static void *s390_dma_alloc(struct device *dev, size_t size,
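The pattern this patch moves to — the allocator hands out bus addresses directly (base + page index * PAGE_SIZE), signals failure with a sentinel instead of -1, and the free path converts the address back to a bitmap offset — can be sketched in a few lines of user-space C. This is a rough model under stated assumptions, not the kernel code: everything prefixed model_/MODEL_ is a hypothetical stand-in, the bitmap is a plain bool array rather than the kernel's bitmap API, and locking, the zdev structure, and the IOTLB flush are omitted or reduced to comments.

/* Hypothetical user-space model of the dma_alloc_address()/dma_free_address()
 * scheme: a next-fit bitmap allocator over a window of IOVA pages that
 * returns bus addresses and uses a sentinel value for allocation failure.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)
#define MODEL_PAGES      64               /* tiny IOVA window for the demo */
#define MODEL_ERROR_CODE (~0UL)           /* stand-in for DMA_ERROR_CODE */

static bool bitmap[MODEL_PAGES];          /* one flag per IOVA page */
static unsigned long next_bit;            /* next-fit search position */
static unsigned long start_dma = 0x100000;/* base of the bus address window */

/* First-fit search for 'size' consecutive free pages in [start, MODEL_PAGES). */
static long find_range(unsigned long start, int size)
{
	for (unsigned long i = start; i + size <= MODEL_PAGES; i++) {
		bool free = true;

		for (int j = 0; j < size; j++) {
			if (bitmap[i + j]) {
				free = false;
				break;
			}
		}
		if (free)
			return (long)i;
	}
	return -1;
}

/* Allocate 'size' pages; return a bus address or MODEL_ERROR_CODE. */
static unsigned long model_alloc_address(int size)
{
	long offset = find_range(next_bit, size);

	if (offset == -1) {
		/* wrap-around: retry from the bottom of the window */
		offset = find_range(0, size);
		if (offset == -1)
			return MODEL_ERROR_CODE;
		/* the kernel flushes stale IOTLB entries here (lazy unmap) */
	}
	for (int j = 0; j < size; j++)
		bitmap[offset + j] = true;
	next_bit = offset + size;

	return start_dma + (unsigned long)offset * MODEL_PAGE_SIZE;
}

/* Free by address: convert back to a bitmap offset, as dma_free_address() does. */
static void model_free_address(unsigned long dma_addr, int size)
{
	unsigned long offset = (dma_addr - start_dma) >> MODEL_PAGE_SHIFT;

	for (int j = 0; j < size; j++)
		bitmap[offset + j] = false;
}

int main(void)
{
	unsigned long a = model_alloc_address(4);
	unsigned long b = model_alloc_address(8);

	printf("a = %#lx, b = %#lx\n", a, b);
	model_free_address(a, 4);
	model_free_address(b, 8);
	return 0;
}

Note the design point the model mirrors: because the address encodes the page index, callers such as s390_dma_unmap_pages() no longer need to carry an iommu_page_index alongside the dma_addr, which is what lets the patch drop that variable from both map and unmap paths.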