@@ -1079,13 +1079,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
 		__free_from_pool(cpu_addr, size);
 }
 
-static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
-	int coherent_flag)
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
 	void *addr = NULL;
+	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
 
 	*handle = DMA_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);
@@ -1128,19 +1128,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	return NULL;
 }
 
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
-}
-
-static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
-}
-
-static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
@@ -1154,35 +1142,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
 	if (vma->vm_pgoff >= nr_pages)
 		return -ENXIO;
 
+	if (!dev->dma_coherent)
+		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
 	err = vm_map_pages(vma, pages, nr_pages);
 	if (err)
 		pr_err("Remapping memory failed: %d\n", err);
 
 	return err;
 }
-static int arm_iommu_mmap_attrs(struct device *dev,
-		struct vm_area_struct *vma, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
-	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-static int arm_coherent_iommu_mmap_attrs(struct device *dev,
-		struct vm_area_struct *vma, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
 
 /*
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-	dma_addr_t handle, unsigned long attrs, int coherent_flag)
+static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+	dma_addr_t handle, unsigned long attrs)
 {
+	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
 	struct page **pages;
 	size = PAGE_ALIGN(size);
 
@@ -1204,19 +1181,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
 	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
-static void arm_iommu_free_attrs(struct device *dev, size_t size,
-				 void *cpu_addr, dma_addr_t handle,
-				 unsigned long attrs)
-{
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
-}
-
-static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
-	void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
-}
-
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 				 void *cpu_addr, dma_addr_t dma_addr,
 				 size_t size, unsigned long attrs)
@@ -1236,8 +1200,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, unsigned long attrs,
-			  bool is_coherent)
+			  enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova, iova_base;
@@ -1257,7 +1220,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		prot = __dma_info_to_prot(dir, attrs);
@@ -1277,9 +1240,20 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	return ret;
 }
 
-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs,
-		bool is_coherent)
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0, ret;
@@ -1294,8 +1268,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			ret = __map_sg_chunk(dev, start, size,
-					     &dma->dma_address, dir, attrs,
-					     is_coherent);
+					     &dma->dma_address, dir, attrs);
 			if (ret < 0)
 				goto bad_mapping;
 
@@ -1309,8 +1282,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-			     is_coherent);
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
 	if (ret < 0)
 		goto bad_mapping;
 
@@ -1328,44 +1300,19 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 }
 
 /**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
  */
-static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
-}
-
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs, bool is_coherent)
+static void arm_iommu_unmap_sg(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -1374,47 +1321,12 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
 	}
 }
 
-/**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_coherent_iommu_unmap_sg(struct device *dev,
-		struct scatterlist *sg, int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_iommu_unmap_sg(struct device *dev,
-		struct scatterlist *sg, int nents,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
-}
-
 /**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
@@ -1452,25 +1364,27 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
-
 /**
- * arm_coherent_iommu_map_page
+ * arm_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
  * @size: size of buffer to map
  * @dir: DMA transfer direction
  *
- * Coherent IOMMU aware version of arm_dma_map_page()
+ * IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
+	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
@@ -1487,50 +1401,6 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	return DMA_MAPPING_ERROR;
 }
 
-/**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
-{
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
-	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = handle & PAGE_MASK;
-	int offset = handle & ~PAGE_MASK;
-	int len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
 /**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
@@ -1545,15 +1415,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
 	if (!iova)
 		return;
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 		__dma_page_dev_to_cpu(page, offset, size, dir);
+	}
 
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
@@ -1665,16 +1537,16 @@ static const struct dma_map_ops iommu_ops = {
 };
 
 static const struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_coherent_iommu_alloc_attrs,
-	.free		= arm_coherent_iommu_free_attrs,
-	.mmap		= arm_coherent_iommu_mmap_attrs,
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
 	.get_sgtable	= arm_iommu_get_sgtable,
 
-	.map_page	= arm_coherent_iommu_map_page,
-	.unmap_page	= arm_coherent_iommu_unmap_page,
+	.map_page	= arm_iommu_map_page,
+	.unmap_page	= arm_iommu_unmap_page,
 
-	.map_sg		= arm_coherent_iommu_map_sg,
-	.unmap_sg	= arm_coherent_iommu_unmap_sg,
+	.map_sg		= arm_iommu_map_sg,
+	.unmap_sg	= arm_iommu_unmap_sg,
 
 	.map_resource	= arm_iommu_map_resource,
 	.unmap_resource	= arm_iommu_unmap_resource,
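The same refactoring repeats in every hunk above: each arm_coherent_iommu_*/arm_iommu_* wrapper pair around a double-underscore helper taking an is_coherent or coherent_flag argument collapses into a single function that tests dev->dma_coherent at runtime, so iommu_ops and iommu_coherent_ops can share the same callbacks. A minimal user-space sketch of that pattern, using hypothetical struct device and sync stand-ins rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's struct device and the CPU
 * cache-maintenance hook; illustrative only, not the kernel interfaces. */
struct device {
	bool dma_coherent;	/* fixed at probe time, as in the kernel */
};

static void dma_page_cpu_to_dev(const char *buf)
{
	printf("cache clean before DMA: %s\n", buf);
}

/*
 * Before the patch: a map_page() and a coherent_map_page() wrapper both
 * called a __helper taking an explicit is_coherent flag. After: one
 * function keyed off dev->dma_coherent, usable by both ops tables.
 */
static void map_page(struct device *dev, const char *buf)
{
	if (!dev->dma_coherent)	/* non-coherent devices need cache maintenance */
		dma_page_cpu_to_dev(buf);
	printf("iommu map: %s\n", buf);
}

int main(void)
{
	struct device coherent = { .dma_coherent = true };
	struct device noncoherent = { .dma_coherent = false };

	map_page(&coherent, "coherent buffer");	/* no sync needed */
	map_page(&noncoherent, "noncoherent buffer");	/* sync, then map */
	return 0;
}

With the coherency decision made at runtime, the remaining difference between the two ops tables disappears, which is why the final hunk can point iommu_coherent_ops at the plain arm_iommu_* functions.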