Skip to content

Commit c678fa6

Browse files
davejiang authored and Vinod Koul committed
dmaengine: remove DMA_SG as it is dead code in kernel
There are no in-kernel consumers for the DMA_SG op. Remove the operation, dead code, and test code in dmatest.

Signed-off-by: Dave Jiang <[email protected]>
Reviewed-by: Linus Walleij <[email protected]>
Cc: Gary Hook <[email protected]>
Cc: Ludovic Desroches <[email protected]>
Cc: Kedareswara rao Appana <[email protected]>
Cc: Li Yang <[email protected]>
Cc: Michal Simek <[email protected]>
Signed-off-by: Vinod Koul <[email protected]>
1 parent 61b5f54 commit c678fa6

File tree

12 files changed

+5
-786
lines changed

12 files changed

+5
-786
lines changed

Documentation/dmaengine/provider.txt

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -181,13 +181,6 @@ Currently, the types available are:
181181
- Used by the client drivers to register a callback that will be
182182
called on a regular basis through the DMA controller interrupt
183183

184-
* DMA_SG
185-
- The device supports memory to memory scatter-gather
186-
transfers.
187-
- Even though a plain memcpy can look like a particular case of a
188-
scatter-gather transfer, with a single chunk to transfer, it's a
189-
distinct transaction type in the mem2mem transfers case
190-
191184
* DMA_PRIVATE
192185
- The devices only supports slave transfers, and as such isn't
193186
available for async transfers.

drivers/crypto/ccp/ccp-dmaengine.c

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
502502
return &desc->tx_desc;
503503
}
504504

505-
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
506-
struct dma_chan *dma_chan, struct scatterlist *dst_sg,
507-
unsigned int dst_nents, struct scatterlist *src_sg,
508-
unsigned int src_nents, unsigned long flags)
509-
{
510-
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
511-
dma_chan);
512-
struct ccp_dma_desc *desc;
513-
514-
dev_dbg(chan->ccp->dev,
515-
"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
516-
__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
517-
518-
desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
519-
flags);
520-
if (!desc)
521-
return NULL;
522-
523-
return &desc->tx_desc;
524-
}
525-
526505
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
527506
struct dma_chan *dma_chan, unsigned long flags)
528507
{
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
704683
dma_dev->directions = DMA_MEM_TO_MEM;
705684
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
706685
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
707-
dma_cap_set(DMA_SG, dma_dev->cap_mask);
708686
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
709687

710688
/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
740718

741719
dma_dev->device_free_chan_resources = ccp_free_chan_resources;
742720
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
743-
dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
744721
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
745722
dma_dev->device_issue_pending = ccp_issue_pending;
746723
dma_dev->device_tx_status = ccp_tx_status;

drivers/dma/at_hdmac.c

Lines changed: 1 addition & 139 deletions
Original file line numberDiff line numberDiff line change
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
12021202
return NULL;
12031203
}
12041204

1205-
/**
1206-
* atc_prep_dma_sg - prepare memory to memory scather-gather operation
1207-
* @chan: the channel to prepare operation on
1208-
* @dst_sg: destination scatterlist
1209-
* @dst_nents: number of destination scatterlist entries
1210-
* @src_sg: source scatterlist
1211-
* @src_nents: number of source scatterlist entries
1212-
* @flags: tx descriptor status flags
1213-
*/
1214-
static struct dma_async_tx_descriptor *
1215-
atc_prep_dma_sg(struct dma_chan *chan,
1216-
struct scatterlist *dst_sg, unsigned int dst_nents,
1217-
struct scatterlist *src_sg, unsigned int src_nents,
1218-
unsigned long flags)
1219-
{
1220-
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1221-
struct at_desc *desc = NULL;
1222-
struct at_desc *first = NULL;
1223-
struct at_desc *prev = NULL;
1224-
unsigned int src_width;
1225-
unsigned int dst_width;
1226-
size_t xfer_count;
1227-
u32 ctrla;
1228-
u32 ctrlb;
1229-
size_t dst_len = 0, src_len = 0;
1230-
dma_addr_t dst = 0, src = 0;
1231-
size_t len = 0, total_len = 0;
1232-
1233-
if (unlikely(dst_nents == 0 || src_nents == 0))
1234-
return NULL;
1235-
1236-
if (unlikely(dst_sg == NULL || src_sg == NULL))
1237-
return NULL;
1238-
1239-
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
1240-
| ATC_SRC_ADDR_MODE_INCR
1241-
| ATC_DST_ADDR_MODE_INCR
1242-
| ATC_FC_MEM2MEM;
1243-
1244-
/*
1245-
* loop until there is either no more source or no more destination
1246-
* scatterlist entry
1247-
*/
1248-
while (true) {
1249-
1250-
/* prepare the next transfer */
1251-
if (dst_len == 0) {
1252-
1253-
/* no more destination scatterlist entries */
1254-
if (!dst_sg || !dst_nents)
1255-
break;
1256-
1257-
dst = sg_dma_address(dst_sg);
1258-
dst_len = sg_dma_len(dst_sg);
1259-
1260-
dst_sg = sg_next(dst_sg);
1261-
dst_nents--;
1262-
}
1263-
1264-
if (src_len == 0) {
1265-
1266-
/* no more source scatterlist entries */
1267-
if (!src_sg || !src_nents)
1268-
break;
1269-
1270-
src = sg_dma_address(src_sg);
1271-
src_len = sg_dma_len(src_sg);
1272-
1273-
src_sg = sg_next(src_sg);
1274-
src_nents--;
1275-
}
1276-
1277-
len = min_t(size_t, src_len, dst_len);
1278-
if (len == 0)
1279-
continue;
1280-
1281-
/* take care for the alignment */
1282-
src_width = dst_width = atc_get_xfer_width(src, dst, len);
1283-
1284-
ctrla = ATC_SRC_WIDTH(src_width) |
1285-
ATC_DST_WIDTH(dst_width);
1286-
1287-
/*
1288-
* The number of transfers to set up refer to the source width
1289-
* that depends on the alignment.
1290-
*/
1291-
xfer_count = len >> src_width;
1292-
if (xfer_count > ATC_BTSIZE_MAX) {
1293-
xfer_count = ATC_BTSIZE_MAX;
1294-
len = ATC_BTSIZE_MAX << src_width;
1295-
}
1296-
1297-
/* create the transfer */
1298-
desc = atc_desc_get(atchan);
1299-
if (!desc)
1300-
goto err_desc_get;
1301-
1302-
desc->lli.saddr = src;
1303-
desc->lli.daddr = dst;
1304-
desc->lli.ctrla = ctrla | xfer_count;
1305-
desc->lli.ctrlb = ctrlb;
1306-
1307-
desc->txd.cookie = 0;
1308-
desc->len = len;
1309-
1310-
atc_desc_chain(&first, &prev, desc);
1311-
1312-
/* update the lengths and addresses for the next loop cycle */
1313-
dst_len -= len;
1314-
src_len -= len;
1315-
dst += len;
1316-
src += len;
1317-
1318-
total_len += len;
1319-
}
1320-
1321-
/* First descriptor of the chain embedds additional information */
1322-
first->txd.cookie = -EBUSY;
1323-
first->total_len = total_len;
1324-
1325-
/* set end-of-link to the last link descriptor of list*/
1326-
set_desc_eol(desc);
1327-
1328-
first->txd.flags = flags; /* client is in control of this ack */
1329-
1330-
return &first->txd;
1331-
1332-
err_desc_get:
1333-
atc_desc_put(atchan, first);
1334-
return NULL;
1335-
}
1336-
13371205
/**
13381206
* atc_dma_cyclic_check_values
13391207
* Check for too big/unaligned periods and unaligned DMA buffer
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
19331801

19341802
/* setup platform data for each SoC */
19351803
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1936-
dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
19371804
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
19381805
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
19391806
dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
19401807
dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
19411808
dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
19421809
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1943-
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
19441810

19451811
/* get DMA parameters from controller type */
19461812
plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
20781944
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
20791945
}
20801946

2081-
if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
2082-
atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
2083-
20841947
dma_writel(atdma, EN, AT_DMA_ENABLE);
20851948

2086-
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
1949+
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
20871950
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
20881951
dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
20891952
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
2090-
dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
20911953
plat_dat->nr_channels);
20921954

20931955
dma_async_device_register(&atdma->dma_common);

drivers/dma/dmaengine.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -937,8 +937,6 @@ int dma_async_device_register(struct dma_device *device)
937937
!device->device_prep_dma_memset);
938938
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
939939
!device->device_prep_dma_interrupt);
940-
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
941-
!device->device_prep_dma_sg);
942940
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
943941
!device->device_prep_dma_cyclic);
944942
BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&

drivers/dma/dmatest.c

Lines changed: 2 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
5252
MODULE_PARM_DESC(iterations,
5353
"Iterations before stopping test (default: infinite)");
5454

55-
static unsigned int sg_buffers = 1;
56-
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
57-
MODULE_PARM_DESC(sg_buffers,
58-
"Number of scatter gather buffers (default: 1)");
59-
6055
static unsigned int dmatest;
6156
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
6257
MODULE_PARM_DESC(dmatest,
63-
"dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
58+
"dmatest 0-memcpy 1-memset (default: 0)");
6459

6560
static unsigned int xor_sources = 3;
6661
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -471,9 +466,6 @@ static int dmatest_func(void *data)
471466
align = dev->fill_align;
472467
src_cnt = dst_cnt = 1;
473468
is_memset = true;
474-
} else if (thread->type == DMA_SG) {
475-
align = dev->copy_align;
476-
src_cnt = dst_cnt = sg_buffers;
477469
} else if (thread->type == DMA_XOR) {
478470
/* force odd to ensure dst = src */
479471
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -553,8 +545,6 @@ static int dmatest_func(void *data)
553545
dma_addr_t srcs[src_cnt];
554546
dma_addr_t *dsts;
555547
unsigned int src_off, dst_off, len;
556-
struct scatterlist tx_sg[src_cnt];
557-
struct scatterlist rx_sg[src_cnt];
558548

559549
total_tests++;
560550

@@ -650,15 +640,6 @@ static int dmatest_func(void *data)
650640
um->bidi_cnt++;
651641
}
652642

653-
sg_init_table(tx_sg, src_cnt);
654-
sg_init_table(rx_sg, src_cnt);
655-
for (i = 0; i < src_cnt; i++) {
656-
sg_dma_address(&rx_sg[i]) = srcs[i];
657-
sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
658-
sg_dma_len(&tx_sg[i]) = len;
659-
sg_dma_len(&rx_sg[i]) = len;
660-
}
661-
662643
if (thread->type == DMA_MEMCPY)
663644
tx = dev->device_prep_dma_memcpy(chan,
664645
dsts[0] + dst_off,
@@ -668,9 +649,6 @@ static int dmatest_func(void *data)
668649
dsts[0] + dst_off,
669650
*(thread->srcs[0] + src_off),
670651
len, flags);
671-
else if (thread->type == DMA_SG)
672-
tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
673-
rx_sg, src_cnt, flags);
674652
else if (thread->type == DMA_XOR)
675653
tx = dev->device_prep_dma_xor(chan,
676654
dsts[0] + dst_off,
@@ -853,8 +831,6 @@ static int dmatest_add_threads(struct dmatest_info *info,
853831
op = "copy";
854832
else if (type == DMA_MEMSET)
855833
op = "set";
856-
else if (type == DMA_SG)
857-
op = "sg";
858834
else if (type == DMA_XOR)
859835
op = "xor";
860836
else if (type == DMA_PQ)
@@ -916,15 +892,8 @@ static int dmatest_add_channel(struct dmatest_info *info,
916892
}
917893

918894
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
919-
if (dmatest == 2) {
920-
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
921-
thread_count += cnt > 0 ? cnt : 0;
922-
}
923-
}
924-
925-
if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
926895
if (dmatest == 1) {
927-
cnt = dmatest_add_threads(info, dtc, DMA_SG);
896+
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
928897
thread_count += cnt > 0 ? cnt : 0;
929898
}
930899
}
@@ -1002,7 +971,6 @@ static void run_threaded_test(struct dmatest_info *info)
1002971
request_channels(info, DMA_MEMCPY);
1003972
request_channels(info, DMA_MEMSET);
1004973
request_channels(info, DMA_XOR);
1005-
request_channels(info, DMA_SG);
1006974
request_channels(info, DMA_PQ);
1007975
}
1008976

0 commit comments

Comments (0)