@@ -418,53 +418,93 @@ static void spi_qup_dma_terminate(struct spi_master *master,
 		dmaengine_terminate_all(master->dma_rx);
 }
 
+static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
+				     u32 *nents)
+{
+	struct scatterlist *sg;
+	u32 total = 0;
+
+	*nents = 0;
+
+	for (sg = sgl; sg; sg = sg_next(sg)) {
+		unsigned int len = sg_dma_len(sg);
+
+		/* check for overflow as well as limit */
+		if (((total + len) < total) || ((total + len) > max))
+			break;
+
+		total += len;
+		(*nents)++;
+	}
+
+	return total;
+}
+
 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 			  unsigned long timeout)
 {
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	struct spi_master *master = spi->master;
 	struct spi_qup *qup = spi_master_get_devdata(master);
+	struct scatterlist *tx_sgl, *rx_sgl;
 	int ret;
 
 	if (xfer->rx_buf)
 		rx_done = spi_qup_dma_done;
 	else if (xfer->tx_buf)
 		tx_done = spi_qup_dma_done;
 
-	ret = spi_qup_io_config(spi, xfer);
-	if (ret)
-		return ret;
+	rx_sgl = xfer->rx_sg.sgl;
+	tx_sgl = xfer->tx_sg.sgl;
 
-	/* before issuing the descriptors, set the QUP to run */
-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-	if (ret) {
-		dev_warn(qup->dev, "%s(%d): cannot set RUN state\n",
-			 __func__, __LINE__);
-		return ret;
-	}
+	do {
+		u32 rx_nents, tx_nents;
+
+		if (rx_sgl)
+			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
+				SPI_MAX_XFER, &rx_nents) / qup->w_size;
+		if (tx_sgl)
+			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
+				SPI_MAX_XFER, &tx_nents) / qup->w_size;
+		if (!qup->n_words)
+			return -EIO;
 
-	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-				      rx_done);
+		ret = spi_qup_io_config(spi, xfer);
 		if (ret)
 			return ret;
 
-		dma_async_issue_pending(master->dma_rx);
-	}
-
-	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-				      tx_done);
-		if (ret)
+		/* before issuing the descriptors, set the QUP to run */
+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(qup->dev, "cannot set RUN state\n");
 			return ret;
+		}
+		if (rx_sgl) {
+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+					      DMA_DEV_TO_MEM, rx_done);
+			if (ret)
+				return ret;
+			dma_async_issue_pending(master->dma_rx);
+		}
 
-		dma_async_issue_pending(master->dma_tx);
-	}
+		if (tx_sgl) {
+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+					      DMA_MEM_TO_DEV, tx_done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (!wait_for_completion_timeout(&qup->done, timeout))
+			return -ETIMEDOUT;
+
+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
+			;
+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
+			;
 
-	if (!wait_for_completion_timeout(&qup->done, timeout))
-		return -ETIMEDOUT;
+	} while (rx_sgl || tx_sgl);
 
 	return 0;
 }
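For readers following the chunking logic above: the change splits one spi_transfer into as many DMA transactions as needed, each covering at most SPI_MAX_XFER bytes of the mapped scatterlist. Below is a minimal userspace C sketch of that arithmetic, offered as an illustration only and not part of the patch. The seg[] lengths stand in for sg_dma_len() of each scatterlist entry, MAX_CHUNK is a hypothetical stand-in for SPI_MAX_XFER, chunk_len() mirrors spi_qup_sgl_get_nents_len(), and the loop in main() mirrors the do/while loop in spi_qup_do_dma().

#include <stdio.h>
#include <stddef.h>

#define MAX_CHUNK 65536u	/* hypothetical stand-in for SPI_MAX_XFER */

/* Count how many leading segments fit within 'max' bytes; return the
 * total byte count and write the number of segments to *nents. */
static unsigned int chunk_len(const unsigned int *seg, size_t nseg,
			      unsigned int max, size_t *nents)
{
	unsigned int total = 0;
	size_t i;

	*nents = 0;
	for (i = 0; i < nseg; i++) {
		/* check for overflow as well as limit */
		if (total + seg[i] < total || total + seg[i] > max)
			break;
		total += seg[i];
		(*nents)++;
	}
	return total;
}

int main(void)
{
	unsigned int seg[] = { 40960, 40960, 40960, 8192 };
	size_t nseg = sizeof(seg) / sizeof(seg[0]);
	size_t pos = 0;

	/* One pass per DMA transaction, advancing past the segments just
	 * consumed, until the whole "scatterlist" has been covered. */
	while (pos < nseg) {
		size_t nents;
		unsigned int len = chunk_len(seg + pos, nseg - pos,
					     MAX_CHUNK, &nents);

		if (!len)	/* a single segment exceeds the limit */
			break;
		printf("transaction: %zu segment(s), %u bytes\n",
		       nents, len);
		pos += nents;
	}
	return 0;
}

With the sample lengths above, the first pass takes only the first 40960-byte segment (adding the second would exceed MAX_CHUNK), and the remaining segments are consumed by two further passes, which is the same behaviour the do/while loop gives a scatterlist larger than SPI_MAX_XFER.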