38
38
/* Per-stream interrupt status bits (as laid out in the LISR/HISR registers) */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */

#define STM32_DMA_FIFO_THRESHOLD_FULL	0x03

/* Hardware limit of the NDTR register: number of data items per transfer */
#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
 * gather at boundary. Thus it's safer to round down this value on FIFO
 * size (16 Bytes)
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
@@ -869,7 +872,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
869
872
desc -> sg_req [i ].len = sg_dma_len (sg );
870
873
871
874
nb_data_items = desc -> sg_req [i ].len / buswidth ;
872
- if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS ) {
875
+ if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS ) {
873
876
dev_err (chan2dev (chan ), "nb items not supported\n" );
874
877
goto err ;
875
878
}
@@ -935,7 +938,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
935
938
return NULL ;
936
939
937
940
nb_data_items = period_len / buswidth ;
938
- if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS ) {
941
+ if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS ) {
939
942
dev_err (chan2dev (chan ), "number of items not supported\n" );
940
943
return NULL ;
941
944
}
@@ -985,7 +988,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
985
988
u32 num_sgs , best_burst , dma_burst , threshold ;
986
989
int i ;
987
990
988
- num_sgs = DIV_ROUND_UP (len , STM32_DMA_MAX_DATA_ITEMS );
991
+ num_sgs = DIV_ROUND_UP (len , STM32_DMA_ALIGNED_MAX_DATA_ITEMS );
989
992
desc = stm32_dma_alloc_desc (num_sgs );
990
993
if (!desc )
991
994
return NULL ;
@@ -994,7 +997,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
994
997
995
998
for (offset = 0 , i = 0 ; offset < len ; offset += xfer_count , i ++ ) {
996
999
xfer_count = min_t (size_t , len - offset ,
997
- STM32_DMA_MAX_DATA_ITEMS );
1000
+ STM32_DMA_ALIGNED_MAX_DATA_ITEMS );
998
1001
999
1002
/* Compute best burst size */
1000
1003
max_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
0 commit comments