// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <[email protected]>

+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/sched/task_stack.h>
#include <linux/types.h>

#define AM654_HBMC_CALIB_COUNT	25

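+/*
+ * Per-device state for the DMA read path: the completion signalled by the
+ * DMA callback, the physical base of the memory-mapped flash window (used
+ * as the memcpy source), a back-pointer to the controller for logging, and
+ * the memcpy-capable channel (NULL when no channel is available).
+ */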
+struct am654_hbmc_device_priv {
+	struct completion rx_dma_complete;
+	phys_addr_t device_base;
+	struct hyperbus_ctlr *ctlr;
+	struct dma_chan *rx_chan;
+};
+
struct am654_hbmc_priv {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
@@ -51,13 +63,103 @@ static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
	return ret;
}

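+/* Completion callback for the memcpy DMA: wakes the waiter in am654_hbmc_dma_read() */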
+static void am654_hbmc_dma_callback(void *param)
+{
+	struct am654_hbmc_device_priv *priv = param;
+
+	complete(&priv->rx_dma_complete);
+}
+
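+/*
+ * Read 'len' bytes from the memory-mapped flash window at device_base + from
+ * into 'to' using a memcpy-capable DMA channel: map the destination buffer,
+ * prepare and submit the descriptor, then wait (with a length-scaled timeout)
+ * for the completion raised by am654_hbmc_dma_callback().
+ */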
+static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
+			       unsigned long from, ssize_t len)
+{
+	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	struct dma_chan *rx_chan = priv->rx_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dst, dma_src;
+	dma_cookie_t cookie;
+	int ret = 0;
+
+	if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
+		return -EINVAL;
+
+	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
+		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
+		return -EIO;
+	}
+
+	dma_src = priv->device_base + from;
+	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
+	if (!tx) {
+		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
+		ret = -EIO;
+		goto unmap_dma;
+	}
+
+	reinit_completion(&priv->rx_dma_complete);
+	tx->callback = am654_hbmc_dma_callback;
+	tx->callback_param = priv;
+	cookie = dmaengine_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
+		goto unmap_dma;
+	}
+
+	dma_async_issue_pending(rx_chan);
+	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
+		dmaengine_terminate_sync(rx_chan);
+		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
+		ret = -ETIMEDOUT;
+	}
+
+unmap_dma:
+	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
+	return ret;
+}
+
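+/*
+ * .copy_from op: use DMA for transfers of SZ_1K or more and fall back to
+ * memcpy_fromio() for small reads or whenever the DMA path is unavailable
+ * or fails.
+ */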
+static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
+			    unsigned long from, ssize_t len)
+{
+	struct am654_hbmc_device_priv *priv = hbdev->priv;
+
+	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
+		memcpy_fromio(to, hbdev->map.virt + from, len);
+}
+
static const struct hyperbus_ops am654_hbmc_ops = {
	.calibrate = am654_hbmc_calibrate,
+	.copy_from = am654_hbmc_read,
};

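+/*
+ * Request any DMA channel that can do DMA_MEMCPY. A missing channel is not
+ * fatal (reads fall back to memcpy_fromio()), but -EPROBE_DEFER is propagated
+ * so the probe can be retried once a DMA provider shows up.
+ */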
+static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
+{
+	struct dma_chan *rx_chan;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	rx_chan = dma_request_chan_by_mask(&mask);
+	if (IS_ERR(rx_chan)) {
+		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
+		return 0;
+	}
+	priv->rx_chan = rx_chan;
+	init_completion(&priv->rx_dma_complete);
+
+	return 0;
+}
+
static int am654_hbmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
+	struct am654_hbmc_device_priv *dev_priv;
	struct device *dev = &pdev->dev;
	struct am654_hbmc_priv *priv;
	struct resource res;
@@ -96,13 +198,31 @@ static int am654_hbmc_probe(struct platform_device *pdev)
	priv->ctlr.dev = dev;
	priv->ctlr.ops = &am654_hbmc_ops;
	priv->hbdev.ctlr = &priv->ctlr;
+
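+	/*
+	 * Per-device DMA state: remember the physical base of the flash
+	 * window and try to grab a memcpy-capable DMA channel before
+	 * registering the device.
+	 */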
+	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv) {
+		ret = -ENOMEM;
+		goto disable_mux;
+	}
+
+	priv->hbdev.priv = dev_priv;
+	dev_priv->device_base = res.start;
+	dev_priv->ctlr = &priv->ctlr;
+
+	ret = am654_hbmc_request_mmap_dma(dev_priv);
+	if (ret)
+		goto disable_mux;
+
	ret = hyperbus_register_device(&priv->hbdev);
	if (ret) {
		dev_err(dev, "failed to register controller\n");
-		goto disable_mux;
+		goto release_dma;
	}

	return 0;
+release_dma:
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
disable_mux:
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);
@@ -112,12 +232,16 @@ static int am654_hbmc_probe(struct platform_device *pdev)
static int am654_hbmc_remove(struct platform_device *pdev)
{
	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
	int ret;

	ret = hyperbus_unregister_device(&priv->hbdev);
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);

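+	/* Release the memcpy DMA channel if one was acquired at probe time */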
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
+
	return ret;
}