|
@@ -24,25 +24,11 @@
 #define FDMA_XTR_BUFFER_SIZE 2048
 #define FDMA_WEIGHT 4
 
-/* For each hardware DB there is an entry in this list and when the HW DB
- * entry is used, this SW DB entry is moved to the back of the list
- */
-struct sparx5_db {
-	struct list_head list;
-	void *cpu_addr;
-};
-
 static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
 				     u64 *dataptr)
 {
-	struct sparx5 *sparx5 = fdma->priv;
-	struct sparx5_tx *tx = &sparx5->tx;
-	struct sparx5_db *db_buf;
-
-	db_buf = list_first_entry(&tx->db_list, struct sparx5_db, list);
-	list_move_tail(&db_buf->list, &tx->db_list);
-
-	*dataptr = virt_to_phys(db_buf->cpu_addr);
+	*dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+		   ((dcb * fdma->n_dbs + db) * fdma->db_size);
 
 	return 0;
 }
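
The new callback no longer consults a software list: the DCB array and all data buffers now live in one contiguous DMA region, so the bus address of any buffer is pure arithmetic. A minimal sketch of that layout and offset math, using only the fdma fields visible in this diff (the db_offset() helper itself is hypothetical):

/* Contiguous region as the dataptr callback assumes it:
 *
 *   fdma->dma --> [ DCB 0 .. DCB n_dcbs-1 | DB(0,0) | DB(0,1) | ... ]
 *
 * Hypothetical helper: offset of data buffer 'db' of descriptor 'dcb'
 * from the start of the region. The bus address is then simply
 * fdma->dma + db_offset(fdma, dcb, db), as computed in the hunk above.
 */
static u64 db_offset(const struct fdma *fdma, int dcb, int db)
{
	return (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
	       ((u64)(dcb * fdma->n_dbs + db) * fdma->db_size);
}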
|
@@ -236,15 +222,19 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
 	struct sparx5_tx *tx = &sparx5->tx;
 	struct fdma *fdma = &tx->fdma;
 	static bool first_time = true;
-	struct sparx5_db *db;
+	void *virt_addr;
 
 	fdma_dcb_advance(fdma);
 	if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
 		return -EINVAL;
-	db = list_first_entry(&tx->db_list, struct sparx5_db, list);
-	memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
-	memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
-	memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
+
+	/* Get the virtual address of the dataptr for the next DB */
+	virt_addr = ((u8 *)fdma->dcbs +
+		     (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+		     ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));
+
+	memcpy(virt_addr, ifh, IFH_LEN * 4);
+	memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);
 
 	fdma_dcb_add(fdma, fdma->dcb_index, 0,
 		     FDMA_DCB_STATUS_SOF |
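
The xmit path needs the CPU-visible twin of the same computation: fdma->dcbs is the virtual base of the coherent allocation that fdma->dma maps, so only the base pointer changes, and the db index is implicitly 0 because this path fills one data buffer per DCB. A sketch under those assumptions (db_virt() is a hypothetical name, not a library call):

/* Hypothetical mirror of the virt_addr computation above: the same
 * offset as in sparx5_fdma_tx_dataptr_cb(), applied to the virtual
 * base of the region instead of the bus address.
 */
static void *db_virt(struct fdma *fdma, int dcb, int db)
{
	return (u8 *)fdma->dcbs +
	       (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
	       ((dcb * fdma->n_dbs + db) * fdma->db_size);
}

Note the old memset() has no replacement here; presumably the length programmed via fdma_dcb_add() bounds what the hardware reads, so clearing the rest of the buffer was never required.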
|
@@ -285,28 +275,7 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
 {
 	struct sparx5_tx *tx = &sparx5->tx;
 	struct fdma *fdma = &tx->fdma;
-	int idx, jdx, err;
-
-	INIT_LIST_HEAD(&tx->db_list);
-	/* Now for each dcb allocate the db */
-	for (idx = 0; idx < fdma->n_dcbs; ++idx) {
-		/* TX databuffers must be 16byte aligned */
-		for (jdx = 0; jdx < fdma->n_dbs; ++jdx) {
-			struct sparx5_db *db;
-			void *cpu_addr;
-
-			cpu_addr = devm_kzalloc(sparx5->dev,
-						FDMA_XTR_BUFFER_SIZE,
-						GFP_KERNEL);
-			if (!cpu_addr)
-				return -ENOMEM;
-			db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
-			if (!db)
-				return -ENOMEM;
-			db->cpu_addr = cpu_addr;
-			list_add_tail(&db->list, &tx->db_list);
-		}
-	}
+	int err;
 
 	err = fdma_alloc_phys(fdma);
 	if (err)
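
With the per-buffer devm_kzalloc() loop removed, the single fdma_alloc_phys() call now provides the whole region. A hedged sketch of what such a one-shot allocation can look like (hypothetical, not the fdma library's actual code), assuming fdma->size has been set as in sparx5_fdma_tx_init() below:

/* Hypothetical one-shot coherent allocation covering the DCB array and
 * every data buffer. fdma->dcbs becomes the virtual base used in
 * sparx5_fdma_xmit() and fdma->dma the bus address used in
 * sparx5_fdma_tx_dataptr_cb(). dma_alloc_coherent() returns zeroed
 * memory, covering what the removed devm_kzalloc() calls provided.
 */
static int fdma_alloc_contiguous_sketch(struct device *dev, struct fdma *fdma)
{
	fdma->dcbs = dma_alloc_coherent(dev, fdma->size, &fdma->dma,
					GFP_KERNEL);
	if (!fdma->dcbs)
		return -ENOMEM;

	return 0;
}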
|
@@ -353,7 +322,7 @@ static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
 	fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
 	fdma->priv = sparx5;
 	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
-	fdma->size = fdma_get_size(&sparx5->tx.fdma);
+	fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
 	fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
 	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
 }
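
For the offsets computed in sparx5_fdma_tx_dataptr_cb() to stay in bounds, fdma_get_size_contiguous() must account for the data buffers as well as the DCBs, which the old fdma_get_size() presumably did not. A hypothetical reconstruction of the required size (whether the real helper page-aligns the total is an assumption):

/* Hypothetical reconstruction: room for every DCB plus
 * n_dcbs * n_dbs data buffers, matching the layout assumed by
 * sparx5_fdma_tx_dataptr_cb(). Note that db_size itself is already
 * page-aligned above via ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE).
 */
static u64 fdma_size_sketch(const struct fdma *fdma)
{
	return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs +
		     (u64)fdma->n_dcbs * fdma->n_dbs * fdma->db_size,
		     PAGE_SIZE);
}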
|
|