@@ -183,6 +183,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 
 				goto err_unlock;
 			}
+
+			mhi_chan->rd_offset = ch_ring->rd_offset;
 		}
 
 		/* Set channel state to RUNNING */
@@ -312,7 +314,7 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
 	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
 
-	return !!(ring->rd_offset == ring->wr_offset);
+	return !!(mhi_chan->rd_offset == ring->wr_offset);
 }
 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
 
@@ -339,7 +341,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 			return -ENODEV;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
+		el = &ring->ring_cache[mhi_chan->rd_offset];
 
 		/* Check if there is data pending to be read from previous read operation */
 		if (mhi_chan->tre_bytes_left) {
@@ -418,6 +420,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 			tr_done = true;
 		}
 
+		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
 		mhi_ep_ring_inc_index(ring);
 	} while (buf_left && !tr_done);
 
@@ -478,6 +481,35 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 	return 0;
 }
 
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+	struct device *dev = &mhi_dev->dev;
+	struct mhi_result result = {};
+	int ret;
+
+	if (mhi_chan->xfer_cb) {
+		result.buf_addr = buf_info->cb_buf;
+		result.dir = mhi_chan->dir;
+		result.bytes_xferd = buf_info->size;
+
+		mhi_chan->xfer_cb(mhi_dev, &result);
+	}
+
+	ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+					   buf_info->code);
+	if (ret) {
+		dev_err(dev, "Error sending transfer completion event\n");
+		return;
+	}
+
+	mhi_ep_ring_inc_index(ring);
+}
+
 /* TODO: Handle partially formed TDs */
 int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 {
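
[Note: mhi_ep_skb_completion() above runs only after the controller driver
has finished the asynchronous write, so it is the controller's DMA-done path
that drives the completion event and the ring index update. A minimal sketch
of that controller side follows; the example_* names and the dma_submit()
helper are hypothetical, not part of the MHI EP API. The only real contract
is that buf_info->cb(buf_info) is invoked once the data is visible to the
host.]

static int example_write_async(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_buf_info *buf_info)
{
	/* Start DMA from buf_info->dev_addr to the host at buf_info->host_addr */
	return dma_submit(buf_info->dev_addr, buf_info->host_addr,
			  buf_info->size, buf_info);
}

/* Hypothetical DMA-done callback: the data has reached host memory */
static void example_dma_done(void *param)
{
	struct mhi_ep_buf_info *buf_info = param;

	buf_info->cb(buf_info);	/* ends up in mhi_ep_skb_completion() */
}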
@@ -488,7 +520,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 	struct mhi_ring_element *el;
 	u32 buf_left, read_offset;
 	struct mhi_ep_ring *ring;
-	enum mhi_ev_ccs code;
 	size_t tr_len;
 	u32 tre_len;
 	int ret;
@@ -512,7 +543,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 			goto err_exit;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
+		el = &ring->ring_cache[mhi_chan->rd_offset];
 		tre_len = MHI_TRE_DATA_GET_LEN(el);
 
 		tr_len = min(buf_left, tre_len);
@@ -521,33 +552,35 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 		buf_info.dev_addr = skb->data + read_offset;
 		buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
 		buf_info.size = tr_len;
+		buf_info.cb = mhi_ep_skb_completion;
+		buf_info.cb_buf = skb;
+		buf_info.mhi_dev = mhi_dev;
 
-		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
-		ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
-		if (ret < 0) {
-			dev_err(dev, "Error writing to the channel\n");
-			goto err_exit;
-		}
-
-		buf_left -= tr_len;
 		/*
 		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
 		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
 		 * the host so that the host can adjust the packet boundary to next TREs. Else send
 		 * the EOT event to the host indicating the packet boundary.
 		 */
-		if (buf_left)
-			code = MHI_EV_CC_OVERFLOW;
+		if (buf_left - tr_len)
+			buf_info.code = MHI_EV_CC_OVERFLOW;
 		else
-			code = MHI_EV_CC_EOT;
+			buf_info.code = MHI_EV_CC_EOT;
 
-		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
-		if (ret) {
-			dev_err(dev, "Error sending transfer completion event\n");
+		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+		ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+		if (ret < 0) {
+			dev_err(dev, "Error writing to the channel\n");
 			goto err_exit;
 		}
 
-		mhi_ep_ring_inc_index(ring);
+		buf_left -= tr_len;
+
+		/*
+		 * Update the read offset cached in mhi_chan. Actual read offset
+		 * will be updated by the completion handler.
+		 */
+		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
 	} while (buf_left);
 
 	mutex_unlock(&mhi_chan->lock);
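
[Note: with this hunk the transfer ring effectively has two read cursors:
mhi_chan->rd_offset is the submission cursor, advanced above as each TRE is
handed to write_async(), while ring->rd_offset is the completion cursor,
advanced only in mhi_ep_skb_completion() after the completion event is sent.
TREs between the two are in flight. An illustrative helper, not part of the
patch (the field names come from the patch, the function itself is
hypothetical):]

static size_t example_tres_in_flight(struct mhi_ep_chan *mhi_chan,
				     struct mhi_ep_ring *ring)
{
	/* Distance from the completion cursor to the submission cursor */
	return (mhi_chan->rd_offset + ring->ring_size - ring->rd_offset) %
		ring->ring_size;
}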
@@ -787,7 +820,7 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 		}
 
 		/* Sanity check to make sure there are elements in the ring */
-		if (ring->rd_offset == ring->wr_offset) {
+		if (chan->rd_offset == ring->wr_offset) {
 			mutex_unlock(&chan->lock);
 			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 			continue;
@@ -1431,6 +1464,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 		ret = -ENOMEM;
 		goto err_destroy_tre_buf_cache;
 	}
+
 	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
 	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
 	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
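
[Note: a rough sketch of how a client (function) driver might use the now
asynchronous mhi_ep_queue_skb(); example_xmit() is hypothetical, and the
DMA_FROM_DEVICE-to-DL-channel mapping follows mhi_ep_queue_is_empty() above.]

static int example_xmit(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	/* No DL TREs queued by the host yet, so nothing can be written */
	if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE))
		return -EAGAIN;

	/*
	 * Returns once all TREs are submitted via write_async(); the skb
	 * must stay valid until the channel's xfer_cb() fires, since it is
	 * passed along as buf_info.cb_buf.
	 */
	return mhi_ep_queue_skb(mhi_dev, skb);
}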