Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit b6af3a9

Browse files
Mani-Sadhasivam authored and gregkh committed
bus: mhi: ep: Add support for async DMA write operation
[ Upstream commit ee08acb ] In order to optimize the data transfer, let's use the async DMA operation for writing (queuing) data to the host. In the async path, the completion event for the transfer ring will only be sent to the host when the controller driver notifies the MHI stack of the actual transfer completion using the callback (mhi_ep_skb_completion) supplied in "struct mhi_ep_buf_info". Also to accommodate the async operation, the transfer ring read offset (ring->rd_offset) is cached in the "struct mhi_ep_chan" and updated locally to let the stack queue further ring items to the controller driver. But the actual read offset of the transfer ring will only be updated in the completion callback. Signed-off-by: Manivannan Sadhasivam <[email protected]> Stable-dep-of: c7d0b2d ("bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone") Signed-off-by: Sasha Levin <[email protected]>
1 parent a77e595 commit b6af3a9

File tree

3 files changed

+59
-20
lines changed

3 files changed

+59
-20
lines changed

drivers/bus/mhi/ep/internal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,7 @@ struct mhi_ep_chan {
159159
void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
160160
enum mhi_ch_state state;
161161
enum dma_data_direction dir;
162+
size_t rd_offset;
162163
u64 tre_loc;
163164
u32 tre_size;
164165
u32 tre_bytes_left;

drivers/bus/mhi/ep/main.c

Lines changed: 54 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
183183

184184
goto err_unlock;
185185
}
186+
187+
mhi_chan->rd_offset = ch_ring->rd_offset;
186188
}
187189

188190
/* Set channel state to RUNNING */
@@ -312,7 +314,7 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
312314
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
313315
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
314316

315-
return !!(ring->rd_offset == ring->wr_offset);
317+
return !!(mhi_chan->rd_offset == ring->wr_offset);
316318
}
317319
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
318320

@@ -339,7 +341,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
339341
return -ENODEV;
340342
}
341343

342-
el = &ring->ring_cache[ring->rd_offset];
344+
el = &ring->ring_cache[mhi_chan->rd_offset];
343345

344346
/* Check if there is data pending to be read from previous read operation */
345347
if (mhi_chan->tre_bytes_left) {
@@ -418,6 +420,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
418420
tr_done = true;
419421
}
420422

423+
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
421424
mhi_ep_ring_inc_index(ring);
422425
}
423426

@@ -478,6 +481,35 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
478481
return 0;
479482
}
480483

484+
static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
485+
{
486+
struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
487+
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
488+
struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
489+
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
490+
struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
491+
struct device *dev = &mhi_dev->dev;
492+
struct mhi_result result = {};
493+
int ret;
494+
495+
if (mhi_chan->xfer_cb) {
496+
result.buf_addr = buf_info->cb_buf;
497+
result.dir = mhi_chan->dir;
498+
result.bytes_xferd = buf_info->size;
499+
500+
mhi_chan->xfer_cb(mhi_dev, &result);
501+
}
502+
503+
ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
504+
buf_info->code);
505+
if (ret) {
506+
dev_err(dev, "Error sending transfer completion event\n");
507+
return;
508+
}
509+
510+
mhi_ep_ring_inc_index(ring);
511+
}
512+
481513
/* TODO: Handle partially formed TDs */
482514
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
483515
{
@@ -488,7 +520,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
488520
struct mhi_ring_element *el;
489521
u32 buf_left, read_offset;
490522
struct mhi_ep_ring *ring;
491-
enum mhi_ev_ccs code;
492523
size_t tr_len;
493524
u32 tre_len;
494525
int ret;
@@ -512,7 +543,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
512543
goto err_exit;
513544
}
514545

515-
el = &ring->ring_cache[ring->rd_offset];
546+
el = &ring->ring_cache[mhi_chan->rd_offset];
516547
tre_len = MHI_TRE_DATA_GET_LEN(el);
517548

518549
tr_len = min(buf_left, tre_len);
@@ -521,33 +552,35 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
521552
buf_info.dev_addr = skb->data + read_offset;
522553
buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
523554
buf_info.size = tr_len;
555+
buf_info.cb = mhi_ep_skb_completion;
556+
buf_info.cb_buf = skb;
557+
buf_info.mhi_dev = mhi_dev;
524558

525-
dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
526-
ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
527-
if (ret < 0) {
528-
dev_err(dev, "Error writing to the channel\n");
529-
goto err_exit;
530-
}
531-
532-
buf_left -= tr_len;
533559
/*
534560
* For all TREs queued by the host for DL channel, only the EOT flag will be set.
535561
* If the packet doesn't fit into a single TRE, send the OVERFLOW event to
536562
* the host so that the host can adjust the packet boundary to next TREs. Else send
537563
* the EOT event to the host indicating the packet boundary.
538564
*/
539-
if (buf_left)
540-
code = MHI_EV_CC_OVERFLOW;
565+
if (buf_left - tr_len)
566+
buf_info.code = MHI_EV_CC_OVERFLOW;
541567
else
542-
code = MHI_EV_CC_EOT;
568+
buf_info.code = MHI_EV_CC_EOT;
543569

544-
ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
545-
if (ret) {
546-
dev_err(dev, "Error sending transfer completion event\n");
570+
dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
571+
ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
572+
if (ret < 0) {
573+
dev_err(dev, "Error writing to the channel\n");
547574
goto err_exit;
548575
}
549576

550-
mhi_ep_ring_inc_index(ring);
577+
buf_left -= tr_len;
578+
579+
/*
580+
* Update the read offset cached in mhi_chan. Actual read offset
581+
* will be updated by the completion handler.
582+
*/
583+
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
551584
} while (buf_left);
552585

553586
mutex_unlock(&mhi_chan->lock);
@@ -787,7 +820,7 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
787820
}
788821

789822
/* Sanity check to make sure there are elements in the ring */
790-
if (ring->rd_offset == ring->wr_offset) {
823+
if (chan->rd_offset == ring->wr_offset) {
791824
mutex_unlock(&chan->lock);
792825
kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
793826
continue;
@@ -1431,6 +1464,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
14311464
ret = -ENOMEM;
14321465
goto err_destroy_tre_buf_cache;
14331466
}
1467+
14341468
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
14351469
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
14361470
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);

include/linux/mhi_ep.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,16 +51,20 @@ struct mhi_ep_db_info {
5151

5252
/**
5353
* struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
54+
* @mhi_dev: MHI device associated with this buffer
5455
* @dev_addr: Address of the buffer in endpoint
5556
* @host_addr: Address of the bufffer in host
5657
* @size: Size of the buffer
58+
* @code: Transfer completion code
5759
* @cb: Callback to be executed by controller drivers after transfer completion (async)
5860
* @cb_buf: Opaque buffer to be passed to the callback
5961
*/
6062
struct mhi_ep_buf_info {
63+
struct mhi_ep_device *mhi_dev;
6164
void *dev_addr;
6265
u64 host_addr;
6366
size_t size;
67+
int code;
6468

6569
void (*cb)(struct mhi_ep_buf_info *buf_info);
6670
void *cb_buf;

0 commit comments

Comments (0)