
Commit 4398765

Mani-Sadhasivam authored and gregkh committed
bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone
[ Upstream commit c7d0b2d ]

MHI endpoint stack accidentally started allocating memory for objects from the DMA zone since commit 62210a2 ("bus: mhi: ep: Use slab allocator where applicable"). But there is no real need to allocate memory from this naturally limited DMA zone. This also causes the MHI endpoint stack to run out of memory while doing high bandwidth transfers.

So let's switch over to normal memory.

Cc: <[email protected]> # 6.8
Fixes: 62210a2 ("bus: mhi: ep: Use slab allocator where applicable")
Reviewed-by: Mayank Rana <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Manivannan Sadhasivam <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
1 parent 39601f4 commit 4398765

1 file changed, 7 insertions(+), 7 deletions(-)

drivers/bus/mhi/ep/main.c

@@ -74,7 +74,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -93,7 +93,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -111,7 +111,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -130,7 +130,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -422,7 +422,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 	read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 	write_offset = len - buf_left;
 
-	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
 	if (!buf_addr)
 		return -ENOMEM;
 
@@ -1460,14 +1460,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 
 	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
 							sizeof(struct mhi_ring_element), 0,
-							SLAB_CACHE_DMA, NULL);
+							0, NULL);
 	if (!mhi_cntrl->ev_ring_el_cache) {
 		ret = -ENOMEM;
 		goto err_free_cmd;
 	}
 
 	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
-						     SLAB_CACHE_DMA, NULL);
+						     0, NULL);
 	if (!mhi_cntrl->tre_buf_cache) {
 		ret = -ENOMEM;
 		goto err_destroy_ev_ring_el_cache;
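
For reference, a minimal sketch of the allocation pattern the patch switches to: a slab cache created without SLAB_CACHE_DMA and objects allocated with plain GFP_KERNEL rather than GFP_KERNEL | GFP_DMA. The struct, cache name and function names below (demo_ring_element, demo_el_cache, demo_cache_init, demo_send_event) are hypothetical illustrations, not part of the MHI endpoint driver.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical ring element, a stand-in for struct mhi_ring_element. */
struct demo_ring_element {
	u64 ptr;
	u32 dword[2];
};

static struct kmem_cache *demo_el_cache;

static int demo_cache_init(void)
{
	/* Flags are 0: no SLAB_CACHE_DMA, so objects come from normal memory. */
	demo_el_cache = kmem_cache_create("demo_ring_el",
					  sizeof(struct demo_ring_element),
					  0, 0, NULL);
	if (!demo_el_cache)
		return -ENOMEM;

	return 0;
}

static int demo_send_event(void)
{
	struct demo_ring_element *el;

	/* Plain GFP_KERNEL; GFP_DMA would force the allocation into the small DMA zone. */
	el = kmem_cache_zalloc(demo_el_cache, GFP_KERNEL);
	if (!el)
		return -ENOMEM;

	/* ... fill the element and queue it to the event ring ... */

	kmem_cache_free(demo_el_cache, el);
	return 0;
}

static void demo_cache_exit(void)
{
	kmem_cache_destroy(demo_el_cache);
}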
