
Commit 39d4390

Haijun Liu authored and davem330 committed
net: wwan: t7xx: Add control DMA interface
Cross Layer DMA (CLDMA) Hardware interface (HIF) enables the control path of Host-Modem data transfers. The CLDMA HIF layer provides a common interface to the Port Layer.

CLDMA manages 8 independent RX/TX physical channels with data flow control in HW queues. CLDMA uses ring buffers of General Packet Descriptors (GPD) for TX/RX. A GPD can represent a single data buffer (DB) or multiple DBs.

CLDMA HIF initializes the GPD rings, registers ISR handlers for CLDMA interrupts, and initializes the CLDMA HW registers.

CLDMA TX flow:
1. The Port Layer writes the data.
2. Get the DB address.
3. Configure a GPD.
4. Trigger processing via a HW register write.

CLDMA RX flow:
1. CLDMA HW sends an RX "done" interrupt to the host.
2. The driver starts a thread to safely read the GPD.
3. The DB is sent to the Port Layer.
4. A new buffer is created for the GPD ring.

(Both flows are sketched in code below.)

Note: This patch does not enable compilation since it has dependencies such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and struct t7xx_pci_dev, which are added by the core patch.

Signed-off-by: Haijun Liu <[email protected]>
Signed-off-by: Chandrashekar Devegowda <[email protected]>
Co-developed-by: Ricardo Martinez <[email protected]>
Signed-off-by: Ricardo Martinez <[email protected]>
Reviewed-by: Loic Poulain <[email protected]>
Reviewed-by: Ilpo Järvinen <[email protected]>
Reviewed-by: Sergey Ryazanov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
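For orientation, here is a minimal sketch of the TX flow above, assuming the CLDMA HW helpers added by this patch (see t7xx_cldma.c below). config_tx_gpd() is a hypothetical stand-in for the GPD-ring code added elsewhere in this series, and MTK_TX is assumed to be the TX value of enum mtk_txrx; this is an illustration, not the driver's actual queue code.

    /* Hedged sketch of TX flow steps 1-4. Only the t7xx_cldma_* calls
     * exist in the file added by this patch; the rest is hypothetical.
     */
    #include <linux/types.h>

    #include "t7xx_cldma.h"

    static void config_tx_gpd(unsigned int qno, dma_addr_t db, size_t len)
    {
        /* Hypothetical: fill one GPD with the DB address and length,
         * then set its hardware-own flag so CLDMA may process it.
         */
    }

    static void tx_flow_sketch(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                               dma_addr_t db, size_t len, u64 ring_base)
    {
        /* Steps 1-3: Port Layer hands us a mapped DB; put it in a GPD. */
        config_tx_gpd(qno, db, len);

        /* Step 4: trigger processing via HW register writes. */
        if (!t7xx_cldma_tx_addr_is_set(hw_info, qno))
            t7xx_cldma_hw_set_start_addr(hw_info, qno, ring_base, MTK_TX);
        t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
    }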
1 parent a4ff365 commit 39d4390

File tree

5 files changed: +1812 -0 lines changed


drivers/net/wwan/t7xx/t7xx_cldma.c

Lines changed: 281 additions & 0 deletions
@@ -0,0 +1,281 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Moises Veleta <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Andy Shevchenko <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 */

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#include "t7xx_cldma.h"

#define ADDR_SIZE 8

void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
{
    u32 val;

    val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
    val |= IP_BUSY_WAKEUP;
    iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
}

/**
 * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
 * @hw_info: Pointer to struct t7xx_cldma_hw.
 *
 * Restore HW after resume. Writes uplink configuration for CLDMA HW.
 */
void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
{
    u32 ul_cfg;

    ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
    ul_cfg &= ~UL_CFG_BIT_MODE_MASK;

    if (hw_info->hw_mode == MODE_BIT_64)
        ul_cfg |= UL_CFG_BIT_MODE_64;
    else if (hw_info->hw_mode == MODE_BIT_40)
        ul_cfg |= UL_CFG_BIT_MODE_40;
    else if (hw_info->hw_mode == MODE_BIT_36)
        ul_cfg |= UL_CFG_BIT_MODE_36;

    iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
    /* Disable TX and RX invalid address check */
    iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
    iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}

void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                               enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
                            hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
    val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    iowrite32(val, reg);
}

void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
{
    /* Enable the TX & RX interrupts */
    iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
    iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
    /* Enable the empty queue interrupt */
    iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
    iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
}

void t7xx_cldma_hw_reset(void __iomem *ao_base)
{
    u32 val;

    val = ioread32(ao_base + REG_INFRA_RST2_SET);
    val |= RST2_PMIC_SW_RST_SET;
    iowrite32(val, ao_base + REG_INFRA_RST2_SET);
    val = ioread32(ao_base + REG_INFRA_RST4_SET);
    val |= RST4_CLDMA1_SW_RST_SET;
    iowrite32(val, ao_base + REG_INFRA_RST4_SET);
    udelay(1);

    val = ioread32(ao_base + REG_INFRA_RST4_CLR);
    val |= RST4_CLDMA1_SW_RST_CLR;
    iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
    val = ioread32(ao_base + REG_INFRA_RST2_CLR);
    val |= RST2_PMIC_SW_RST_CLR;
    iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
}

bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
    u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;

    return ioread64(hw_info->ap_pdn_base + offset);
}

void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
                                  enum mtk_txrx tx_rx)
{
    u32 offset = qno * ADDR_SIZE;
    void __iomem *reg;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
                            hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
    iowrite64(address, reg + offset);
}

void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                                enum mtk_txrx tx_rx)
{
    void __iomem *base = hw_info->ap_pdn_base;

    if (tx_rx == MTK_RX)
        iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
    else
        iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
}

unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                                        enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 mask, val;

    mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
                            hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
    val = ioread32(reg);

    return val & mask;
}

void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
    unsigned int ch_id;

    ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
    ch_id &= bitmask;
    /* Clear the ch IDs in the TX interrupt status register */
    iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
    ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
}

void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
    unsigned int ch_id;

    ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
    ch_id &= bitmask;
    /* Clear the ch IDs in the RX interrupt status register */
    iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
    ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
}
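The tx_done/rx_done helpers above acknowledge the per-queue interrupt status and then read the register back, which flushes the posted write. As a hedged illustration of how an interrupt path could drive them (RX flow steps 1-2 from the commit message), with cldma_irq_work() and queue_gpd_work() as hypothetical names not taken from this series:

    /* Hedged sketch of CLDMA interrupt handling built on the helpers in
     * this file; MTK_TX is assumed to be the TX value of enum mtk_txrx.
     */
    static void cldma_irq_work(struct t7xx_cldma_hw *hw_info)
    {
        unsigned int tx_int, rx_int;

        /* Which TX queues completed? Mask to the queue-status bits. */
        tx_int = t7xx_cldma_hw_int_status(hw_info, TXRX_STATUS_BITMASK, MTK_TX);
        if (tx_int)
            t7xx_cldma_hw_tx_done(hw_info, tx_int);    /* ack TX IRQs */

        rx_int = t7xx_cldma_hw_int_status(hw_info, TXRX_STATUS_BITMASK, MTK_RX);
        if (rx_int) {
            t7xx_cldma_hw_rx_done(hw_info, rx_int);    /* ack RX IRQs */
            /* RX flow step 2: defer GPD reading to a thread that can
             * safely walk the ring and hand DBs to the Port Layer.
             */
            queue_gpd_work(rx_int);    /* hypothetical helper */
        }
    }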
unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
                                      enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
    val = ioread32(reg);
    return val & bitmask;
}

void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                                enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
    val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    iowrite32(val, reg);
}

void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
    val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}

void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
                               enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
    val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    iowrite32(val, reg);
}

void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
    void __iomem *reg;
    u32 val;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
    val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
    iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}

/**
 * t7xx_cldma_hw_init() - Initialize CLDMA HW.
 * @hw_info: Pointer to struct t7xx_cldma_hw.
 *
 * Write uplink and downlink configuration to CLDMA HW.
 */
void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
{
    u32 ul_cfg, dl_cfg;

    ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
    dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
    /* Configure the DRAM address mode */
    ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
    dl_cfg &= ~DL_CFG_BIT_MODE_MASK;

    if (hw_info->hw_mode == MODE_BIT_64) {
        ul_cfg |= UL_CFG_BIT_MODE_64;
        dl_cfg |= DL_CFG_BIT_MODE_64;
    } else if (hw_info->hw_mode == MODE_BIT_40) {
        ul_cfg |= UL_CFG_BIT_MODE_40;
        dl_cfg |= DL_CFG_BIT_MODE_40;
    } else if (hw_info->hw_mode == MODE_BIT_36) {
        ul_cfg |= UL_CFG_BIT_MODE_36;
        dl_cfg |= DL_CFG_BIT_MODE_36;
    }

    iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
    dl_cfg |= DL_CFG_UP_HW_LAST;
    iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
    iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
    iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
    iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
    iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}

void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
    void __iomem *reg;

    reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
                            hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
    iowrite32(CLDMA_ALL_Q, reg);
}

void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
    void __iomem *reg;

    reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
                            hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
    iowrite32(TXRX_STATUS_BITMASK, reg);
    iowrite32(EMPTY_STATUS_BITMASK, reg);
}
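To show how the pieces of this file fit together, here is a hedged bring-up sketch. The ordering and the hw_info setup (BAR mappings, hw_mode) are assumptions rather than the sequence used by the driver's core patch, and MTK_TX is again assumed to be the TX value of enum mtk_txrx; only the function names and signatures come from the file above.

    /* Hedged sketch: configure CLDMA HW, then start TX queue 0. */
    static void cldma_bringup_sketch(struct t7xx_cldma_hw *hw_info,
                                     u64 tx_ring_base)
    {
        t7xx_cldma_hw_init(hw_info);    /* UL/DL address mode, masks */

        /* Point queue 0 at its GPD ring, unmask its IRQs, and start it. */
        t7xx_cldma_hw_set_start_addr(hw_info, 0, tx_ring_base, MTK_TX);
        t7xx_cldma_hw_irq_en_txrx(hw_info, 0, MTK_TX);
        t7xx_cldma_hw_irq_en_eq(hw_info, 0, MTK_TX);
        t7xx_cldma_hw_start(hw_info);    /* TX/RX + empty-queue IRQs */
        t7xx_cldma_hw_start_queue(hw_info, 0, MTK_TX);
    }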
