
Commit 8d307f8

Jie Wang authored and davem330 committed
net: hns3: create new set of unified hclge_comm_cmd_send APIs
This patch creates a new set of unified hclge_comm_cmd_send APIs for the PF and VF cmdq modules. The subfunctions called by hclge_comm_cmd_send are also created, including the cmdq result check, cmdq return code conversion and ring space operation APIs. These new common cmdq APIs will be used to replace the old PF and VF cmdq APIs in the next patches.

Signed-off-by: Jie Wang <[email protected]>
Signed-off-by: Guangbin Huang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 6befad6 commit 8d307f8
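
For orientation, the sketch below shows how a caller is expected to use the unified entry point: fill one or more hclge_desc descriptors, then hand them to hclge_comm_cmd_send() with is_pf selecting the PF or VF special-opcode table. The example_get_version() name, the 0x0123 opcode and the manual descriptor setup are illustrative assumptions, not part of this patch.

/* Illustrative caller sketch; the opcode value and function name are
 * hypothetical and only stand in for a real command.
 */
static int example_get_version(struct hclge_comm_hw *hw, bool is_pf)
{
	struct hclge_desc desc = {0};
	int ret;

	desc.opcode = cpu_to_le16(0x0123);	/* placeholder opcode */
	/* Setting the NO_INTR flag makes HCLGE_COMM_SEND_SYNC() true, so the
	 * send path polls the CSQ head register for the firmware write-back.
	 */
	desc.flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR);

	ret = hclge_comm_cmd_send(hw, &desc, 1, is_pf);
	if (ret)
		return ret;

	/* On success the descriptor has been overwritten with the firmware's
	 * write-back and can be parsed by the caller.
	 */
	return 0;
}
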

File tree

2 files changed: +325 -0 lines changed

drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c

Lines changed: 259 additions & 0 deletions
@@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

static bool hclge_is_elem_in_array(const u16 *spec_opcode, u32 size, u16 opcode)
{
	u32 i;

	for (i = 0; i < size; i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

static const u16 pf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
				      HCLGE_COMM_OPC_STATS_32_BIT,
				      HCLGE_COMM_OPC_STATS_MAC,
				      HCLGE_COMM_OPC_STATS_MAC_ALL,
				      HCLGE_COMM_OPC_QUERY_32_BIT_REG,
				      HCLGE_COMM_OPC_QUERY_64_BIT_REG,
				      HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT,
				      HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT,
				      HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				      HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT,
				      HCLGE_COMM_QUERY_ALL_ERR_INFO };

static const u16 vf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
				      HCLGE_COMM_OPC_STATS_32_BIT,
				      HCLGE_COMM_OPC_STATS_MAC };

static bool hclge_comm_is_special_opcode(u16 opcode, bool is_pf)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	const u16 *spec_opcode = is_pf ? pf_spec_opcode : vf_spec_opcode;
	u32 size = is_pf ? ARRAY_SIZE(pf_spec_opcode) :
		   ARRAY_SIZE(vf_spec_opcode);

	return hclge_is_elem_in_array(spec_opcode, size, opcode);
}

static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	return head == hw->cmq.csq.next_to_use;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
				     bool *is_completed)
{
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
}

static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc, bool is_pf)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode, is_pf)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc, bool is_pf)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc, is_pf);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 * @is_pf: bool to judge pf/vf module
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num, bool is_pf)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
		 * need update the SW HEAD pointer csq->next_to_clean
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/**
	 * Record the location of desc in the ring for this time
	 * which will be use for hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc, is_pf);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}
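
As the commit message notes, the follow-up patches are expected to route the existing PF and VF send paths through this common helper. The sketch below shows one plausible shape for those thin wrappers; the example_* names, the static-inline form and the exact signatures are assumptions, not taken from this diff or the later patches.

/* Hypothetical thin wrappers around the new common API. */
static inline int example_pf_cmd_send(struct hclge_comm_hw *hw,
				      struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(hw, desc, num, true);	/* PF opcode table */
}

static inline int example_vf_cmd_send(struct hclge_comm_hw *hw,
				      struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(hw, desc, num, false);	/* VF opcode table */
}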

drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h

Lines changed: 66 additions & 0 deletions
@@ -7,6 +7,52 @@
 
 #include "hnae3.h"
 
+#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
+
+#define HCLGE_COMM_SEND_SYNC(flag) \
+	((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
+
+#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
+
+enum hclge_comm_cmd_return_status {
+	HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
+	HCLGE_COMM_CMD_NO_AUTH = 1,
+	HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
+	HCLGE_COMM_CMD_QUEUE_FULL = 3,
+	HCLGE_COMM_CMD_NEXT_ERR = 4,
+	HCLGE_COMM_CMD_UNEXE_ERR = 5,
+	HCLGE_COMM_CMD_PARA_ERR = 6,
+	HCLGE_COMM_CMD_RESULT_ERR = 7,
+	HCLGE_COMM_CMD_TIMEOUT = 8,
+	HCLGE_COMM_CMD_HILINK_ERR = 9,
+	HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
+	HCLGE_COMM_CMD_INVALID = 11,
+};
+
+enum hclge_comm_special_cmd {
+	HCLGE_COMM_OPC_STATS_64_BIT = 0x0030,
+	HCLGE_COMM_OPC_STATS_32_BIT = 0x0031,
+	HCLGE_COMM_OPC_STATS_MAC = 0x0032,
+	HCLGE_COMM_OPC_STATS_MAC_ALL = 0x0034,
+	HCLGE_COMM_OPC_QUERY_32_BIT_REG = 0x0041,
+	HCLGE_COMM_OPC_QUERY_64_BIT_REG = 0x0042,
+	HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+	HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+	HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+	HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+	HCLGE_COMM_QUERY_ALL_ERR_INFO = 0x1517,
+};
+
+enum hclge_comm_cmd_state {
+	HCLGE_COMM_STATE_CMD_DISABLE,
+};
+
+struct hclge_comm_errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
 #define HCLGE_DESC_DATA_LEN 6
 struct hclge_desc {
 	__le16 opcode;
@@ -52,4 +98,24 @@ struct hclge_comm_hw {
 	unsigned long comm_state;
 };
 
+static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+	writel(value, base + reg);
+}
+
+static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
+{
+	u8 __iomem *reg_addr = READ_ONCE(base);
+
+	return readl(reg_addr + reg);
+}
+
+#define hclge_comm_write_dev(a, reg, value) \
+	hclge_comm_write_reg((a)->io_base, reg, value)
+#define hclge_comm_read_dev(a, reg) \
+	hclge_comm_read_reg((a)->io_base, reg)
+
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+			int num, bool is_pf);
+
 #endif
