
Commit 68c0a5c

Salil authored and davem330 committed
net: hns3: Add HNS3 IMP(Integrated Mgmt Proc) Cmd Interface Support
This patch adds support for the IMP (Integrated Management Processor) command interface to the HNS3 driver. Each PF/VF supports a CQP (Command Queue Pair) ring interface, where each CQP consists of a send queue (CSQ) and a receive queue (CRQ). A PF/VF may support various commands, such as flow-table manipulation, device management, packet-buffer allocation, forwarding, VLAN configuration, tunneling/overlays etc. This patch contains the code to initialize the command queue, manage the command queue descriptors, and implement the Rx/Tx protocol with the command processor in the form of various commands/results and acknowledgements.

Signed-off-by: Daode Huang <[email protected]>
Signed-off-by: lipeng <[email protected]>
Signed-off-by: Salil Mehta <[email protected]>
Signed-off-by: Yisen Zhuang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 38caee9 commit 68c0a5c
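
In outline, a consumer of this interface fills one or more hclge_desc descriptors, hands them to hclge_cmd_send(), and reads any firmware response back out of the same descriptors. A minimal sketch of that lifecycle, assuming a hypothetical opcode HCLGE_OPC_EXAMPLE and consumer process_response(); the helpers themselves are defined in the diff below:

	/* Sketch only: HCLGE_OPC_EXAMPLE and process_response() are
	 * hypothetical; hw is the device's struct hclge_hw.
	 */
	struct hclge_desc desc;
	int ret;

	/* zero the descriptor, set the opcode, mark it as a read */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_EXAMPLE, true);

	/* copy to the CSQ, ring the tail doorbell, poll for completion */
	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret) {
		/* on success the firmware has written its reply into desc.data */
		process_response(desc.data);
	}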

File tree

2 files changed: +1096 −0 lines changed

Lines changed: 356 additions & 0 deletions
@@ -0,0 +1,356 @@
/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	/* one slot is kept unused so a full ring can be told from an empty one */
	return ring->desc_num - used - 1;
}

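Worked numbers for the ring-space arithmetic above, with assumed values:

	/* Assume desc_num = 1024, next_to_use = 2, next_to_clean = 1020:
	 * used  = (2 - 1020 + 1024) % 1024 = 6
	 * space = 1024 - 6 - 1 = 1017
	 * The "- 1" reserves one slot so that head == tail always means
	 * an empty ring rather than a full one.
	 */
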
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->flag = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return 0;
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->flag == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				(u32)dma);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				(u32)((dma >> 31) >> 1));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				(u32)dma);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				(u32)((dma >> 31) >> 1));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	}
}

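A note on the base-address writes above: (u32)((dma >> 31) >> 1) extracts the high 32 bits without shifting by the full type width, which would be undefined behavior when dma_addr_t is only 32 bits wide. The kernel's generic helpers in linux/kernel.h encode the same idiom, so an equivalent form (a sketch, not what the patch itself does) would be:

	/* Equivalent high/low split using the generic helpers; sketch only. */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(dma));
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, upper_32_bits(dma));
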
static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u16 ntc = csq->next_to_clean;
	struct hclge_desc *desc;
	int clean = 0;
	u32 head;

	desc = &csq->desc[ntc];
	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	/* Reclaim every descriptor the firmware has consumed, i.e. everything
	 * between next_to_clean and the hardware head pointer.
	 */
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	/* the send is complete once the hardware head has caught up with
	 * the software tail (next_to_use)
	 */
	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it copies the
 * descriptors into the CSQ, rings the doorbell, optionally waits for
 * completion and reclaims used descriptors.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	u16 opcode, desc_ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used by hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(desc->flag)) {
		do {
			if (hclge_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclge_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;
			pr_debug("Get cmd desc:\n");

			if (likely(!hclge_is_special_opcode(opcode)))
				desc_ret = le16_to_cpu(desc[handle].retval);
			else
				desc_ret = le16_to_cpu(desc[0].retval);

			if ((enum hclge_cmd_return_status)desc_ret ==
			    HCLGE_CMD_EXEC_SUCCESS)
				retval = 0;
			else
				retval = -EIO;
			hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		retval = -EAGAIN;

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle != num) {
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	}

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}

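From a caller's perspective, hclge_cmd_send() returns -EBUSY when the CSQ lacks space, -EAGAIN when a synchronous command times out, and -EIO when the firmware reports a non-success return code. A sketch of a retry wrapper for the transient cases; the wrapper and its policy are hypothetical, not part of the patch:

	/* Hypothetical helper: retry transient ring-full/timeout conditions. */
	static int hclge_cmd_send_retry(struct hclge_hw *hw,
					struct hclge_desc *desc, int num)
	{
		int retries = 3;
		int ret;

		do {
			ret = hclge_cmd_send(hw, desc, num);
		} while ((ret == -EBUSY || ret == -EAGAIN) && --retries);

		return ret;
	}
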
enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
						       u32 *version)
{
	struct hclge_query_version *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_query_version *)desc.data;

	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret)
		*version = le32_to_cpu(resp->firmware);

	return ret;
}

int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	/* Setup the queue entries for use by the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	hclge_cmd_init_regs(&hdev->hw);

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		goto err_crq;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;

err_crq:
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock_bh(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock_bh(&ring->lock);
}

void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}
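
For context, a sketch of how a PF driver might pair these entry points at probe/remove time; the surrounding functions are hypothetical, and only hclge_cmd_init() and hclge_destroy_cmd_queue() come from this patch:

	/* Hypothetical probe/remove pairing; error handling trimmed. */
	static int hclge_example_probe(struct hclge_dev *hdev)
	{
		int ret;

		ret = hclge_cmd_init(hdev);	/* rings, regs, FW version */
		if (ret)
			return ret;

		/* ... remaining device bring-up ... */
		return 0;
	}

	static void hclge_example_remove(struct hclge_dev *hdev)
	{
		/* unmaps and frees both CSQ and CRQ descriptor rings */
		hclge_destroy_cmd_queue(&hdev->hw);
	}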
