// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

static bool hclge_is_elem_in_array(const u16 *spec_opcode, u32 size, u16 opcode)
{
	u32 i;

	for (i = 0; i < size; i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

static const u16 pf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
				      HCLGE_COMM_OPC_STATS_32_BIT,
				      HCLGE_COMM_OPC_STATS_MAC,
				      HCLGE_COMM_OPC_STATS_MAC_ALL,
				      HCLGE_COMM_OPC_QUERY_32_BIT_REG,
				      HCLGE_COMM_OPC_QUERY_64_BIT_REG,
				      HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT,
				      HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT,
				      HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				      HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT,
				      HCLGE_COMM_QUERY_ALL_ERR_INFO };

static const u16 vf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
				      HCLGE_COMM_OPC_STATS_32_BIT,
				      HCLGE_COMM_OPC_STATS_MAC };

static bool hclge_comm_is_special_opcode(u16 opcode, bool is_pf)
{
	/* These commands consist of several descriptors, and use the
	 * first one to hold the opcode and the return value.
	 */
	const u16 *spec_opcode = is_pf ? pf_spec_opcode : vf_spec_opcode;
	u32 size = is_pf ? ARRAY_SIZE(pf_spec_opcode) :
		   ARRAY_SIZE(vf_spec_opcode);

	return hclge_is_elem_in_array(spec_opcode, size, opcode);
}

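/* Return the number of free descriptors in the CSQ. One slot is kept
 * unused so that next_to_use == next_to_clean always means "empty"
 * rather than "full".
 */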
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

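/* Copy @num descriptors into the CSQ at next_to_use, wrapping around
 * to the start of the ring when the end is reached.
 */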
static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

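/* A head value reported by hardware is valid only if it lies inside
 * the window of submitted descriptors, i.e. between next_to_clean and
 * next_to_use, taking ring wrap-around into account.
 */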
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

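/* Free the descriptors that the firmware has consumed by advancing
 * next_to_clean to the hardware head pointer. An out-of-range head
 * indicates a firmware (IMP) fault, so command submission is disabled
 * and -EIO is returned.
 */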
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

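/* The CSQ is done when the hardware head has caught up with the
 * software tail, i.e. every submitted descriptor has been processed.
 */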
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

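/* Busy-wait, in 1 us steps up to tx_timeout iterations, for the
 * firmware to process all submitted descriptors.
 */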
static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
				     bool *is_completed)
{
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
}

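/* Translate the return code reported by the IMP firmware into a
 * standard kernel errno; unknown codes map to -EIO.
 */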
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	static const struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

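/* Copy the descriptors written back by hardware to the caller and
 * extract the firmware return code. Most commands carry the return
 * value in the last descriptor; "special" multi-descriptor commands
 * carry it in the first one.
 */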
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc, bool is_pf)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode, is_pf)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

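/* Wait for the firmware (for synchronous commands), check the return
 * code and clean the consumed descriptors off the CSQ. Returns -EBADE
 * if the firmware did not respond in time.
 */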
static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc, bool is_pf)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is synchronous, wait for the firmware to write
	 * back. When multiple descriptors are sent, the first one is used
	 * for this check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc, is_pf);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 * @is_pf: true for the PF module, false for the VF module
 *
 * This is the main send command for the command queue. It posts the
 * descriptors to the queue, notifies hardware, waits for completion
 * and cleans the queue.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num, bool is_pf)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean
		 * from the hardware register.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of the descriptors in the ring for this
	 * command, which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc, is_pf);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}
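
/* A minimal usage sketch, illustrative only: a caller fills a descriptor
 * and hands it to hclge_comm_cmd_send(). The setup helper named below is
 * assumed from the driver's companion header and is not defined in this
 * file:
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_STATS_MAC,
 *					true);
 *	ret = hclge_comm_cmd_send(hw, &desc, 1, true);
 *	if (ret)
 *		dev_err(&hw->cmq.csq.pdev->dev, "failed to send cmd\n");
 */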