/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

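/*
 * Number of free descriptors in the ring. One slot is always kept unused
 * so that a completely full ring can be distinguished from an empty one.
 */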
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

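/*
 * Check that the head pointer reported by hardware lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */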
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory allocation for command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
                      uint64_t size, uint32_t alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        /* A random suffix keeps memzone names unique across allocations. */
        snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
        if (mz == NULL)
                return -ENOMEM;

        ring->buf_size = size;
        ring->desc = mz->addr;
        ring->desc_dma_addr = mz->iova;
        ring->zone = (const void *)mz;
        hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
                 mz->name, ring->desc_dma_addr);

        return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
                 ((const struct rte_memzone *)ring->zone)->name,
                 ring->desc_dma_addr);
        rte_memzone_free((const struct rte_memzone *)ring->zone);
        ring->buf_size = 0;
        ring->desc = NULL;
        ring->desc_dma_addr = 0;
        ring->zone = NULL;
}

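/* Allocate the DMA-able descriptor array that backs a command queue ring. */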
static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

        if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
                hns3_err(hw, "allocate dma mem failed");
                return -ENOMEM;
        }

        return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        if (ring->desc)
                hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
        struct hns3_cmq_ring *ring =
                (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;
        ring->hw = hw;

        ret = hns3_alloc_cmd_desc(hw, ring);
        if (ret)
                hns3_err(hw, "descriptor %s alloc error %d",
                         (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

        return ret;
}

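/*
 * Reset the flags of a previously used descriptor so the same command can
 * be resent, keeping the read/write direction requested by the caller.
 */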
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
        else
                desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
                          enum hns3_opcode_type opcode, bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
        desc->opcode = rte_cpu_to_le_16(opcode);
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

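/* Zero every CSQ/CRQ register so no stale base address or pointer survives. */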
static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

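/*
 * Program a ring's DMA base address, depth and head/tail pointers into
 * the CSQ or CRQ register set, depending on the ring type.
 */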
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
        uint64_t dma = ring->desc_dma_addr;

        if (ring->ring_type == HNS3_TYPE_CSQ) {
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
                               HNS3_NIC_SW_RST_RDY);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        } else {
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
        }
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
        hns3_cmd_config_regs(&hw->cmq.csq);
        hns3_cmd_config_regs(&hw->cmq.crq);
}

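/*
 * Reclaim the CSQ descriptors that the firmware has consumed and advance
 * next_to_clean. Returns the number of descriptors cleaned, or a negative
 * errno if the hardware reports an implausible head pointer, in which case
 * a delayed reset is scheduled.
 */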
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *csq = &hw->cmq.csq;
        uint32_t head;
        int clean;

        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        if (!is_valid_csq_clean_head(csq, head)) {
                struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
                uint32_t global;
                uint32_t fun_rst;

                hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
                         csq->next_to_use, csq->next_to_clean);
                rte_atomic16_set(&hw->reset.disable_cmd, 1);
                if (hns->is_vf) {
                        global = hns3_read_dev(hw, HNS3_VF_RST_ING);
                        fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                        hns3_err(hw, "Delayed VF reset global: %x fun_rst: %x",
                                 global, fun_rst);
                        hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                } else {
                        global = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
                        fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                        hns3_err(hw, "Delayed IMP reset global: %x fun_rst: %x",
                                 global, fun_rst);
                        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
                }

                hns3_schedule_delayed_reset(hns);
                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

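/* The CSQ is drained once the hardware head pointer reaches next_to_use. */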
static bool
hns3_cmd_csq_done(struct hns3_hw *hw)
{
        uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
        /*
         * These commands have several descriptors,
         * and use the first one to save opcode and return value.
         */
        uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
                                  HNS3_OPC_STATS_32_BIT,
                                  HNS3_OPC_STATS_MAC,
                                  HNS3_OPC_STATS_MAC_ALL,
                                  HNS3_OPC_QUERY_32_BIT_REG,
                                  HNS3_OPC_QUERY_64_BIT_REG};
        uint32_t i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
                if (spec_opcode[i] == opcode)
                        return true;

        return false;
}

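/* Map a firmware completion code onto a negative errno value. */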
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
        switch (desc_ret) {
        case HNS3_CMD_EXEC_SUCCESS:
                return 0;
        case HNS3_CMD_NO_AUTH:
                return -EPERM;
        case HNS3_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case HNS3_CMD_QUEUE_FULL:
                return -EXFULL;
        case HNS3_CMD_NEXT_ERR:
                return -ENOSR;
        case HNS3_CMD_UNEXE_ERR:
                return -ENOTBLK;
        case HNS3_CMD_PARA_ERR:
                return -EINVAL;
        case HNS3_CMD_RESULT_ERR:
                return -ERANGE;
        case HNS3_CMD_TIMEOUT:
                return -ETIME;
        case HNS3_CMD_HILINK_ERR:
                return -ENOLINK;
        case HNS3_CMD_QUEUE_ILLEGAL:
                return -ENXIO;
        case HNS3_CMD_INVALID:
                return -EBADR;
        default:
                return -EIO;
        }
}

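/*
 * Copy the descriptors written back by the firmware out of the CSQ,
 * starting at the slot recorded when the command was posted, and
 * translate the returned status into an errno value.
 */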
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
                            struct hns3_cmd_desc *desc, int num, int ntc)
{
        uint16_t opcode, desc_ret;
        int current_ntc = ntc;
        int handle;

        opcode = rte_le_to_cpu_16(desc[0].opcode);
        for (handle = 0; handle < num; handle++) {
                /* Get the result of the hardware write back */
                desc[handle] = hw->cmq.csq.desc[current_ntc];

                current_ntc++;
                if (current_ntc == hw->cmq.csq.desc_num)
                        current_ntc = 0;
        }

        /* Special opcodes keep their return value in the first descriptor. */
        if (likely(!hns3_is_special_opcode(opcode)))
                desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
        else
                desc_ret = rte_le_to_cpu_16(desc[0].retval);

        hw->cmq.last_status = desc_ret;
        return hns3_cmd_convert_err_code(desc_ret);
}

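/*
 * Busy-wait for the firmware to catch up with the CSQ tail, bailing out
 * early if the command queue has been disabled or a reset is pending.
 */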
static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t timeout = 0;

        do {
                if (hns3_cmd_csq_done(hw))
                        return 0;

                if (rte_atomic16_read(&hw->reset.disable_cmd)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hns3_err(hw, "Don't wait for reply because of reset pending");
                        return -EIO;
                }

                rte_delay_us(1);
                timeout++;
        } while (timeout < hw->cmq.tx_timeout);
        hns3_err(hw, "Wait for reply timeout");
        return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor(s) describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main entry point for issuing commands: it cleans completed
 * entries, posts the new descriptors, and for synchronous commands waits
 * for and collects the firmware reply.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
        struct hns3_cmd_desc *desc_to_use;
        int handle = 0;
        int retval;
        uint32_t ntc;

        if (rte_atomic16_read(&hw->reset.disable_cmd))
                return -EBUSY;

        rte_spinlock_lock(&hw->cmq.csq.lock);

        /* Clean the command send queue */
        retval = hns3_cmd_csq_clean(hw);
        if (retval < 0) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return retval;
        }

        if (num > hns3_ring_space(&hw->cmq.csq)) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return -ENOMEM;
        }

        /*
         * Record the location of desc in the ring for this time,
         * which will be used for the hardware write back.
         */
        ntc = hw->cmq.csq.next_to_use;

        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

        /*
         * If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
                retval = hns3_cmd_poll_reply(hw);
                if (!retval)
                        retval = hns3_cmd_get_hardware_reply(hw, desc, num,
                                                             ntc);
        }

        rte_spinlock_unlock(&hw->cmq.csq.lock);
        return retval;
}

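/*
 * Read the firmware version over the command queue; this is also the first
 * command issued during init, so it doubles as a check that the queue works.
 */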
static int
hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
{
        struct hns3_query_version_cmd *resp;
        struct hns3_cmd_desc desc;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
        resp = (struct hns3_query_version_cmd *)desc.data;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret == 0)
                *version = rte_le_to_cpu_32(resp->firmware);

        return ret;
}

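/*
 * One-time setup of the command queues: init the ring locks, clear stale
 * registers, set ring sizes and timeout, and allocate both rings.
 */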
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
        int ret;

        /* Setup the lock for command queue */
        rte_spinlock_init(&hw->cmq.csq.lock);
        rte_spinlock_init(&hw->cmq.crq.lock);

        /*
         * Clear up all command registers,
         * in case there are some residual values.
         */
        hns3_cmd_clear_regs(hw);

        /* Setup the queue entries for use of the cmd queue */
        hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
        hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

        /* Setup Tx write back timeout */
        hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

        /* Setup queue rings */
        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
                return ret;
        }

        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
                goto err_crq;
        }

        return 0;

err_crq:
        hns3_free_cmd_desc(hw, &hw->cmq.csq);

        return ret;
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
        int ret;

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);

        hw->cmq.csq.next_to_clean = 0;
        hw->cmq.csq.next_to_use = 0;
        hw->cmq.crq.next_to_clean = 0;
        hw->cmq.crq.next_to_use = 0;
        hw->mbx_resp.head = 0;
        hw->mbx_resp.tail = 0;
        hw->mbx_resp.lost = 0;
        hns3_cmd_init_regs(hw);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);

        /*
         * Check if there is new reset pending, because the higher level
         * reset may happen when lower level reset is being processed.
         */
        if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
                PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
                ret = -EBUSY;
                goto err_cmd_init;
        }
        rte_atomic16_clear(&hw->reset.disable_cmd);

        ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
        if (ret) {
                PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
                goto err_cmd_init;
        }

        PMD_INIT_LOG(INFO, "The firmware version is %08x", hw->fw_version);

        return 0;

err_cmd_init:
        rte_atomic16_set(&hw->reset.disable_cmd, 1);
        return ret;
}

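/* Free one ring's descriptor memory while holding its lock. */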
static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        rte_spinlock_lock(&ring->lock);

        hns3_free_cmd_desc(hw, ring);

        rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
        hns3_destroy_queue(hw, &hw->cmq.csq);
        hns3_destroy_queue(hw, &hw->cmq.crq);
}

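/*
 * Disable further command submission and clear the command registers.
 * Taking both ring locks ensures any in-flight command completes first.
 */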
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);
        rte_atomic16_set(&hw->reset.disable_cmd, 1);
        hns3_cmd_clear_regs(hw);
        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);
}