/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>

23 #include "hns3_ethdev.h"
24 #include "hns3_regs.h"
25 #include "hns3_intr.h"
26 #include "hns3_logs.h"
#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

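/*
 * hns3_ring_space - number of free descriptors left in a command ring.
 * One slot is always kept unused so that a completely full ring can be
 * distinguished from an empty one.
 */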
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

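/*
 * Check that the CSQ head value reported by hardware lies between
 * next_to_clean and next_to_use, taking wrap-around of the descriptor
 * ring into account; any other value indicates a corrupted command queue.
 */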
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - DMA memory allocation for the command queue.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

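/* Free the memzone backing a command ring and reset its DMA bookkeeping. */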
static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc != NULL)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

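/*
 * Reset the flags of an already built descriptor so it can be submitted
 * again: interrupts stay disabled, the "in" direction bit is set, and the
 * WR (read) bit is set or cleared according to is_read.
 */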
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

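/*
 * Program the base address, depth and head/tail pointers of one command
 * ring into hardware: the TX (CSQ) registers for the send queue, the
 * RX (CRQ) registers for the receive queue.
 */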
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

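/*
 * Reclaim the CSQ descriptors that firmware has already consumed: read the
 * hardware head pointer, validate it and advance next_to_clean up to it.
 * Returns the number of descriptors cleaned, or -EIO on a corrupted head.
 */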
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	if (!is_valid_csq_clean_head(csq, head)) {
		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic16_set(&hw->reset.disable_cmd, 1);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

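/* Map a firmware return code carried in a descriptor to a negative errno. */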
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	switch (desc_ret) {
	case HNS3_CMD_EXEC_SUCCESS:	return 0;
	case HNS3_CMD_NO_AUTH:		return -EPERM;
	case HNS3_CMD_NOT_SUPPORTED:	return -EOPNOTSUPP;
	case HNS3_CMD_QUEUE_FULL:	return -EXFULL;
	case HNS3_CMD_NEXT_ERR:		return -ENOSR;
	case HNS3_CMD_UNEXE_ERR:	return -ENOTBLK;
	case HNS3_CMD_PARA_ERR:		return -EINVAL;
	case HNS3_CMD_RESULT_ERR:	return -ERANGE;
	case HNS3_CMD_TIMEOUT:		return -ETIME;
	case HNS3_CMD_HILINK_ERR:	return -ENOLINK;
	case HNS3_CMD_QUEUE_ILLEGAL:	return -ENXIO;
	case HNS3_CMD_INVALID:		return -EBADR;
	default:			return -EREMOTEIO;
	}
}

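/*
 * Copy the descriptors written back by firmware (starting at ring index ntc)
 * into the caller's descriptor array and convert the firmware return value
 * into an errno. Special multi-descriptor commands report their status in
 * the first descriptor, all others in the last one.
 */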
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];
		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

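/*
 * Busy-wait for firmware to advance the CSQ head pointer, polling roughly
 * once per microsecond for up to hw->cmq.tx_timeout iterations. Gives up
 * early if command sending has been disabled or a reset is pending.
 */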
static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;
		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw, "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}
		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}
		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);

	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is being reset
 *   - -EIO if the cmd csq is corrupted (due to reset) or
 *     there is a reset pending
 *   - -ENOMEM/-ETIME/... (non-zero) for other error cases
 *   - Zero if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send routine for the command queue; it posts
 * descriptors to the queue, cleans the queue, and so on.
 */
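/*
 * Minimal usage sketch (mirroring
 * hns3_cmd_query_firmware_version_and_capability() further down in this
 * file): build a descriptor with hns3_cmd_setup_basic_desc() and submit it
 * synchronously. The opcode and the layout of desc.data depend on the
 * firmware command being issued.
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret)
 *		return ret;
 */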
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic16_read(&hw->reset.disable_cmd))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}
	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of desc in the ring for this time
	 * which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

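/*
 * Translate the capability bits reported by firmware in the query-version
 * response into the driver's hw->capability bit map.
 */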
static void hns3_parse_capability(struct hns3_hw *hw,
				  struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability,
			     HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
}

static enum hns3_cmd_status
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;

	/* Initialize the cmd function */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}

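/*
 * hns3_cmd_init_queue - allocate the command queues.
 * Initializes the CSQ/CRQ locks, clears any residual register state and
 * allocates the DMA descriptor rings for both queues; the rings are only
 * programmed into hardware later, by hns3_cmd_init().
 */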
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/* Clear all command registers in case there are residual values */
	hns3_cmd_clear_regs(hw);

	/* Setup the number of entries for each command queue */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);
	return ret;
}

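/*
 * hns3_cmd_init - bring the command queues into service.
 * Resets the software ring pointers and mailbox bookkeeping, programs the
 * ring addresses into hardware, re-enables command sending unless a new
 * reset is pending, and finally queries the firmware version and
 * capabilities.
 */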
int
hns3_cmd_init(struct hns3_hw *hw)
{
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check whether a new reset is pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic16_clear(&hw->reset.disable_cmd);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	return 0;

err_cmd_init:
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	return ret;
}

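/* Free one command ring's descriptors while holding its lock. */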
static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);
	hns3_free_cmd_desc(hw, ring);
	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

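/*
 * hns3_cmd_uninit - stop the command interface.
 * Marks command sending as disabled and clears the command queue registers
 * so hardware stops using the descriptor rings; the rings themselves are
 * released separately by hns3_cmd_destroy_queue().
 */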
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}