/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
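
/*
 * Number of free descriptor slots in the ring. One slot is always kept
 * unused so that a full ring can be distinguished from an empty one.
 */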
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
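
/*
 * Check that the head value written back by hardware falls inside the
 * outstanding (not yet cleaned) region of the CSQ, taking ring
 * wraparound into account.
 */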
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Allocate a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}
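
/* Reset a descriptor's flags so it can be reused to send another command. */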
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}
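
/*
 * Fill a descriptor with the given opcode and the default flags; the WR
 * bit is set for read (query) commands.
 */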
void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}
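
/*
 * Program a ring's base address, depth and head/tail pointers into the
 * CSQ or CRQ register block according to its type.
 */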
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}
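
/*
 * Reclaim the CSQ descriptors that firmware has consumed. Returns the
 * number of cleaned descriptors, or -EIO if the hardware reports an
 * inconsistent head pointer.
 */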
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			__atomic_store_n(&hw->reset.disable_cmd, 1,
					 __ATOMIC_RELAXED);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}
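
/* The CSQ is done when the hardware head catches up with next_to_use. */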
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}
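
/* Convert an IMP (firmware) command status code to a negative errno. */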
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}
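
/*
 * Copy the descriptors written back by hardware, starting at ring
 * index ntc, into the caller's array and convert the command status.
 */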
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}
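
/*
 * Poll for command completion: spin in 1us steps, up to tx_timeout
 * iterations, and give up early if the command interface was disabled
 * or a reset is pending.
 */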
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO if the cmd csq is corrupted (due to reset) or
 *     there is a reset pending
 *   - -ENOMEM/-ETIME/...(non-zero) for other error cases
 *   - Zero if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send command for the command queue; it
 * posts descriptors to the queue, cleans the queue, etc.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of desc in the ring for this time
	 * which will be used by the hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}
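
/*
 * Translate the capability bits reported by firmware into the driver's
 * capability flags.
 */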
static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
			     HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Initialize the cmd function */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}
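
/*
 * Set up locks, default ring sizes and the Tx timeout, then allocate
 * the CSQ and CRQ rings.
 */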
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the lock for command queue */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear up all command registers,
	 * in case there are some residual values.
	 */
	hns3_cmd_clear_regs(hw);

	/* Setup the queue entries for use by the cmd queue */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}
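
/*
 * (Re)initialize ring state and command registers, then query the
 * firmware version and capabilities. On any failure the command
 * interface is left disabled.
 */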
int
hns3_cmd_init(struct hns3_hw *hw)
{
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is new reset pending, because the higher level
	 * reset may happen when lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	return 0;

err_cmd_init:
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}
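
/*
 * Disable the command interface and clear the command registers once
 * in-flight firmware commands have had time to finish.
 */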
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

	/*
	 * A delay is added to ensure that the register cleanup operations
	 * will not be performed concurrently with the firmware command and
	 * ensure that all the reserved commands are executed.
	 * Concurrency may occur in two scenarios: asynchronous command and
	 * timeout command. If the command fails to be executed due to busy
	 * scheduling, the command will be processed in the next scheduling
	 * of the firmware.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}