1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define HNS3_RX_RING_PREFETCTH_MASK 3
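/*
 * HNS3_CFG_DESC_NUM() converts a descriptor count into the value written to
 * the ring BD_NUM registers (the count divided by 8, minus one), and
 * HNS3_RX_RING_PREFETCTH_MASK is used to prefetch a new descriptor cache
 * line every fourth BD in the receive loops below.
 *
 * hns3_rx_queue_release_mbufs() frees every mbuf still referenced by the Rx
 * software ring (covering both the normal and the vector/rearm layouts), the
 * bulk-allocation cache, and any pending scattered first segment.
 */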
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
41 if (rxq->sw_ring == NULL)
44 if (rxq->rx_rearm_nb == 0) {
45 for (i = 0; i < rxq->nb_rx_desc; i++) {
46 if (rxq->sw_ring[i].mbuf != NULL) {
47 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
48 rxq->sw_ring[i].mbuf = NULL;
52 for (i = rxq->next_to_use;
53 i != rxq->rx_rearm_start;
54 i = (i + 1) % rxq->nb_rx_desc) {
55 if (rxq->sw_ring[i].mbuf != NULL) {
56 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
57 rxq->sw_ring[i].mbuf = NULL;
62 for (i = 0; i < rxq->bulk_mbuf_num; i++)
63 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
64 rxq->bulk_mbuf_num = 0;
66 if (rxq->pkt_first_seg) {
67 rte_pktmbuf_free(rxq->pkt_first_seg);
68 rxq->pkt_first_seg = NULL;
73 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
77 /* Note: Fake tx queue will not enter here */
79 for (i = 0; i < txq->nb_tx_desc; i++) {
80 if (txq->sw_ring[i].mbuf) {
81 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
82 txq->sw_ring[i].mbuf = NULL;
89 hns3_rx_queue_release(void *queue)
91 struct hns3_rx_queue *rxq = queue;
93 hns3_rx_queue_release_mbufs(rxq);
95 rte_memzone_free(rxq->mz);
97 rte_free(rxq->sw_ring);
103 hns3_tx_queue_release(void *queue)
105 struct hns3_tx_queue *txq = queue;
107 hns3_tx_queue_release_mbufs(txq);
109 rte_memzone_free(txq->mz);
111 rte_free(txq->sw_ring);
119 hns3_dev_rx_queue_release(void *queue)
121 struct hns3_rx_queue *rxq = queue;
122 struct hns3_adapter *hns;
128 rte_spinlock_lock(&hns->hw.lock);
129 hns3_rx_queue_release(queue);
130 rte_spinlock_unlock(&hns->hw.lock);
134 hns3_dev_tx_queue_release(void *queue)
136 struct hns3_tx_queue *txq = queue;
137 struct hns3_adapter *hns;
143 rte_spinlock_lock(&hns->hw.lock);
144 hns3_tx_queue_release(queue);
145 rte_spinlock_unlock(&hns->hw.lock);
149 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
151 struct hns3_rx_queue *rxq = queue;
152 struct hns3_adapter *hns;
162 if (hw->fkq_data.rx_queues[idx]) {
163 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
164 hw->fkq_data.rx_queues[idx] = NULL;
167 /* free fake rx queue arrays */
168 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
169 hw->fkq_data.nb_fake_rx_queues = 0;
170 rte_free(hw->fkq_data.rx_queues);
171 hw->fkq_data.rx_queues = NULL;
176 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
178 struct hns3_tx_queue *txq = queue;
179 struct hns3_adapter *hns;
189 if (hw->fkq_data.tx_queues[idx]) {
190 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
191 hw->fkq_data.tx_queues[idx] = NULL;
194 /* free fake tx queue arrays */
195 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
196 hw->fkq_data.nb_fake_tx_queues = 0;
197 rte_free(hw->fkq_data.tx_queues);
198 hw->fkq_data.tx_queues = NULL;
203 hns3_free_rx_queues(struct rte_eth_dev *dev)
205 struct hns3_adapter *hns = dev->data->dev_private;
206 struct hns3_fake_queue_data *fkq_data;
207 struct hns3_hw *hw = &hns->hw;
211 nb_rx_q = hw->data->nb_rx_queues;
212 for (i = 0; i < nb_rx_q; i++) {
213 if (dev->data->rx_queues[i]) {
214 hns3_rx_queue_release(dev->data->rx_queues[i]);
215 dev->data->rx_queues[i] = NULL;
219 /* Free fake Rx queues */
220 fkq_data = &hw->fkq_data;
221 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
222 if (fkq_data->rx_queues[i])
223 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
228 hns3_free_tx_queues(struct rte_eth_dev *dev)
230 struct hns3_adapter *hns = dev->data->dev_private;
231 struct hns3_fake_queue_data *fkq_data;
232 struct hns3_hw *hw = &hns->hw;
236 nb_tx_q = hw->data->nb_tx_queues;
237 for (i = 0; i < nb_tx_q; i++) {
238 if (dev->data->tx_queues[i]) {
239 hns3_tx_queue_release(dev->data->tx_queues[i]);
240 dev->data->tx_queues[i] = NULL;
244 /* Free fake Tx queues */
245 fkq_data = &hw->fkq_data;
246 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
247 if (fkq_data->tx_queues[i])
248 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
253 hns3_free_all_queues(struct rte_eth_dev *dev)
255 hns3_free_rx_queues(dev);
256 hns3_free_tx_queues(dev);
260 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
262 struct rte_mbuf *mbuf;
266 for (i = 0; i < rxq->nb_rx_desc; i++) {
267 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
268 if (unlikely(mbuf == NULL)) {
269 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
271 hns3_rx_queue_release_mbufs(rxq);
275 rte_mbuf_refcnt_set(mbuf, 1);
277 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
279 mbuf->port = rxq->port_id;
281 rxq->sw_ring[i].mbuf = mbuf;
282 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
283 rxq->rx_ring[i].addr = dma_addr;
284 rxq->rx_ring[i].rx.bd_base_info = 0;
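/*
 * Map the configured Rx buffer length to the matching HNS3_BD_SIZE_*_TYPE
 * encoding that is programmed into the HNS3_RING_RX_BD_LEN_REG register by
 * hns3_init_rx_queue_hw() below.
 */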
291 hns3_buf_size2type(uint32_t buf_size)
297 bd_size_type = HNS3_BD_SIZE_512_TYPE;
300 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
303 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
306 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
313 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
315 uint32_t rx_buf_len = rxq->rx_buf_len;
316 uint64_t dma_addr = rxq->rx_ring_phys_addr;
318 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
319 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
320 (uint32_t)((dma_addr >> 31) >> 1));
322 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
323 hns3_buf_size2type(rx_buf_len));
324 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
325 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
329 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
331 uint64_t dma_addr = txq->tx_ring_phys_addr;
333 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
334 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
335 (uint32_t)((dma_addr >> 31) >> 1));
337 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
338 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
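/*
 * Propagate the current port-based VLAN (PVID) state to every configured
 * queue: Rx queues get pvid_sw_discard_en and Tx queues get pvid_sw_shift_en,
 * so the software VLAN handling paths follow the latest PVID configuration.
 */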
342 hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
344 uint16_t nb_rx_q = hw->data->nb_rx_queues;
345 uint16_t nb_tx_q = hw->data->nb_tx_queues;
346 struct hns3_rx_queue *rxq;
347 struct hns3_tx_queue *txq;
351 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
352 for (i = 0; i < hw->cfg_max_queues; i++) {
354 rxq = hw->data->rx_queues[i];
356 rxq->pvid_sw_discard_en = pvid_en;
359 txq = hw->data->tx_queues[i];
361 txq->pvid_sw_shift_en = pvid_en;
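/*
 * Enable or disable the ring (RCB) for every queue pair, covering both the
 * queues owned by the application and the fake queues kept by the driver.
 * When enabling, queue pairs flagged as deferred-start are skipped.
 */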
367 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
369 uint16_t nb_rx_q = hw->data->nb_rx_queues;
370 uint16_t nb_tx_q = hw->data->nb_tx_queues;
371 struct hns3_rx_queue *rxq;
372 struct hns3_tx_queue *txq;
376 for (i = 0; i < hw->cfg_max_queues; i++) {
378 rxq = hw->data->rx_queues[i];
380 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
382 txq = hw->data->tx_queues[i];
384 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
385 if (rxq == NULL || txq == NULL ||
386 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
389 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
391 rcb_reg |= BIT(HNS3_RING_EN_B);
393 rcb_reg &= ~BIT(HNS3_RING_EN_B);
394 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
399 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
401 struct hns3_cfg_com_tqp_queue_cmd *req;
402 struct hns3_cmd_desc desc;
405 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
407 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
408 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
410 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
412 ret = hns3_cmd_send(hw, &desc, 1);
414 hns3_err(hw, "TQP enable fail, ret = %d", ret);
420 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
422 struct hns3_reset_tqp_queue_cmd *req;
423 struct hns3_cmd_desc desc;
426 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
428 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
429 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
430 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
432 ret = hns3_cmd_send(hw, &desc, 1);
434 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
440 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
442 struct hns3_reset_tqp_queue_cmd *req;
443 struct hns3_cmd_desc desc;
446 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
448 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
449 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
451 ret = hns3_cmd_send(hw, &desc, 1);
453 hns3_err(hw, "Get reset status error, ret = %d", ret);
457 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
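/*
 * Reset a single task queue pair (TQP) on the PF: disable the TQP, assert
 * the reset through the firmware command, poll the reset status for up to
 * HNS3_TQP_RESET_TRY_MS milliseconds, and finally deassert the reset.
 */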
461 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
463 #define HNS3_TQP_RESET_TRY_MS 200
468 ret = hns3_tqp_enable(hw, queue_id, false);
473 * In the current version, VF is not supported when the PF is driven by the
474 * DPDK driver; all task queue pairs are mapped to the PF function, so the
475 * PF's queue id equals the global queue id within the PF's range.
477 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
479 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
483 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
485 /* Wait for tqp hw reset */
486 rte_delay_ms(HNS3_POLL_RESPONE_MS);
487 reset_status = hns3_get_reset_status(hw, queue_id);
492 } while (get_timeofday_ms() < end);
495 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
499 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
501 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
507 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
512 /* Disable the VF's queue before sending the queue reset msg to the PF */
513 ret = hns3_tqp_enable(hw, queue_id, false);
517 memcpy(msg_data, &queue_id, sizeof(uint16_t));
519 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
520 sizeof(msg_data), true, NULL, 0);
524 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
526 struct hns3_hw *hw = &hns->hw;
528 return hns3vf_reset_tqp(hw, queue_id);
530 return hns3_reset_tqp(hw, queue_id);
534 hns3_reset_all_queues(struct hns3_adapter *hns)
536 struct hns3_hw *hw = &hns->hw;
539 for (i = 0; i < hw->cfg_max_queues; i++) {
540 ret = hns3_reset_queue(hns, i);
542 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
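/*
 * The helpers below program the per-queue interrupt coalescing registers:
 * the GL (gap limiting) values given in microseconds, the RL (rate limiting)
 * value, and, when the hardware coalescing mode supports it, the QL
 * (quantity limiting) thresholds.
 */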
550 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
551 uint8_t gl_idx, uint16_t gl_value)
553 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
554 HNS3_TQP_INTR_GL1_REG,
555 HNS3_TQP_INTR_GL2_REG};
556 uint32_t addr, value;
558 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
561 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
562 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
563 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
565 value = HNS3_GL_USEC_TO_REG(gl_value);
567 hns3_write_dev(hw, addr, value);
571 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
573 uint32_t addr, value;
575 if (rl_value > HNS3_TQP_INTR_RL_MAX)
578 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
579 value = HNS3_RL_USEC_TO_REG(rl_value);
581 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
583 hns3_write_dev(hw, addr, value);
587 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
591 if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
594 addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
595 hns3_write_dev(hw, addr, ql_value);
597 addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
598 hns3_write_dev(hw, addr, ql_value);
602 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
604 uint32_t addr, value;
606 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
609 hns3_write_dev(hw, addr, value);
613 * Enable all Rx queue interrupts when in interrupt Rx mode.
614 * This API is called before enabling queue Rx & Tx (in normal start or reset
615 * recovery scenarios); it fixes the case where the hardware Rx queue interrupt enable bit was cleared
619 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
621 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
622 uint16_t nb_rx_q = hw->data->nb_rx_queues;
625 if (dev->data->dev_conf.intr_conf.rxq == 0)
628 for (i = 0; i < nb_rx_q; i++)
629 hns3_queue_intr_enable(hw, i, en);
633 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
635 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
636 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
637 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
639 if (dev->data->dev_conf.intr_conf.rxq == 0)
642 hns3_queue_intr_enable(hw, queue_id, true);
644 return rte_intr_ack(intr_handle);
648 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
650 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
652 if (dev->data->dev_conf.intr_conf.rxq == 0)
655 hns3_queue_intr_enable(hw, queue_id, false);
661 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
663 struct hns3_hw *hw = &hns->hw;
664 struct hns3_rx_queue *rxq;
667 PMD_INIT_FUNC_TRACE();
669 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
670 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
672 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
677 rxq->next_to_use = 0;
678 rxq->rx_rearm_start = 0;
679 rxq->rx_free_hold = 0;
680 rxq->rx_rearm_nb = 0;
681 rxq->pkt_first_seg = NULL;
682 rxq->pkt_last_seg = NULL;
683 hns3_init_rx_queue_hw(rxq);
684 hns3_rxq_vec_setup(rxq);
690 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
692 struct hns3_hw *hw = &hns->hw;
693 struct hns3_rx_queue *rxq;
695 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
696 rxq->next_to_use = 0;
697 rxq->rx_free_hold = 0;
698 rxq->rx_rearm_start = 0;
699 rxq->rx_rearm_nb = 0;
700 hns3_init_rx_queue_hw(rxq);
704 hns3_init_tx_queue(struct hns3_tx_queue *queue)
706 struct hns3_tx_queue *txq = queue;
707 struct hns3_desc *desc;
712 for (i = 0; i < txq->nb_tx_desc; i++) {
713 desc->tx.tp_fe_sc_vld_ra_ri = 0;
717 txq->next_to_use = 0;
718 txq->next_to_clean = 0;
719 txq->tx_bd_ready = txq->nb_tx_desc - 1;
720 hns3_init_tx_queue_hw(txq);
724 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
726 struct hns3_hw *hw = &hns->hw;
727 struct hns3_tx_queue *txq;
729 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
730 hns3_init_tx_queue(txq);
734 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
736 struct hns3_hw *hw = &hns->hw;
737 struct hns3_tx_queue *txq;
739 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
740 hns3_init_tx_queue(txq);
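/*
 * Bind each Tx ring to its traffic class: for every enabled TC, write the TC
 * index into the HNS3_RING_TX_TC_REG register of the rings in that TC's
 * TQP range.
 */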
744 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
746 struct hns3_hw *hw = &hns->hw;
747 struct hns3_tx_queue *txq;
750 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
751 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
754 if (!tc_queue->enable)
757 for (j = 0; j < tc_queue->tqp_count; j++) {
758 num = tc_queue->tqp_offset + j;
759 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
763 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
769 hns3_start_rx_queues(struct hns3_adapter *hns)
771 struct hns3_hw *hw = &hns->hw;
772 struct hns3_rx_queue *rxq;
776 /* Initialize RSS for queues */
777 ret = hns3_config_rss(hns);
779 hns3_err(hw, "Failed to configure rss %d", ret);
783 for (i = 0; i < hw->data->nb_rx_queues; i++) {
784 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
785 if (rxq == NULL || rxq->rx_deferred_start)
787 ret = hns3_dev_rx_queue_start(hns, i);
789 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
795 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
796 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
797 if (rxq == NULL || rxq->rx_deferred_start)
799 hns3_fake_rx_queue_start(hns, i);
804 for (j = 0; j < i; j++) {
805 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
806 hns3_rx_queue_release_mbufs(rxq);
813 hns3_start_tx_queues(struct hns3_adapter *hns)
815 struct hns3_hw *hw = &hns->hw;
816 struct hns3_tx_queue *txq;
819 for (i = 0; i < hw->data->nb_tx_queues; i++) {
820 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
821 if (txq == NULL || txq->tx_deferred_start)
823 hns3_dev_tx_queue_start(hns, i);
826 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
827 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
828 if (txq == NULL || txq->tx_deferred_start)
830 hns3_fake_tx_queue_start(hns, i);
833 hns3_init_tx_ring_tc(hns);
838 * Note: this function only initializes and sets up the queues; it does not enable queue Rx & Tx.
841 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
843 struct hns3_hw *hw = &hns->hw;
847 ret = hns3_reset_all_queues(hns);
849 hns3_err(hw, "Failed to reset all queues %d", ret);
854 ret = hns3_start_rx_queues(hns);
856 hns3_err(hw, "Failed to start rx queues: %d", ret);
860 hns3_start_tx_queues(hns);
866 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
868 struct hns3_hw *hw = &hns->hw;
871 hns3_enable_all_queues(hw, false);
873 ret = hns3_reset_all_queues(hns);
875 hns3_err(hw, "Failed to reset all queues %d", ret);
883 * Iterate over all Rx queues and call the callback() function for each Rx
887 * The target eth dev.
888 * @param[in] callback
889 * The function to call for each queue.
890 * If the callback function returns nonzero, iteration stops and its return value is propagated.
892 * The arguments to provide the callback function with.
895 * 0 on success, otherwise with errno set.
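/*
 * Example usage (illustrative only; dump_rxq_cb is a hypothetical callback):
 *
 *   static int
 *   dump_rxq_cb(struct hns3_rx_queue *rxq, void *arg __rte_unused)
 *   {
 *           printf("queue %u: %u descriptors\n", rxq->queue_id,
 *                  rxq->nb_rx_desc);
 *           return 0;
 *   }
 *
 *   ret = hns3_rxq_iterate(dev, dump_rxq_cb, NULL);
 */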
898 hns3_rxq_iterate(struct rte_eth_dev *dev,
899 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
904 if (dev->data->rx_queues == NULL)
907 for (i = 0; i < dev->data->nb_rx_queues; i++) {
908 ret = callback(dev->data->rx_queues[i], arg);
917 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
918 struct hns3_queue_info *q_info)
920 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
921 const struct rte_memzone *rx_mz;
922 struct hns3_rx_queue *rxq;
923 unsigned int rx_desc;
925 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
926 RTE_CACHE_LINE_SIZE, q_info->socket_id);
928 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
933 /* Allocate rx ring hardware descriptors. */
934 rxq->queue_id = q_info->idx;
935 rxq->nb_rx_desc = q_info->nb_desc;
938 * Allocate a little more memory because the Rx vector functions
939 * don't check boundaries each time.
941 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
942 sizeof(struct hns3_desc);
943 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
944 rx_desc, HNS3_RING_BASE_ALIGN,
947 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
949 hns3_rx_queue_release(rxq);
953 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
954 rxq->rx_ring_phys_addr = rx_mz->iova;
956 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
957 rxq->rx_ring_phys_addr);
963 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
964 uint16_t nb_desc, unsigned int socket_id)
966 struct hns3_adapter *hns = dev->data->dev_private;
967 struct hns3_hw *hw = &hns->hw;
968 struct hns3_queue_info q_info;
969 struct hns3_rx_queue *rxq;
972 if (hw->fkq_data.rx_queues[idx]) {
973 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
974 hw->fkq_data.rx_queues[idx] = NULL;
978 q_info.socket_id = socket_id;
979 q_info.nb_desc = nb_desc;
980 q_info.type = "hns3 fake RX queue";
981 q_info.ring_name = "rx_fake_ring";
982 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
984 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
988 /* No need to allocate sw_ring, because upper-layer applications don't use it */
992 rxq->rx_deferred_start = false;
993 rxq->port_id = dev->data->port_id;
994 rxq->configured = true;
995 nb_rx_q = dev->data->nb_rx_queues;
996 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
997 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
998 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1000 rte_spinlock_lock(&hw->lock);
1001 hw->fkq_data.rx_queues[idx] = rxq;
1002 rte_spinlock_unlock(&hw->lock);
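/*
 * Allocate an hns3_tx_queue structure and reserve the DMA memzone backing its
 * descriptor ring; the descriptors are cleared so every BD starts out invalid.
 * On any failure the partially constructed queue is released again.
 */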
1008 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1009 struct hns3_queue_info *q_info)
1011 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1012 const struct rte_memzone *tx_mz;
1013 struct hns3_tx_queue *txq;
1014 struct hns3_desc *desc;
1015 unsigned int tx_desc;
1018 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1019 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1021 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
1026 /* Allocate tx ring hardware descriptors. */
1027 txq->queue_id = q_info->idx;
1028 txq->nb_tx_desc = q_info->nb_desc;
1029 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1030 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1031 tx_desc, HNS3_RING_BASE_ALIGN,
1033 if (tx_mz == NULL) {
1034 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
1036 hns3_tx_queue_release(txq);
1040 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1041 txq->tx_ring_phys_addr = tx_mz->iova;
1043 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
1044 txq->tx_ring_phys_addr);
1047 desc = txq->tx_ring;
1048 for (i = 0; i < txq->nb_tx_desc; i++) {
1049 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1057 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1058 uint16_t nb_desc, unsigned int socket_id)
1060 struct hns3_adapter *hns = dev->data->dev_private;
1061 struct hns3_hw *hw = &hns->hw;
1062 struct hns3_queue_info q_info;
1063 struct hns3_tx_queue *txq;
1066 if (hw->fkq_data.tx_queues[idx] != NULL) {
1067 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1068 hw->fkq_data.tx_queues[idx] = NULL;
1072 q_info.socket_id = socket_id;
1073 q_info.nb_desc = nb_desc;
1074 q_info.type = "hns3 fake TX queue";
1075 q_info.ring_name = "tx_fake_ring";
1076 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1078 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
1082 /* No need to allocate sw_ring, because upper-layer applications don't use it */
1083 txq->sw_ring = NULL;
1087 txq->tx_deferred_start = false;
1088 txq->port_id = dev->data->port_id;
1089 txq->configured = true;
1090 nb_tx_q = dev->data->nb_tx_queues;
1091 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1092 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1094 rte_spinlock_lock(&hw->lock);
1095 hw->fkq_data.tx_queues[idx] = txq;
1096 rte_spinlock_unlock(&hw->lock);
1102 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1104 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1108 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1109 /* first time configuration */
1111 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1112 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1113 RTE_CACHE_LINE_SIZE);
1114 if (hw->fkq_data.rx_queues == NULL) {
1115 hw->fkq_data.nb_fake_rx_queues = 0;
1118 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1120 rxq = hw->fkq_data.rx_queues;
1121 for (i = nb_queues; i < old_nb_queues; i++)
1122 hns3_dev_rx_queue_release(rxq[i]);
1124 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1125 RTE_CACHE_LINE_SIZE);
1128 if (nb_queues > old_nb_queues) {
1129 uint16_t new_qs = nb_queues - old_nb_queues;
1130 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1133 hw->fkq_data.rx_queues = rxq;
1134 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1135 rxq = hw->fkq_data.rx_queues;
1136 for (i = nb_queues; i < old_nb_queues; i++)
1137 hns3_dev_rx_queue_release(rxq[i]);
1139 rte_free(hw->fkq_data.rx_queues);
1140 hw->fkq_data.rx_queues = NULL;
1143 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1149 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1151 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1155 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1156 /* first time configuration */
1158 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1159 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1160 RTE_CACHE_LINE_SIZE);
1161 if (hw->fkq_data.tx_queues == NULL) {
1162 hw->fkq_data.nb_fake_tx_queues = 0;
1165 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1167 txq = hw->fkq_data.tx_queues;
1168 for (i = nb_queues; i < old_nb_queues; i++)
1169 hns3_dev_tx_queue_release(txq[i]);
1170 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1171 RTE_CACHE_LINE_SIZE);
1174 if (nb_queues > old_nb_queues) {
1175 uint16_t new_qs = nb_queues - old_nb_queues;
1176 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1179 hw->fkq_data.tx_queues = txq;
1180 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1181 txq = hw->fkq_data.tx_queues;
1182 for (i = nb_queues; i < old_nb_queues; i++)
1183 hns3_dev_tx_queue_release(txq[i]);
1185 rte_free(hw->fkq_data.tx_queues);
1186 hw->fkq_data.tx_queues = NULL;
1188 hw->fkq_data.nb_fake_tx_queues = nb_queues;
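/*
 * The hns3 hardware manages queues as Rx/Tx pairs, so the driver sets
 * cfg_max_queues to max(nb_rx_q, nb_tx_q) and pads the smaller side with
 * "fake" queues that are fully configured but never handed to the
 * application.
 */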
1194 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1197 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1198 uint16_t rx_need_add_nb_q;
1199 uint16_t tx_need_add_nb_q;
1204 /* Setup new number of fake RX/TX queues and reconfigure device. */
1205 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1206 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1207 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1208 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1210 hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1211 goto cfg_fake_rx_q_fail;
1214 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1216 hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1217 goto cfg_fake_tx_q_fail;
1220 /* Allocate and set up fake RX queue per Ethernet port. */
1221 port_id = hw->data->port_id;
1222 for (q = 0; q < rx_need_add_nb_q; q++) {
1223 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1224 rte_eth_dev_socket_id(port_id));
1226 goto setup_fake_rx_q_fail;
1229 /* Allocate and set up fake TX queue per Ethernet port. */
1230 for (q = 0; q < tx_need_add_nb_q; q++) {
1231 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1232 rte_eth_dev_socket_id(port_id));
1234 goto setup_fake_tx_q_fail;
1239 setup_fake_tx_q_fail:
1240 setup_fake_rx_q_fail:
1241 (void)hns3_fake_tx_queue_config(hw, 0);
1243 (void)hns3_fake_rx_queue_config(hw, 0);
1245 hw->cfg_max_queues = 0;
1251 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1253 struct rte_eth_dev_data *dev_data = hns->hw.data;
1254 struct hns3_rx_queue *rxq;
1255 struct hns3_tx_queue *txq;
1258 if (dev_data->rx_queues)
1259 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1260 rxq = dev_data->rx_queues[i];
1261 if (rxq == NULL || rxq->rx_deferred_start)
1263 hns3_rx_queue_release_mbufs(rxq);
1266 if (dev_data->tx_queues)
1267 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1268 txq = dev_data->tx_queues[i];
1269 if (txq == NULL || txq->tx_deferred_start)
1271 hns3_tx_queue_release_mbufs(txq);
1276 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1278 uint16_t vld_buf_size;
1279 uint16_t num_hw_specs;
1283 * The hns3 network engine only supports 4 typical buffer size
1284 * specifications; the chosen buffer size affects the max packet_len and the
1285 * max number of segments when hardware GRO is enabled on the receive side.
1286 * The relationship between them is as follows:
1287 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1288 * ---------------------|-------------------|----------------
1289 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1290 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1291 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1292 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
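/*
 * For example, a mempool created with a data room size of
 * 2048 + RTE_PKTMBUF_HEADROOM yields vld_buf_size = 2048, so the loop below
 * selects HNS3_2K_BD_BUF_SIZE as the Rx buffer length.
 */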
1294 static const uint16_t hw_rx_buf_size[] = {
1295 HNS3_4K_BD_BUF_SIZE,
1296 HNS3_2K_BD_BUF_SIZE,
1297 HNS3_1K_BD_BUF_SIZE,
1298 HNS3_512_BD_BUF_SIZE
1301 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1302 RTE_PKTMBUF_HEADROOM);
1304 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1307 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1308 for (i = 0; i < num_hw_specs; i++) {
1309 if (vld_buf_size >= hw_rx_buf_size[i]) {
1310 *rx_buf_len = hw_rx_buf_size[i];
1318 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1319 struct rte_mempool *mp, uint16_t nb_desc,
1322 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1323 nb_desc % HNS3_ALIGN_RING_DESC) {
1324 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1329 if (conf->rx_drop_en == 0)
1330 hns3_warn(hw, "if no descriptors are available, packets are always "
1331 "dropped and rx_drop_en (1) is fixed on");
1333 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1334 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1335 "minimal data room size (%u).",
1336 rte_pktmbuf_data_room_size(mp),
1337 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1345 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1346 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1347 struct rte_mempool *mp)
1349 struct hns3_adapter *hns = dev->data->dev_private;
1350 struct hns3_hw *hw = &hns->hw;
1351 struct hns3_queue_info q_info;
1352 struct hns3_rx_queue *rxq;
1353 uint16_t rx_buf_size;
1357 if (dev->data->dev_started) {
1358 hns3_err(hw, "rx_queue_setup after dev_start is not supported");
1362 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1366 if (dev->data->rx_queues[idx]) {
1367 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1368 dev->data->rx_queues[idx] = NULL;
1372 q_info.socket_id = socket_id;
1373 q_info.nb_desc = nb_desc;
1374 q_info.type = "hns3 RX queue";
1375 q_info.ring_name = "rx_ring";
1377 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1380 "Failed to alloc mem and reserve DMA mem for rx ring!");
1385 rxq->ptype_tbl = &hns->ptype_tbl;
1387 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1388 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1389 rxq->rx_deferred_start = conf->rx_deferred_start;
1391 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1392 sizeof(struct hns3_entry);
1393 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1394 RTE_CACHE_LINE_SIZE, socket_id);
1395 if (rxq->sw_ring == NULL) {
1396 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1397 hns3_rx_queue_release(rxq);
1401 rxq->next_to_use = 0;
1402 rxq->rx_free_hold = 0;
1403 rxq->rx_rearm_start = 0;
1404 rxq->rx_rearm_nb = 0;
1405 rxq->pkt_first_seg = NULL;
1406 rxq->pkt_last_seg = NULL;
1407 rxq->port_id = dev->data->port_id;
1409 * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1410 * the pvid_sw_discard_en in the queue struct should not be changed,
1411 * because PVID-related operations do not need to be processed by the PMD.
1412 * For the hns3 VF device, whether it needs to process PVID depends
1413 * on the configuration of the PF kernel-mode netdevice driver. The
1414 * related PF configuration is delivered through the mailbox and finally
1415 * reflected in port_base_vlan_cfg.
1417 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1418 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1419 HNS3_PORT_BASE_VLAN_ENABLE;
1421 rxq->pvid_sw_discard_en = false;
1422 rxq->configured = true;
1423 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1424 idx * HNS3_TQP_REG_SIZE);
1425 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1426 HNS3_RING_RX_HEAD_REG);
1427 rxq->rx_buf_len = rx_buf_size;
1429 rxq->pkt_len_errors = 0;
1430 rxq->l3_csum_errors = 0;
1431 rxq->l4_csum_errors = 0;
1432 rxq->ol3_csum_errors = 0;
1433 rxq->ol4_csum_errors = 0;
1435 /* The CRC length set here is used for amending the packet length */
1436 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1437 rxq->crc_len = RTE_ETHER_CRC_LEN;
1441 rxq->bulk_mbuf_num = 0;
1443 rte_spinlock_lock(&hw->lock);
1444 dev->data->rx_queues[idx] = rxq;
1445 rte_spinlock_unlock(&hw->lock);
1451 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1453 struct hns3_adapter *hns = dev->data->dev_private;
1454 struct hns3_hw *hw = &hns->hw;
1457 dev->data->scattered_rx = false;
1461 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1463 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1464 struct hns3_adapter *hns = dev->data->dev_private;
1465 struct hns3_hw *hw = &hns->hw;
1466 struct hns3_rx_queue *rxq;
1469 if (dev->data->rx_queues == NULL)
1472 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1473 rxq = dev->data->rx_queues[queue_id];
1474 if (hw->rx_buf_len == 0)
1475 hw->rx_buf_len = rxq->rx_buf_len;
1477 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1481 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1482 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1483 dev->data->scattered_rx = true;
1487 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1489 static const uint32_t ptypes[] = {
1491 RTE_PTYPE_L2_ETHER_VLAN,
1492 RTE_PTYPE_L2_ETHER_QINQ,
1493 RTE_PTYPE_L2_ETHER_LLDP,
1494 RTE_PTYPE_L2_ETHER_ARP,
1496 RTE_PTYPE_L3_IPV4_EXT,
1498 RTE_PTYPE_L3_IPV6_EXT,
1504 RTE_PTYPE_TUNNEL_GRE,
1508 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1509 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
1510 dev->rx_pkt_burst == hns3_recv_pkts_vec)
1517 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
1519 struct hns3_adapter *hns = dev->data->dev_private;
1520 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
1522 memset(tbl, 0, sizeof(*tbl));
1524 tbl->l2table[0] = RTE_PTYPE_L2_ETHER;
1525 tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;
1526 tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;
1527 tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;
1529 tbl->l3table[0] = RTE_PTYPE_L3_IPV4;
1530 tbl->l3table[1] = RTE_PTYPE_L3_IPV6;
1531 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
1532 tbl->l3table[3] = RTE_PTYPE_L2_ETHER;
1533 tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1534 tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1535 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
1537 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
1538 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
1539 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1540 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
1541 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
1542 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
1544 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
1545 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
1546 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
1548 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
1549 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
1550 tbl->inner_l3table[2] = 0;
1551 tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;
1552 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
1553 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
1555 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
1556 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
1557 tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1558 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
1559 tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;
1560 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
1562 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
1563 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
1564 tbl->ol3table[2] = 0;
1565 tbl->ol3table[3] = 0;
1566 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1567 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1569 tbl->ol4table[0] = 0;
1570 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
1571 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
1575 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1576 uint32_t l234_info, const struct hns3_desc *rxd)
1578 #define HNS3_STRP_STATUS_NUM 0x4
1580 #define HNS3_NO_STRP_VLAN_VLD 0x0
1581 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1582 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1583 uint32_t strip_status;
1584 uint32_t report_mode;
1587 * Due to a hardware limitation, the VLAN tag is always inserted into the Rx
1588 * descriptor when the tag is stripped from the packet, so the driver needs
1589 * to determine which tag to report to the mbuf according to the PVID
1590 * configuration and the VLAN stripping status.
1592 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1594 HNS3_NO_STRP_VLAN_VLD,
1595 HNS3_OUTER_STRP_VLAN_VLD,
1596 HNS3_INNER_STRP_VLAN_VLD,
1597 HNS3_OUTER_STRP_VLAN_VLD
1600 HNS3_NO_STRP_VLAN_VLD,
1601 HNS3_NO_STRP_VLAN_VLD,
1602 HNS3_NO_STRP_VLAN_VLD,
1603 HNS3_INNER_STRP_VLAN_VLD
1606 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1607 HNS3_RXD_STRP_TAGP_S);
1608 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
1609 switch (report_mode) {
1610 case HNS3_NO_STRP_VLAN_VLD:
1613 case HNS3_INNER_STRP_VLAN_VLD:
1614 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1615 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
1617 case HNS3_OUTER_STRP_VLAN_VLD:
1618 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1619 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
1628 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
1629 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
1632 uint8_t crc_len = rxq->crc_len;
1634 if (data_len <= crc_len) {
1635 rte_pktmbuf_free_seg(rxm);
1636 first_seg->nb_segs--;
1637 last_seg->data_len = (uint16_t)(last_seg->data_len -
1638 (crc_len - data_len));
1639 last_seg->next = NULL;
1641 rxm->data_len = (uint16_t)(data_len - crc_len);
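/*
 * Fetch one mbuf for the receive path: pop from the per-queue bulk cache if
 * it is not empty, otherwise refill the cache with HNS3_BULK_ALLOC_MBUF_NUM
 * mbufs in a single rte_mempool_get_bulk() call, and fall back to
 * rte_mbuf_raw_alloc() when the bulk refill fails.
 */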
1644 static inline struct rte_mbuf *
1645 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
1649 if (likely(rxq->bulk_mbuf_num > 0))
1650 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1652 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
1653 HNS3_BULK_ALLOC_MBUF_NUM);
1654 if (likely(ret == 0)) {
1655 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
1656 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1658 return rte_mbuf_raw_alloc(rxq->mb_pool);
1662 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1664 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1665 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1666 struct hns3_rx_queue *rxq; /* RX queue */
1667 struct hns3_entry *sw_ring;
1668 struct hns3_entry *rxe;
1669 struct hns3_desc rxd;
1670 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1671 struct rte_mbuf *rxm;
1672 uint32_t bd_base_info;
1685 rx_ring = rxq->rx_ring;
1686 sw_ring = rxq->sw_ring;
1687 rx_id = rxq->next_to_use;
1689 while (nb_rx < nb_pkts) {
1690 rxdp = &rx_ring[rx_id];
1691 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1692 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1695 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1696 (1u << HNS3_RXD_VLD_B)];
1698 nmb = hns3_rx_alloc_buffer(rxq);
1699 if (unlikely(nmb == NULL)) {
1702 port_id = rxq->port_id;
1703 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
1708 rxe = &sw_ring[rx_id];
1710 if (unlikely(rx_id == rxq->nb_rx_desc))
1713 rte_prefetch0(sw_ring[rx_id].mbuf);
1714 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
1715 rte_prefetch0(&rx_ring[rx_id]);
1716 rte_prefetch0(&sw_ring[rx_id]);
1722 dma_addr = rte_mbuf_data_iova_default(nmb);
1723 rxdp->addr = rte_cpu_to_le_64(dma_addr);
1724 rxdp->rx.bd_base_info = 0;
1726 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1727 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
1729 rxm->data_len = rxm->pkt_len;
1730 rxm->port = rxq->port_id;
1731 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1732 rxm->ol_flags = PKT_RX_RSS_HASH;
1733 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1735 rte_le_to_cpu_16(rxd.rx.fd_id);
1736 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1741 /* Load the remaining descriptor data and extract the necessary fields */
1742 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1743 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1744 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
1745 l234_info, &cksum_err);
1749 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
1751 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
1752 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
1754 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
1756 rx_pkts[nb_rx++] = rxm;
1759 rte_pktmbuf_free(rxm);
1762 rxq->next_to_use = rx_id;
1763 rxq->rx_free_hold += nb_rx_bd;
1764 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1765 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1766 rxq->rx_free_hold = 0;
1773 hns3_recv_scattered_pkts(void *rx_queue,
1774 struct rte_mbuf **rx_pkts,
1777 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1778 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1779 struct hns3_rx_queue *rxq; /* RX queue */
1780 struct hns3_entry *sw_ring;
1781 struct hns3_entry *rxe;
1782 struct rte_mbuf *first_seg;
1783 struct rte_mbuf *last_seg;
1784 struct hns3_desc rxd;
1785 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1786 struct rte_mbuf *rxm;
1787 struct rte_eth_dev *dev;
1788 uint32_t bd_base_info;
1803 rx_id = rxq->next_to_use;
1804 rx_ring = rxq->rx_ring;
1805 sw_ring = rxq->sw_ring;
1806 first_seg = rxq->pkt_first_seg;
1807 last_seg = rxq->pkt_last_seg;
1809 while (nb_rx < nb_pkts) {
1810 rxdp = &rx_ring[rx_id];
1811 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1812 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1816 * The interaction between software and hardware when
1817 * receiving a new packet in the hns3 network engine:
1818 * 1. The hardware network engine first writes the packet content
1819 * to the memory pointed to by the 'addr' field of the Rx Buffer
1820 * Descriptor, then fills the result of parsing the
1821 * packet, including the valid field, into the Rx Buffer
1822 * Descriptor in one write operation.
1823 * 2. The driver reads the Rx BD's valid field in a loop to check
1824 * whether it is valid; if valid, it assigns a new address to
1825 * the addr field, clears the valid field, gets the other
1826 * information of the packet by parsing the Rx BD's other fields,
1827 * and finally writes back the number of Rx BDs processed by the
1828 * driver to the HNS3_RING_RX_HEAD_REG register to inform
1830 * In the above process, the ordering is very important. We must
1831 * make sure that the CPU reads the Rx BD's other fields only after the
1834 * There are two types of re-ordering: compiler re-ordering and
1835 * CPU re-ordering under the ARMv8 architecture.
1836 * 1. We use volatile to deal with compiler re-ordering, so you
1837 * can see that rx_ring/rxdp are defined with volatile.
1838 * 2. We commonly use a memory barrier to deal with CPU
1839 * re-ordering, but the cost is high.
1841 * In order to avoid the high cost of using a memory barrier, we
1842 * use the data dependency ordering under the ARMv8 architecture,
1845 * instr02: load B <- A
1846 * where instr02 will always execute after instr01.
1848 * To construct the data dependency ordering, we use the
1849 * following assignment:
1850 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1851 * (1u<<HNS3_RXD_VLD_B)]
1852 * Using the gcc compiler under the ARMv8 architecture, the related
1853 * assembly code is, for example, as follows:
1854 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
1855 * instr01: ldr w26, [x22, #28] --read bd_base_info
1856 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1857 * instr03: sub w0, w0, #0x10 --calc (bd_base_info &
1859 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1860 * instr05: ldp x2, x3, [x0]
1861 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1862 * instr07: ldp x4, x5, [x0, #16]
1863 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1864 * instr05~08 depend on x0's value, x0 depends on w26's
1865 * value, and w26 is the bd_base_info; this forms the data
1866 * dependency ordering.
1867 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1868 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1869 * assignment is correct.
1871 * So we use the data dependency ordering instead of a memory
1872 * barrier to improve receive performance.
1874 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1875 (1u << HNS3_RXD_VLD_B)];
1877 nmb = hns3_rx_alloc_buffer(rxq);
1878 if (unlikely(nmb == NULL)) {
1879 dev = &rte_eth_devices[rxq->port_id];
1880 dev->data->rx_mbuf_alloc_failed++;
1885 rxe = &sw_ring[rx_id];
1887 if (unlikely(rx_id == rxq->nb_rx_desc))
1890 rte_prefetch0(sw_ring[rx_id].mbuf);
1891 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
1892 rte_prefetch0(&rx_ring[rx_id]);
1893 rte_prefetch0(&sw_ring[rx_id]);
1899 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1900 rxdp->rx.bd_base_info = 0;
1901 rxdp->addr = dma_addr;
1903 if (first_seg == NULL) {
1905 first_seg->nb_segs = 1;
1907 first_seg->nb_segs++;
1908 last_seg->next = rxm;
1911 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1912 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
1914 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
1921 * This is the last buffer of the received packet. The packet length
1922 * from the buffer descriptor may contain the CRC length, which should
1923 * be subtracted from it, and likewise from the data length.
1925 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
1928 * This is the last buffer of the received packet. If the CRC
1929 * is not stripped by the hardware:
1930 * - Subtract the CRC length from the total packet length.
1931 * - If the last buffer only contains the whole CRC or a part
1932 * of it, free the mbuf associated with the last buffer. If part
1933 * of the CRC is also contained in the previous mbuf, subtract
1934 * the length of that CRC part from the data length of the
1938 if (unlikely(rxq->crc_len > 0)) {
1939 first_seg->pkt_len -= rxq->crc_len;
1940 recalculate_data_len(first_seg, last_seg, rxm, rxq,
1944 first_seg->port = rxq->port_id;
1945 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1946 first_seg->ol_flags = PKT_RX_RSS_HASH;
1947 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1948 first_seg->hash.fdir.hi =
1949 rte_le_to_cpu_16(rxd.rx.fd_id);
1950 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1953 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
1954 HNS3_RXD_GRO_SIZE_S);
1955 if (gro_size != 0) {
1956 first_seg->ol_flags |= PKT_RX_LRO;
1957 first_seg->tso_segsz = gro_size;
1960 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1961 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1962 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1963 l234_info, &cksum_err);
1967 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
1968 l234_info, ol_info);
1970 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1971 hns3_rx_set_cksum_flag(first_seg,
1972 first_seg->packet_type,
1974 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
1976 rx_pkts[nb_rx++] = first_seg;
1980 rte_pktmbuf_free(first_seg);
1984 rxq->next_to_use = rx_id;
1985 rxq->pkt_first_seg = first_seg;
1986 rxq->pkt_last_seg = last_seg;
1988 rxq->rx_free_hold += nb_rx_bd;
1989 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1990 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1991 rxq->rx_free_hold = 0;
1998 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2003 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2009 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
2010 __rte_unused struct rte_mbuf **tx_pkts,
2011 __rte_unused uint16_t nb_pkts)
2017 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2018 struct rte_eth_burst_mode *mode)
2020 static const struct {
2021 eth_rx_burst_t pkt_burst;
2024 { hns3_recv_pkts, "Scalar" },
2025 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2026 { hns3_recv_pkts_vec, "Vector Neon" },
2029 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2033 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2034 if (pkt_burst == burst_infos[i].pkt_burst) {
2035 snprintf(mode->info, sizeof(mode->info), "%s",
2036 burst_infos[i].info);
2045 static eth_rx_burst_t
2046 hns3_get_rx_function(struct rte_eth_dev *dev)
2048 struct hns3_adapter *hns = dev->data->dev_private;
2049 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2051 if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
2052 return hns3_recv_pkts_vec;
2054 if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
2055 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
2056 return hns3_recv_pkts;
2058 return hns3_recv_scattered_pkts;
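/*
 * Validate the Tx ring size and derive tx_rs_thresh/tx_free_thresh from the
 * user configuration, substituting the HNS3_DEFAULT_* values when the user
 * passes 0 and checking the thresholds against the ring size with the
 * HNS3_TX_RS_FREE_THRESH_GAP margin.
 */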
2062 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2063 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2064 uint16_t *tx_free_thresh, uint16_t idx)
2066 #define HNS3_TX_RS_FREE_THRESH_GAP 8
2067 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2069 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2070 nb_desc % HNS3_ALIGN_RING_DESC) {
2071 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2076 rs_thresh = (conf->tx_rs_thresh > 0) ?
2077 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2078 free_thresh = (conf->tx_free_thresh > 0) ?
2079 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2080 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2081 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2082 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2083 hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
2084 "(%d) of tx descriptors for port=%d queue=%d check "
2086 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2091 if (conf->tx_free_thresh == 0) {
2092 /* Fast free Tx memory buffer to improve cache hit rate */
2093 fast_free_thresh = nb_desc - rs_thresh;
2094 if (fast_free_thresh >=
2095 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2096 free_thresh = fast_free_thresh -
2097 HNS3_TX_FAST_FREE_AHEAD;
2100 *tx_rs_thresh = rs_thresh;
2101 *tx_free_thresh = free_thresh;
2106 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2107 unsigned int socket_id, const struct rte_eth_txconf *conf)
2109 struct hns3_adapter *hns = dev->data->dev_private;
2110 uint16_t tx_rs_thresh, tx_free_thresh;
2111 struct hns3_hw *hw = &hns->hw;
2112 struct hns3_queue_info q_info;
2113 struct hns3_tx_queue *txq;
2117 if (dev->data->dev_started) {
2118 hns3_err(hw, "tx_queue_setup after dev_start is not supported");
2122 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2123 &tx_rs_thresh, &tx_free_thresh, idx);
2127 if (dev->data->tx_queues[idx] != NULL) {
2128 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2129 dev->data->tx_queues[idx] = NULL;
2133 q_info.socket_id = socket_id;
2134 q_info.nb_desc = nb_desc;
2135 q_info.type = "hns3 TX queue";
2136 q_info.ring_name = "tx_ring";
2137 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2140 "Failed to alloc mem and reserve DMA mem for tx ring!");
2144 txq->tx_deferred_start = conf->tx_deferred_start;
2145 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2146 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2147 RTE_CACHE_LINE_SIZE, socket_id);
2148 if (txq->sw_ring == NULL) {
2149 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2150 hns3_tx_queue_release(txq);
2155 txq->next_to_use = 0;
2156 txq->next_to_clean = 0;
2157 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2158 txq->tx_free_thresh = tx_free_thresh;
2159 txq->tx_rs_thresh = tx_rs_thresh;
2160 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2161 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2162 RTE_CACHE_LINE_SIZE, socket_id);
2164 hns3_err(hw, "failed to allocate tx mbuf free array!");
2165 hns3_tx_queue_release(txq);
2169 txq->port_id = dev->data->port_id;
2171 * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
2172 * the pvid_sw_shift_en in the queue struct should not be changed,
2173 * because PVID-related operations do not need to be processed by the PMD.
2174 * For the hns3 VF device, whether it needs to process PVID depends
2175 * on the configuration of the PF kernel-mode netdev driver. The
2176 * related PF configuration is delivered through the mailbox and finally
2177 * reflected in port_base_vlan_cfg.
2179 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
2180 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
2181 HNS3_PORT_BASE_VLAN_ENABLE;
2183 txq->pvid_sw_shift_en = false;
2184 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
2185 txq->configured = true;
2186 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
2187 idx * HNS3_TQP_REG_SIZE);
2188 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2189 HNS3_RING_TX_TAIL_REG);
2190 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2191 txq->tso_mode = hw->tso_mode;
2192 txq->over_length_pkt_cnt = 0;
2193 txq->exceed_limit_bd_pkt_cnt = 0;
2194 txq->exceed_limit_bd_reassem_fail = 0;
2195 txq->unsupported_tunnel_pkt_cnt = 0;
2196 txq->queue_full_cnt = 0;
2197 txq->pkt_padding_fail_cnt = 0;
2198 rte_spinlock_lock(&hw->lock);
2199 dev->data->tx_queues[idx] = txq;
2200 rte_spinlock_unlock(&hw->lock);
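/*
 * Walk the Tx ring from next_to_clean and release the mbufs of descriptors
 * that the hardware has already finished with (valid bit cleared), advancing
 * next_to_clean and tx_bd_ready accordingly.
 */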
2206 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2208 uint16_t tx_next_clean = txq->next_to_clean;
2209 uint16_t tx_next_use = txq->next_to_use;
2210 uint16_t tx_bd_ready = txq->tx_bd_ready;
2211 uint16_t tx_bd_max = txq->nb_tx_desc;
2212 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2213 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2214 struct rte_mbuf *mbuf;
2216 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2217 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2218 tx_next_use != tx_next_clean) {
2219 mbuf = tx_bak_pkt->mbuf;
2221 rte_pktmbuf_free_seg(mbuf);
2222 tx_bak_pkt->mbuf = NULL;
2230 if (tx_next_clean >= tx_bd_max) {
2232 desc = txq->tx_ring;
2233 tx_bak_pkt = txq->sw_ring;
2237 txq->next_to_clean = tx_next_clean;
2238 txq->tx_bd_ready = tx_bd_ready;
2242 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
2243 struct rte_mbuf *rxm, uint8_t *l2_len)
2249 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
2253 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
2254 switch (tun_flags) {
2255 case PKT_TX_TUNNEL_GENEVE:
2256 case PKT_TX_TUNNEL_VXLAN:
2257 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
2259 case PKT_TX_TUNNEL_GRE:
2261 * The OL4 header size is defined in units of 4 bytes; it contains the
2262 * outer L4 (GRE) length and the tunneling length.
2264 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
2266 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
2269 /* For non-UDP/GRE tunneling, drop the tunnel packet */
2272 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2273 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
2274 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
2280 hns3_config_gro(struct hns3_hw *hw, bool en)
2282 struct hns3_cfg_gro_status_cmd *req;
2283 struct hns3_cmd_desc desc;
2286 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2287 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2289 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2291 ret = hns3_cmd_send(hw, &desc, 1);
2293 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2294 en ? "enable" : "disable", ret);
2300 hns3_restore_gro_conf(struct hns3_hw *hw)
2306 offloads = hw->data->dev_conf.rxmode.offloads;
2307 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2308 ret = hns3_config_gro(hw, gro_en);
2310 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2311 gro_en ? "enabled" : "disabled", ret);
2317 hns3_pkt_is_tso(struct rte_mbuf *m)
2319 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2323 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2324 uint32_t paylen, struct rte_mbuf *rxm)
2326 uint8_t l2_len = rxm->l2_len;
2329 if (!hns3_pkt_is_tso(rxm))
2332 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2335 if (paylen <= rxm->tso_segsz)
2338 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2339 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2340 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2341 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2342 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2343 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2344 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2345 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2346 l2_len >> HNS3_L2_LEN_UNIT);
2347 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2348 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2352 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2354 desc->addr = rte_mbuf_data_iova(rxm);
2355 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2356 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2360 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2361 struct rte_mbuf *rxm)
2363 uint64_t ol_flags = rxm->ol_flags;
2367 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2368 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2369 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2370 paylen = rxm->pkt_len - hdr_len;
2371 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2372 hns3_set_tso(desc, ol_flags, paylen, rxm);
2375 * Currently, the hardware of the hns3 network engine does not support more
2376 * than two layers of VLAN offload in the Tx direction. So when the number of
2377 * VLANs in the packet represented by rxm, plus the number of VLANs
2378 * offloaded by hardware (such as PVID), exceeds two, the packet will
2379 * be discarded or the original VLAN of the packet will be overwritten
2380 * by the hardware. When the PF PVID is enabled by calling the API function
2381 * rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3
2382 * PF kernel ether driver, the outer VLAN tag will always be the PVID.
2383 * To avoid the VLAN of the Tx descriptor being overwritten by the PVID, it
2384 * should be added at the position close to the IP header when PVID is enabled.
2386 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
2388 desc->tx.ol_type_vlan_len_msec |=
2389 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2390 if (ol_flags & PKT_TX_QINQ_PKT)
2391 desc->tx.outer_vlan_tag =
2392 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2394 desc->tx.outer_vlan_tag =
2395 rte_cpu_to_le_16(rxm->vlan_tci);
2398 if (ol_flags & PKT_TX_QINQ_PKT ||
2399 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
2400 desc->tx.type_cs_vlan_tso_len |=
2401 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2402 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
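/*
 * Illustrative sketch, not part of the driver: how an application requests
 * VLAN (and QinQ) tag insertion, which the code above places into either the
 * outer or the inner VLAN field of the descriptor depending on whether the
 * PVID shift is enabled. The helper name and tag values are examples only.
 */
static __rte_unused void
hns3_example_request_vlan_insert(struct rte_mbuf *m)
{
	/* Single tag: hardware inserts vlan_tci. */
	m->ol_flags |= PKT_TX_VLAN_PKT;
	m->vlan_tci = 100;

	/* Double tag: vlan_tci_outer carries the outer tag. */
	m->ol_flags |= PKT_TX_QINQ_PKT;
	m->vlan_tci_outer = 200;
}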
2407 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
2408 struct rte_mbuf **alloc_mbuf)
2410 #define MAX_NON_TSO_BD_PER_PKT 18
2411 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
2414 /* Allocate enough mbufs */
2415 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
2418 for (i = 0; i < nb_new_buf - 1; i++)
2419 pkt_segs[i]->next = pkt_segs[i + 1];
2421 pkt_segs[nb_new_buf - 1]->next = NULL;
2422 pkt_segs[0]->nb_segs = nb_new_buf;
2423 *alloc_mbuf = pkt_segs[0];
2429 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2431 new_pkt->ol_flags = old_pkt->ol_flags;
2432 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2433 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2434 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2435 new_pkt->l2_len = old_pkt->l2_len;
2436 new_pkt->l3_len = old_pkt->l3_len;
2437 new_pkt->l4_len = old_pkt->l4_len;
2438 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2439 new_pkt->vlan_tci = old_pkt->vlan_tci;
2443 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
2444 uint8_t max_non_tso_bd_num)
2446 struct rte_mempool *mb_pool;
2447 struct rte_mbuf *new_mbuf;
2448 struct rte_mbuf *temp_new;
2449 struct rte_mbuf *temp;
2450 uint16_t last_buf_len;
2451 uint16_t nb_new_buf;
2461 mb_pool = tx_pkt->pool;
2462 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
2463 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
2464 if (nb_new_buf > max_non_tso_bd_num)
2467 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
2468 if (last_buf_len == 0)
2469 last_buf_len = buf_size;
2471 /* Allocate enough mbufs */
2472 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
2476 /* Copy the original packet content to the new mbufs */
2478 s = rte_pktmbuf_mtod(temp, char *);
2479 len_s = rte_pktmbuf_data_len(temp);
2480 temp_new = new_mbuf;
2481 while (temp != NULL && temp_new != NULL) {
2482 d = rte_pktmbuf_mtod(temp_new, char *);
2483 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
2487 len = RTE_MIN(len_s, len_d);
2491 len_d = len_d - len;
2492 len_s = len_s - len;
2498 s = rte_pktmbuf_mtod(temp, char *);
2499 len_s = rte_pktmbuf_data_len(temp);
2503 temp_new->data_len = buf_len;
2504 temp_new = temp_new->next;
2506 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
2508 /* free original mbufs */
2509 rte_pktmbuf_free(tx_pkt);
2511 *new_pkt = new_mbuf;
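/*
 * Worked example (illustrative): assuming the mempool leaves buf_size = 2048
 * bytes per mbuf after RTE_PKTMBUF_HEADROOM, a 5000-byte packet is
 * reassembled above into
 *   nb_new_buf   = (5000 - 1) / 2048 + 1 = 3 segments,
 *   last_buf_len = 5000 % 2048 = 904 bytes in the final segment,
 * and the reassembly fails if nb_new_buf exceeds max_non_tso_bd_num.
 */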
2517 hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
2519 uint32_t tmp = *ol_type_vlan_len_msec;
2521 /* (outer) IP header type */
2522 if (ol_flags & PKT_TX_OUTER_IPV4) {
2523 /* OL3 header size, defined in 4 bytes */
2524 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2525 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2526 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2527 hns3_set_field(tmp, HNS3_TXD_OL3T_M,
2528 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
2530 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2531 HNS3_OL3T_IPV4_NO_CSUM);
2532 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
2533 hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
2535 /* OL3 header size, defined in 4 bytes */
2536 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2537 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2540 *ol_type_vlan_len_msec = tmp;
2544 hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
2545 struct rte_net_hdr_lens *hdr_lens)
2547 uint32_t tmp = *ol_type_vlan_len_msec;
2550 /* OL2 header size, defined in 2 bytes */
2551 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2552 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2554 /* L4TUNT: L4 Tunneling Type */
2555 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2556 case PKT_TX_TUNNEL_GENEVE:
2557 case PKT_TX_TUNNEL_VXLAN:
2558 /* MAC-in-UDP tunneling packet, including VXLAN */
2559 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2560 HNS3_TUN_MAC_IN_UDP);
2562 * OL4 header size, in units of 4 bytes; it contains the outer
2563 * L4 (UDP) header length and the tunneling header length.
2565 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2566 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
2569 case PKT_TX_TUNNEL_GRE:
2570 hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
2573 * OL4 header size, in units of 4 bytes; it contains the outer
2574 * L4 (GRE) header length and the tunneling header length.
2576 l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
2577 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2578 l4_len >> HNS3_L4_LEN_UNIT);
2581 /* For tunnel types other than UDP (VXLAN/GENEVE) and GRE, drop the packet */
2585 *ol_type_vlan_len_msec = tmp;
2591 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2593 struct rte_net_hdr_lens *hdr_lens)
2595 struct hns3_desc *tx_ring = txq->tx_ring;
2596 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2600 hns3_parse_outer_params(ol_flags, &value);
2601 ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
2605 desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
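/*
 * Illustrative sketch, not part of the driver: mbuf metadata an application
 * typically sets for a VXLAN-encapsulated IPv4/TCP packet so that
 * hns3_parse_tunneling_params() above programs the outer and inner fields.
 * The helper name is hypothetical, and this assumes the usual DPDK convention
 * in which l2_len covers the UDP + VXLAN tunnel headers plus the inner
 * Ethernet header.
 */
static __rte_unused void
hns3_example_request_vxlan_offload(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	/* Tunnel (UDP + VXLAN) headers plus the inner Ethernet header. */
	m->l2_len = RTE_ETHER_VXLAN_HLEN + sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
}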
2611 hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2615 /* Enable L3 checksum offloads */
2616 if (ol_flags & PKT_TX_IPV4) {
2617 tmp = *type_cs_vlan_tso_len;
2618 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2620 /* inner(/normal) L3 header size, defined in 4 bytes */
2621 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2622 sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
2623 if (ol_flags & PKT_TX_IP_CKSUM)
2624 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2625 *type_cs_vlan_tso_len = tmp;
2626 } else if (ol_flags & PKT_TX_IPV6) {
2627 tmp = *type_cs_vlan_tso_len;
2628 /* L3T: IPv6 does not need an L3 checksum */
2629 hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
2631 /* inner(/normal) L3 header size, defined in 4 bytes */
2632 hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
2633 sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
2634 *type_cs_vlan_tso_len = tmp;
2639 hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
2643 /* Enable L4 checksum offloads */
2644 switch (ol_flags & PKT_TX_L4_MASK) {
2645 case PKT_TX_TCP_CKSUM:
2646 tmp = *type_cs_vlan_tso_len;
2647 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2649 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2650 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2651 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2652 *type_cs_vlan_tso_len = tmp;
2654 case PKT_TX_UDP_CKSUM:
2655 tmp = *type_cs_vlan_tso_len;
2656 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2658 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2659 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2660 sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
2661 *type_cs_vlan_tso_len = tmp;
2663 case PKT_TX_SCTP_CKSUM:
2664 tmp = *type_cs_vlan_tso_len;
2665 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
2667 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2668 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2669 sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
2670 *type_cs_vlan_tso_len = tmp;
2678 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2681 struct hns3_desc *tx_ring = txq->tx_ring;
2682 struct hns3_desc *desc = &tx_ring[tx_desc_id];
2685 /* inner(/normal) L2 header size, defined in 2 bytes */
2686 hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2687 sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
2689 hns3_parse_l3_cksum_params(ol_flags, &value);
2690 hns3_parse_l4_cksum_params(ol_flags, &value);
2692 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
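/*
 * Illustrative sketch, not part of the driver: for a plain (non-tunnel) UDP
 * over IPv6 packet, only the L4 checksum request and the header lengths are
 * needed for hns3_txd_enable_checksum() above to fill the descriptor. The
 * helper name is hypothetical.
 */
static __rte_unused void
hns3_example_request_udp6_cksum(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IPV6 | PKT_TX_UDP_CKSUM;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv6_hdr);
	m->l4_len = sizeof(struct rte_udp_hdr);
}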
2696 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
2697 uint32_t max_non_tso_bd_num)
2699 struct rte_mbuf *m_first = tx_pkts;
2700 struct rte_mbuf *m_last = tx_pkts;
2701 uint32_t tot_len = 0;
2706 * The hns3 network engine requires that the sum of the data length of
2707 * every 8 consecutive buffers be greater than the MSS. We simplify
2708 * this by ensuring that the sum of the first 8 consecutive frags is
2709 * greater than the GSO header length plus the MSS, and that every
2710 * remaining window of 7 consecutive frags is greater than the MSS, except for the last 7 frags.
2712 if (bd_num <= max_non_tso_bd_num)
2715 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
2716 i++, m_last = m_last->next)
2717 tot_len += m_last->data_len;
2722 /* ensure the sum of the first 8 frags is greater than mss + header */
2723 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
2724 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
2725 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
2726 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
2730 * ensure the sum of the data length of every 7 consecutive buffers
2731 * is greater than the mss, except for the last window.
2733 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
2734 tot_len -= m_first->data_len;
2735 tot_len += m_last->data_len;
2737 if (tot_len < tx_pkts->tso_segsz)
2740 m_first = m_first->next;
2741 m_last = m_last->next;
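/*
 * Worked example (illustrative): with max_non_tso_bd_num = 8, an MSS of 1400
 * and a TSO packet split into many 100-byte segments, the first window above
 * sums to 8 * 100 = 800 bytes, which is less than the header length plus the
 * MSS, so the check reports that the packet violates the hardware constraint.
 */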
2748 hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
2750 uint64_t ol_flags = m->ol_flags;
2751 struct rte_ipv4_hdr *ipv4_hdr;
2752 struct rte_udp_hdr *udp_hdr;
2753 uint32_t paylen, hdr_len;
2755 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
2758 if (ol_flags & PKT_TX_IPV4) {
2759 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2762 if (ol_flags & PKT_TX_IP_CKSUM)
2763 ipv4_hdr->hdr_checksum = 0;
2766 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
2767 ol_flags & PKT_TX_TCP_SEG) {
2768 hdr_len = m->l2_len + m->l3_len + m->l4_len;
2769 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2770 m->outer_l2_len + m->outer_l3_len : 0;
2771 paylen = m->pkt_len - hdr_len;
2772 if (paylen <= m->tso_segsz)
2774 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
2777 udp_hdr->dgram_cksum = 0;
2782 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
2784 uint32_t tmp_data_len_sum = 0;
2785 uint16_t nb_buf = m->nb_segs;
2786 uint32_t paylen, hdr_len;
2787 struct rte_mbuf *m_seg;
2790 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
2793 hdr_len = m->l2_len + m->l3_len + m->l4_len;
2794 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
2795 m->outer_l2_len + m->outer_l3_len : 0;
2796 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
2799 paylen = m->pkt_len - hdr_len;
2800 if (paylen > HNS3_MAX_BD_PAYLEN)
2804 * The TSO header (including the outer and inner L2, L3 and L4 headers)
2805 * must be provided by at most three descriptors in the hns3 network engine.
2809 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
2810 i++, m_seg = m_seg->next) {
2811 tmp_data_len_sum += m_seg->data_len;
2814 if (hdr_len > tmp_data_len_sum)
2820 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2822 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
2824 struct rte_ether_hdr *eh;
2825 struct rte_vlan_hdr *vh;
2827 if (!txq->pvid_sw_shift_en)
2831 * Due to hardware limitations, the hns3 network engine only supports
2832 * two-layer VLAN hardware offload in the Tx direction, so when PVID is
2833 * enabled, QinQ insertion is no longer supported.
2834 * In addition, when PVID is enabled, in the following two cases:
2835 * i) packets with more than two VLAN tags,
2836 * ii) packets with one VLAN tag while hardware VLAN insertion is enabled,
2838 * the packets will be regarded as abnormal packets and discarded by the
2839 * hardware in the Tx direction. For debugging purposes, a validation check
2840 * for these types of packets is added to the '.tx_pkt_prepare' ops
2841 * implementation function named hns3_prep_pkts to inform users that
2842 * these packets will be discarded.
2844 if (m->ol_flags & PKT_TX_QINQ_PKT)
2847 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2848 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
2849 if (m->ol_flags & PKT_TX_VLAN_PKT)
2852 /* Ensure the incoming packet is not a QinQ packet */
2853 vh = (struct rte_vlan_hdr *)(eh + 1);
2854 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
2863 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
2867 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2868 ret = rte_validate_tx_offload(m);
2874 ret = hns3_vld_vlan_chk(tx_queue, m);
2880 if (hns3_pkt_is_tso(m)) {
2881 if (hns3_pkt_need_linearized(m, m->nb_segs,
2882 tx_queue->max_non_tso_bd_num) ||
2883 hns3_check_tso_pkt_valid(m)) {
2888 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
2890 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
2891 * hardware can recalculate the TCP pseudo-header checksum
2892 * of packets that need TSO, so the network driver software
2893 * does not need to recalculate it.
2895 hns3_outer_header_cksum_prepare(m);
2900 ret = rte_net_intel_cksum_prepare(m);
2906 hns3_outer_header_cksum_prepare(m);
2912 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2918 for (i = 0; i < nb_pkts; i++) {
2920 if (hns3_prep_pkt_proc(tx_queue, m))
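/*
 * Illustrative sketch, not part of the driver: hns3_prep_pkts is installed as
 * the '.tx_pkt_prepare' callback, so an application exercises the checks
 * above through the generic prepare/transmit pair. The helper name, port and
 * queue identifiers, and burst arrays are examples only.
 */
static __rte_unused uint16_t
hns3_example_prepare_and_xmit(uint16_t port_id, uint16_t queue_id,
			      struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/* Runs hns3_prep_pkts() on packets that request offloads or TSO. */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* Transmit only the packets that passed the preparation stage. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}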
2928 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
2929 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
2931 /* Fill in tunneling parameters if necessary */
2932 if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
2933 (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
2934 if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
2936 txq->unsupported_tunnel_pkt_cnt++;
2940 /* Enable checksum offloading */
2941 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
2942 hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
2948 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
2949 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
2951 uint8_t max_non_tso_bd_num;
2952 struct rte_mbuf *new_pkt;
2955 if (hns3_pkt_is_tso(*m_seg))
2959 * If the packet length is greater than the HNS3_MAX_FRAME_LEN
2960 * that the driver supports, the packet will be ignored.
2962 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
2963 txq->over_length_pkt_cnt++;
2967 max_non_tso_bd_num = txq->max_non_tso_bd_num;
2968 if (unlikely(nb_buf > max_non_tso_bd_num)) {
2969 txq->exceed_limit_bd_pkt_cnt++;
2970 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
2971 max_non_tso_bd_num);
2973 txq->exceed_limit_bd_reassem_fail++;
2983 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
2985 struct hns3_entry *tx_entry;
2986 struct hns3_desc *desc;
2987 uint16_t tx_next_clean;
2991 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
2995 * All mbufs can be released only when the VLD bits of all
2996 * descriptors in a batch are cleared.
2998 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3000 desc = &txq->tx_ring[tx_next_clean];
3001 for (i = 0; i < txq->tx_rs_thresh; i++) {
3002 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3003 BIT(HNS3_TXD_VLD_B))
3008 tx_entry = &txq->sw_ring[txq->next_to_clean];
3010 for (i = 0; i < txq->tx_rs_thresh; i++)
3011 rte_prefetch0((tx_entry + i)->mbuf);
3012 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3013 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3014 tx_entry->mbuf = NULL;
3017 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3018 txq->tx_bd_ready += txq->tx_rs_thresh;
3023 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3025 tx_entry->mbuf = pkts[0];
3029 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3031 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
3032 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
3033 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
3034 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
3038 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3040 #define PER_LOOP_NUM 4
3041 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3045 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
3046 dma_addr = rte_mbuf_data_iova(*pkts);
3047 txdp->addr = rte_cpu_to_le_64(dma_addr);
3048 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3049 txdp->tx.paylen = 0;
3050 txdp->tx.type_cs_vlan_tso_len = 0;
3051 txdp->tx.ol_type_vlan_len_msec = 0;
3052 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3057 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
3059 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
3062 dma_addr = rte_mbuf_data_iova(*pkts);
3063 txdp->addr = rte_cpu_to_le_64(dma_addr);
3064 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
3065 txdp->tx.paylen = 0;
3066 txdp->tx.type_cs_vlan_tso_len = 0;
3067 txdp->tx.ol_type_vlan_len_msec = 0;
3068 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
3072 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
3073 struct rte_mbuf **pkts,
3076 #define PER_LOOP_NUM 4
3077 #define PER_LOOP_MASK (PER_LOOP_NUM - 1)
3078 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
3079 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
3080 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
3081 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
3084 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
3085 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
3086 hns3_tx_setup_4bd(txdp + i, pkts + i);
3088 if (unlikely(leftover > 0)) {
3089 for (i = 0; i < leftover; i++) {
3090 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
3091 pkts + mainpart + i);
3092 hns3_tx_setup_1bd(txdp + mainpart + i,
3093 pkts + mainpart + i);
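/*
 * Worked example (illustrative): for nb_pkts = 10 the loop above handles
 * mainpart = 10 & ~3 = 8 descriptors in two unrolled iterations of four, and
 * the remaining leftover = 10 & 3 = 2 descriptors one by one.
 */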
3099 hns3_xmit_pkts_simple(void *tx_queue,
3100 struct rte_mbuf **tx_pkts,
3103 struct hns3_tx_queue *txq = tx_queue;
3106 hns3_tx_free_buffer_simple(txq);
3108 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
3109 if (unlikely(nb_pkts == 0)) {
3110 if (txq->tx_bd_ready == 0)
3111 txq->queue_full_cnt++;
3115 txq->tx_bd_ready -= nb_pkts;
3116 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
3117 nb_tx = txq->nb_tx_desc - txq->next_to_use;
3118 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
3119 txq->next_to_use = 0;
3122 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
3123 txq->next_to_use += nb_pkts - nb_tx;
3125 hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
3131 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3133 struct rte_net_hdr_lens hdr_lens = {0};
3134 struct hns3_tx_queue *txq = tx_queue;
3135 struct hns3_entry *tx_bak_pkt;
3136 struct hns3_desc *tx_ring;
3137 struct rte_mbuf *tx_pkt;
3138 struct rte_mbuf *m_seg;
3139 struct hns3_desc *desc;
3140 uint32_t nb_hold = 0;
3141 uint16_t tx_next_use;
3142 uint16_t tx_pkt_num;
3148 /* free useless buffer */
3149 hns3_tx_free_useless_buffer(txq);
3151 tx_next_use = txq->next_to_use;
3152 tx_bd_max = txq->nb_tx_desc;
3153 tx_pkt_num = nb_pkts;
3154 tx_ring = txq->tx_ring;
3157 tx_bak_pkt = &txq->sw_ring[tx_next_use];
3158 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
3159 tx_pkt = *tx_pkts++;
3161 nb_buf = tx_pkt->nb_segs;
3163 if (nb_buf > txq->tx_bd_ready) {
3164 txq->queue_full_cnt++;
3172 * If the packet length is less than the minimum packet length
3173 * supported by the hardware in the Tx direction, the driver needs to pad it to avoid errors.
3176 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
3177 txq->min_tx_pkt_len)) {
3181 add_len = txq->min_tx_pkt_len -
3182 rte_pktmbuf_pkt_len(tx_pkt);
3183 appended = rte_pktmbuf_append(tx_pkt, add_len);
3184 if (appended == NULL) {
3185 txq->pkt_padding_fail_cnt++;
3189 memset(appended, 0, add_len);
3194 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
3197 if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
3201 desc = &tx_ring[tx_next_use];
3204 * If the packet is divided into multiple Tx Buffer Descriptors,
3205 * the vlan, paylen and tso fields only need to be filled in the
3206 * first Tx Buffer Descriptor.
3208 hns3_fill_first_desc(txq, desc, m_seg);
3211 desc = &tx_ring[tx_next_use];
3213 * Fill valid bits, DMA address and data length for each
3214 * Tx Buffer Descriptor.
3216 hns3_fill_per_desc(desc, m_seg);
3217 tx_bak_pkt->mbuf = m_seg;
3218 m_seg = m_seg->next;
3221 if (tx_next_use >= tx_bd_max) {
3223 tx_bak_pkt = txq->sw_ring;
3227 } while (m_seg != NULL);
3229 /* Add end flag for the last Tx Buffer Descriptor */
3230 desc->tx.tp_fe_sc_vld_ra_ri |=
3231 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
3234 txq->next_to_use = tx_next_use;
3235 txq->tx_bd_ready -= i;
3241 hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
3247 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
3253 hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
3254 __rte_unused struct rte_mbuf **tx_pkts,
3255 __rte_unused uint16_t nb_pkts)
3261 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3262 struct rte_eth_burst_mode *mode)
3264 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3265 const char *info = NULL;
3267 if (pkt_burst == hns3_xmit_pkts_simple)
3268 info = "Scalar Simple";
3269 else if (pkt_burst == hns3_xmit_pkts)
3271 else if (pkt_burst == hns3_xmit_pkts_vec)
3272 info = "Vector Neon";
3277 snprintf(mode->info, sizeof(mode->info), "%s", info);
3282 static eth_tx_burst_t
3283 hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
3285 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
3286 struct hns3_adapter *hns = dev->data->dev_private;
3288 if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
3290 return hns3_xmit_pkts_vec;
3293 if (hns->tx_simple_allowed &&
3294 offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
3296 return hns3_xmit_pkts_simple;
3299 *prep = hns3_prep_pkts;
3300 return hns3_xmit_pkts;
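/*
 * Illustrative sketch, not part of the driver: the simple Tx burst routine is
 * only chosen when the configured Tx offloads are limited to
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE (and the adapter allows the simple path), so
 * an application wanting that path configures the port roughly as below. The
 * helper name and parameters are hypothetical.
 */
static __rte_unused int
hns3_example_request_simple_tx(uint16_t port_id, uint16_t nb_rxq,
			       uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.txmode.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}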
3304 hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
3305 struct rte_mbuf **pkts __rte_unused,
3306 uint16_t pkts_n __rte_unused)
3311 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
3313 struct hns3_adapter *hns = eth_dev->data->dev_private;
3314 eth_tx_prep_t prep = NULL;
3316 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
3317 rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
3318 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
3319 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
3320 eth_dev->tx_pkt_prepare = prep;
3322 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
3323 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
3324 eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
3329 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3330 struct rte_eth_rxq_info *qinfo)
3332 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
3334 qinfo->mp = rxq->mb_pool;
3335 qinfo->nb_desc = rxq->nb_rx_desc;
3336 qinfo->scattered_rx = dev->data->scattered_rx;
3337 /* Report the HW Rx buffer length to user */
3338 qinfo->rx_buf_size = rxq->rx_buf_len;
3341 * If there are no available Rx buffer descriptors, incoming packets
3342 * are always dropped by the hns3 hardware.
3344 qinfo->conf.rx_drop_en = 1;
3345 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
3346 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3347 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3351 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3352 struct rte_eth_txq_info *qinfo)
3354 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
3356 qinfo->nb_desc = txq->nb_tx_desc;
3357 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
3358 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
3359 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3360 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
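/*
 * Illustrative sketch, not part of the driver: the two callbacks above back
 * the generic queue-info API, so an application can read, for example, the
 * reported Rx buffer size as below. The helper name is hypothetical.
 */
static __rte_unused uint32_t
hns3_example_get_rx_buf_size(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	/* Reaches hns3_rxq_info_get() through the ethdev layer. */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return 0;

	return qinfo.rx_buf_size;
}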