1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
11 #include <rte_bus_pci.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_vxlan.h>
19 #include <rte_ethdev_driver.h>
24 #include <rte_malloc.h>
27 #include "hns3_ethdev.h"
28 #include "hns3_rxtx.h"
29 #include "hns3_regs.h"
30 #include "hns3_logs.h"
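/*
 * The BD number registers appear to encode the ring size in units of 8
 * descriptors, minus one: e.g. a 1024-BD ring is programmed as
 * 1024 / 8 - 1 = 127 (inferred from how the macro below is written to
 * HNS3_RING_RX_BD_NUM_REG / HNS3_RING_TX_BD_NUM_REG).
 */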
32 #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
33 #define HNS3_RX_RING_PREFETCH_MASK 3
36 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
40 /* Note: Fake rx queue will not enter here */
41 if (rxq->sw_ring == NULL)
44 for (i = 0; i < rxq->nb_rx_desc; i++)
45 if (rxq->sw_ring[i].mbuf)
46 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
48 for (i = 0; i < rxq->bulk_mbuf_num; i++)
49 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
50 rxq->bulk_mbuf_num = 0;
52 if (rxq->pkt_first_seg) {
53 rte_pktmbuf_free(rxq->pkt_first_seg);
54 rxq->pkt_first_seg = NULL;
59 hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
63 /* Note: Fake tx queue will not enter here */
65 for (i = 0; i < txq->nb_tx_desc; i++) {
66 if (txq->sw_ring[i].mbuf) {
67 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
68 txq->sw_ring[i].mbuf = NULL;
75 hns3_rx_queue_release(void *queue)
77 struct hns3_rx_queue *rxq = queue;
79 hns3_rx_queue_release_mbufs(rxq);
81 rte_memzone_free(rxq->mz);
83 rte_free(rxq->sw_ring);
89 hns3_tx_queue_release(void *queue)
91 struct hns3_tx_queue *txq = queue;
93 hns3_tx_queue_release_mbufs(txq);
95 rte_memzone_free(txq->mz);
97 rte_free(txq->sw_ring);
105 hns3_dev_rx_queue_release(void *queue)
107 struct hns3_rx_queue *rxq = queue;
108 struct hns3_adapter *hns;
114 rte_spinlock_lock(&hns->hw.lock);
115 hns3_rx_queue_release(queue);
116 rte_spinlock_unlock(&hns->hw.lock);
120 hns3_dev_tx_queue_release(void *queue)
122 struct hns3_tx_queue *txq = queue;
123 struct hns3_adapter *hns;
129 rte_spinlock_lock(&hns->hw.lock);
130 hns3_tx_queue_release(queue);
131 rte_spinlock_unlock(&hns->hw.lock);
135 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
137 struct hns3_rx_queue *rxq = queue;
138 struct hns3_adapter *hns;
148 if (hw->fkq_data.rx_queues[idx]) {
149 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
150 hw->fkq_data.rx_queues[idx] = NULL;
153 /* free fake rx queue arrays */
154 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
155 hw->fkq_data.nb_fake_rx_queues = 0;
156 rte_free(hw->fkq_data.rx_queues);
157 hw->fkq_data.rx_queues = NULL;
162 hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
164 struct hns3_tx_queue *txq = queue;
165 struct hns3_adapter *hns;
175 if (hw->fkq_data.tx_queues[idx]) {
176 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
177 hw->fkq_data.tx_queues[idx] = NULL;
180 /* free fake tx queue arrays */
181 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
182 hw->fkq_data.nb_fake_tx_queues = 0;
183 rte_free(hw->fkq_data.tx_queues);
184 hw->fkq_data.tx_queues = NULL;
189 hns3_free_rx_queues(struct rte_eth_dev *dev)
191 struct hns3_adapter *hns = dev->data->dev_private;
192 struct hns3_fake_queue_data *fkq_data;
193 struct hns3_hw *hw = &hns->hw;
197 nb_rx_q = hw->data->nb_rx_queues;
198 for (i = 0; i < nb_rx_q; i++) {
199 if (dev->data->rx_queues[i]) {
200 hns3_rx_queue_release(dev->data->rx_queues[i]);
201 dev->data->rx_queues[i] = NULL;
205 /* Free fake Rx queues */
206 fkq_data = &hw->fkq_data;
207 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
208 if (fkq_data->rx_queues[i])
209 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
214 hns3_free_tx_queues(struct rte_eth_dev *dev)
216 struct hns3_adapter *hns = dev->data->dev_private;
217 struct hns3_fake_queue_data *fkq_data;
218 struct hns3_hw *hw = &hns->hw;
222 nb_tx_q = hw->data->nb_tx_queues;
223 for (i = 0; i < nb_tx_q; i++) {
224 if (dev->data->tx_queues[i]) {
225 hns3_tx_queue_release(dev->data->tx_queues[i]);
226 dev->data->tx_queues[i] = NULL;
230 /* Free fake Tx queues */
231 fkq_data = &hw->fkq_data;
232 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
233 if (fkq_data->tx_queues[i])
234 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
239 hns3_free_all_queues(struct rte_eth_dev *dev)
241 hns3_free_rx_queues(dev);
242 hns3_free_tx_queues(dev);
246 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
248 struct rte_mbuf *mbuf;
252 for (i = 0; i < rxq->nb_rx_desc; i++) {
253 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
254 if (unlikely(mbuf == NULL)) {
255 hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
257 hns3_rx_queue_release_mbufs(rxq);
261 rte_mbuf_refcnt_set(mbuf, 1);
263 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
265 mbuf->port = rxq->port_id;
267 rxq->sw_ring[i].mbuf = mbuf;
268 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
269 rxq->rx_ring[i].addr = dma_addr;
270 rxq->rx_ring[i].rx.bd_base_info = 0;
277 hns3_buf_size2type(uint32_t buf_size)
283 bd_size_type = HNS3_BD_SIZE_512_TYPE;
286 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
289 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
292 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
299 hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
301 uint32_t rx_buf_len = rxq->rx_buf_len;
302 uint64_t dma_addr = rxq->rx_ring_phys_addr;
304 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
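/*
 * The (dma_addr >> 31) >> 1 form below appears to mirror the kernel's
 * upper_32_bits() idiom: it extracts the high 32 bits while staying
 * well defined even if the address type were only 32 bits wide.
 */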
305 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
306 (uint32_t)((dma_addr >> 31) >> 1));
308 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
309 hns3_buf_size2type(rx_buf_len));
310 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
311 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
315 hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
317 uint64_t dma_addr = txq->tx_ring_phys_addr;
319 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
320 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
321 (uint32_t)((dma_addr >> 31) >> 1));
323 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
324 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
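/*
 * Propagate the current port based VLAN (PVID) state to every Rx and Tx
 * queue, so that the per-queue data path can decide which stripped VLAN
 * tag to report (see hns3_rxd_to_vlan_tci()) and how to insert the Tx
 * VLAN tag (see hns3_fill_first_desc()).
 */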
328 hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
330 uint16_t nb_rx_q = hw->data->nb_rx_queues;
331 uint16_t nb_tx_q = hw->data->nb_tx_queues;
332 struct hns3_rx_queue *rxq;
333 struct hns3_tx_queue *txq;
337 pvid_state = hw->port_base_vlan_cfg.state;
338 for (i = 0; i < hw->cfg_max_queues; i++) {
340 rxq = hw->data->rx_queues[i];
342 rxq->pvid_state = pvid_state;
345 txq = hw->data->tx_queues[i];
347 txq->pvid_state = pvid_state;
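/*
 * Toggle the ring-level enable bit (HNS3_RING_EN_B) for every configured
 * queue, covering both the real queues and the fake queues that pad the
 * Rx/Tx queue numbers; deferred-start queues are skipped when enabling.
 */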
353 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
355 uint16_t nb_rx_q = hw->data->nb_rx_queues;
356 uint16_t nb_tx_q = hw->data->nb_tx_queues;
357 struct hns3_rx_queue *rxq;
358 struct hns3_tx_queue *txq;
362 for (i = 0; i < hw->cfg_max_queues; i++) {
364 rxq = hw->data->rx_queues[i];
366 rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
368 txq = hw->data->tx_queues[i];
370 txq = hw->fkq_data.tx_queues[i - nb_tx_q];
371 if (rxq == NULL || txq == NULL ||
372 (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
375 rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
377 rcb_reg |= BIT(HNS3_RING_EN_B);
379 rcb_reg &= ~BIT(HNS3_RING_EN_B);
380 hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
385 hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
387 struct hns3_cfg_com_tqp_queue_cmd *req;
388 struct hns3_cmd_desc desc;
391 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
393 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
394 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
396 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
398 ret = hns3_cmd_send(hw, &desc, 1);
400 hns3_err(hw, "TQP enable fail, ret = %d", ret);
406 hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
408 struct hns3_reset_tqp_queue_cmd *req;
409 struct hns3_cmd_desc desc;
412 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
414 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
415 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
416 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
418 ret = hns3_cmd_send(hw, &desc, 1);
420 hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
426 hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
428 struct hns3_reset_tqp_queue_cmd *req;
429 struct hns3_cmd_desc desc;
432 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
434 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
435 req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
437 ret = hns3_cmd_send(hw, &desc, 1);
439 hns3_err(hw, "Get reset status error, ret =%d", ret);
443 return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
447 hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
449 #define HNS3_TQP_RESET_TRY_MS 200
454 ret = hns3_tqp_enable(hw, queue_id, false);
459 * In the current version, VF is not supported when the PF is driven by
460 * the DPDK driver; all task queue pairs are mapped to the PF function,
461 * so the PF's queue id equals the global queue id in the PF range.
463 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
465 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
469 end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
471 /* Wait for tqp hw reset */
472 rte_delay_ms(HNS3_POLL_RESPONE_MS);
473 reset_status = hns3_get_reset_status(hw, queue_id);
478 } while (get_timeofday_ms() < end);
481 hns3_err(hw, "Reset TQP fail, ret = %d", ret);
485 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
487 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
493 hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
498 /* Disable VF's queue before sending the queue reset msg to PF */
499 ret = hns3_tqp_enable(hw, queue_id, false);
503 memcpy(msg_data, &queue_id, sizeof(uint16_t));
505 return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
506 sizeof(msg_data), true, NULL, 0);
510 hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
512 struct hns3_hw *hw = &hns->hw;
514 return hns3vf_reset_tqp(hw, queue_id);
516 return hns3_reset_tqp(hw, queue_id);
520 hns3_reset_all_queues(struct hns3_adapter *hns)
522 struct hns3_hw *hw = &hns->hw;
525 for (i = 0; i < hw->cfg_max_queues; i++) {
526 ret = hns3_reset_queue(hns, i);
528 hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
536 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
537 uint8_t gl_idx, uint16_t gl_value)
539 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
540 HNS3_TQP_INTR_GL1_REG,
541 HNS3_TQP_INTR_GL2_REG};
542 uint32_t addr, value;
544 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
547 addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
548 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
549 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
551 value = HNS3_GL_USEC_TO_REG(gl_value);
553 hns3_write_dev(hw, addr, value);
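/*
 * GL, RL and QL presumably stand for the gap limiting (coalescing time),
 * rate limiting and quantity limiting interrupt coalescing parameters,
 * matching the naming used by the kernel hns3 driver; the mapping is
 * inferred here from the register names.
 */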
557 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
559 uint32_t addr, value;
561 if (rl_value > HNS3_TQP_INTR_RL_MAX)
564 addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
565 value = HNS3_RL_USEC_TO_REG(rl_value);
567 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
569 hns3_write_dev(hw, addr, value);
573 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
577 if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
580 addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
581 hns3_write_dev(hw, addr, ql_value);
583 addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
584 hns3_write_dev(hw, addr, ql_value);
588 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
590 uint32_t addr, value;
592 addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
595 hns3_write_dev(hw, addr, value);
599 * Enable all Rx queue interrupts when in interrupt Rx mode.
600 * This API is called before enabling queue Rx & Tx (in normal start or
601 * reset recovery scenarios); it restores the hardware Rx queue interrupt
602 * enable bit, which may have been cleared by hardware.
605 hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
607 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
608 uint16_t nb_rx_q = hw->data->nb_rx_queues;
611 if (dev->data->dev_conf.intr_conf.rxq == 0)
614 for (i = 0; i < nb_rx_q; i++)
615 hns3_queue_intr_enable(hw, i, en);
619 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
621 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
622 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
623 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625 if (dev->data->dev_conf.intr_conf.rxq == 0)
628 hns3_queue_intr_enable(hw, queue_id, true);
630 return rte_intr_ack(intr_handle);
634 hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
636 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
638 if (dev->data->dev_conf.intr_conf.rxq == 0)
641 hns3_queue_intr_enable(hw, queue_id, false);
647 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
649 struct hns3_hw *hw = &hns->hw;
650 struct hns3_rx_queue *rxq;
653 PMD_INIT_FUNC_TRACE();
655 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
656 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
658 hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
663 rxq->next_to_use = 0;
664 rxq->rx_free_hold = 0;
665 rxq->pkt_first_seg = NULL;
666 rxq->pkt_last_seg = NULL;
667 hns3_init_rx_queue_hw(rxq);
673 hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
675 struct hns3_hw *hw = &hns->hw;
676 struct hns3_rx_queue *rxq;
678 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
679 rxq->next_to_use = 0;
680 rxq->rx_free_hold = 0;
681 hns3_init_rx_queue_hw(rxq);
685 hns3_init_tx_queue(struct hns3_tx_queue *queue)
687 struct hns3_tx_queue *txq = queue;
688 struct hns3_desc *desc;
693 for (i = 0; i < txq->nb_tx_desc; i++) {
694 desc->tx.tp_fe_sc_vld_ra_ri = 0;
698 txq->next_to_use = 0;
699 txq->next_to_clean = 0;
700 txq->tx_bd_ready = txq->nb_tx_desc - 1;
701 hns3_init_tx_queue_hw(txq);
705 hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
707 struct hns3_hw *hw = &hns->hw;
708 struct hns3_tx_queue *txq;
710 txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
711 hns3_init_tx_queue(txq);
715 hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
717 struct hns3_hw *hw = &hns->hw;
718 struct hns3_tx_queue *txq;
720 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
721 hns3_init_tx_queue(txq);
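/*
 * Bind each Tx ring to its traffic class: for every enabled TC, walk the
 * TC's TQP range and program the ring's HNS3_RING_TX_TC_REG with the TC
 * index so that hardware schedules the ring in the right class.
 */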
725 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
727 struct hns3_hw *hw = &hns->hw;
728 struct hns3_tx_queue *txq;
731 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
732 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
735 if (!tc_queue->enable)
738 for (j = 0; j < tc_queue->tqp_count; j++) {
739 num = tc_queue->tqp_offset + j;
740 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
744 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
750 hns3_start_rx_queues(struct hns3_adapter *hns)
752 struct hns3_hw *hw = &hns->hw;
753 struct hns3_rx_queue *rxq;
757 /* Initialize RSS for queues */
758 ret = hns3_config_rss(hns);
760 hns3_err(hw, "Failed to configure rss %d", ret);
764 for (i = 0; i < hw->data->nb_rx_queues; i++) {
765 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
766 if (rxq == NULL || rxq->rx_deferred_start)
768 ret = hns3_dev_rx_queue_start(hns, i);
770 hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
776 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
777 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
778 if (rxq == NULL || rxq->rx_deferred_start)
780 hns3_fake_rx_queue_start(hns, i);
785 for (j = 0; j < i; j++) {
786 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
787 hns3_rx_queue_release_mbufs(rxq);
794 hns3_start_tx_queues(struct hns3_adapter *hns)
796 struct hns3_hw *hw = &hns->hw;
797 struct hns3_tx_queue *txq;
800 for (i = 0; i < hw->data->nb_tx_queues; i++) {
801 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
802 if (txq == NULL || txq->tx_deferred_start)
804 hns3_dev_tx_queue_start(hns, i);
807 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
808 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
809 if (txq == NULL || txq->tx_deferred_start)
811 hns3_fake_tx_queue_start(hns, i);
814 hns3_init_tx_ring_tc(hns);
819 * Note: this only initializes and sets up the queues; it does not enable queue Rx & Tx.
822 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
824 struct hns3_hw *hw = &hns->hw;
828 ret = hns3_reset_all_queues(hns);
830 hns3_err(hw, "Failed to reset all queues %d", ret);
835 ret = hns3_start_rx_queues(hns);
837 hns3_err(hw, "Failed to start rx queues: %d", ret);
841 hns3_start_tx_queues(hns);
847 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
849 struct hns3_hw *hw = &hns->hw;
852 hns3_enable_all_queues(hw, false);
854 ret = hns3_reset_all_queues(hns);
856 hns3_err(hw, "Failed to reset all queues %d", ret);
864 hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
865 struct hns3_queue_info *q_info)
867 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
868 const struct rte_memzone *rx_mz;
869 struct hns3_rx_queue *rxq;
870 unsigned int rx_desc;
872 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
873 RTE_CACHE_LINE_SIZE, q_info->socket_id);
875 hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
880 /* Allocate rx ring hardware descriptors. */
881 rxq->queue_id = q_info->idx;
882 rxq->nb_rx_desc = q_info->nb_desc;
883 rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
884 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
885 rx_desc, HNS3_RING_BASE_ALIGN,
888 hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
890 hns3_rx_queue_release(rxq);
894 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
895 rxq->rx_ring_phys_addr = rx_mz->iova;
897 hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
898 rxq->rx_ring_phys_addr);
904 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
905 uint16_t nb_desc, unsigned int socket_id)
907 struct hns3_adapter *hns = dev->data->dev_private;
908 struct hns3_hw *hw = &hns->hw;
909 struct hns3_queue_info q_info;
910 struct hns3_rx_queue *rxq;
913 if (hw->fkq_data.rx_queues[idx]) {
914 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
915 hw->fkq_data.rx_queues[idx] = NULL;
919 q_info.socket_id = socket_id;
920 q_info.nb_desc = nb_desc;
921 q_info.type = "hns3 fake RX queue";
922 q_info.ring_name = "rx_fake_ring";
923 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
925 hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
929 /* No need to allocate sw_ring, because upper-layer applications don't use it */
933 rxq->rx_deferred_start = false;
934 rxq->port_id = dev->data->port_id;
935 rxq->configured = true;
936 nb_rx_q = dev->data->nb_rx_queues;
937 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
938 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
939 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
941 rte_spinlock_lock(&hw->lock);
942 hw->fkq_data.rx_queues[idx] = rxq;
943 rte_spinlock_unlock(&hw->lock);
949 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
950 struct hns3_queue_info *q_info)
952 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
953 const struct rte_memzone *tx_mz;
954 struct hns3_tx_queue *txq;
955 struct hns3_desc *desc;
956 unsigned int tx_desc;
959 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
960 RTE_CACHE_LINE_SIZE, q_info->socket_id);
962 hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
967 /* Allocate tx ring hardware descriptors. */
968 txq->queue_id = q_info->idx;
969 txq->nb_tx_desc = q_info->nb_desc;
970 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
971 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
972 tx_desc, HNS3_RING_BASE_ALIGN,
975 hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
977 hns3_tx_queue_release(txq);
981 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
982 txq->tx_ring_phys_addr = tx_mz->iova;
984 hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
985 txq->tx_ring_phys_addr);
989 for (i = 0; i < txq->nb_tx_desc; i++) {
990 desc->tx.tp_fe_sc_vld_ra_ri = 0;
998 hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
999 uint16_t nb_desc, unsigned int socket_id)
1001 struct hns3_adapter *hns = dev->data->dev_private;
1002 struct hns3_hw *hw = &hns->hw;
1003 struct hns3_queue_info q_info;
1004 struct hns3_tx_queue *txq;
1007 if (hw->fkq_data.tx_queues[idx] != NULL) {
1008 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1009 hw->fkq_data.tx_queues[idx] = NULL;
1013 q_info.socket_id = socket_id;
1014 q_info.nb_desc = nb_desc;
1015 q_info.type = "hns3 fake TX queue";
1016 q_info.ring_name = "tx_fake_ring";
1017 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1019 hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
1023 /* No need to allocate sw_ring, because upper-layer applications don't use it */
1024 txq->sw_ring = NULL;
1028 txq->tx_deferred_start = false;
1029 txq->port_id = dev->data->port_id;
1030 txq->configured = true;
1031 nb_tx_q = dev->data->nb_tx_queues;
1032 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1033 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1035 rte_spinlock_lock(&hw->lock);
1036 hw->fkq_data.tx_queues[idx] = txq;
1037 rte_spinlock_unlock(&hw->lock);
1043 hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1045 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1049 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1050 /* first time configuration */
1052 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1053 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1054 RTE_CACHE_LINE_SIZE);
1055 if (hw->fkq_data.rx_queues == NULL) {
1056 hw->fkq_data.nb_fake_rx_queues = 0;
1059 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1061 rxq = hw->fkq_data.rx_queues;
1062 for (i = nb_queues; i < old_nb_queues; i++)
1063 hns3_dev_rx_queue_release(rxq[i]);
1065 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1066 RTE_CACHE_LINE_SIZE);
1069 if (nb_queues > old_nb_queues) {
1070 uint16_t new_qs = nb_queues - old_nb_queues;
1071 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1074 hw->fkq_data.rx_queues = rxq;
1075 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1076 rxq = hw->fkq_data.rx_queues;
1077 for (i = nb_queues; i < old_nb_queues; i++)
1078 hns3_dev_rx_queue_release(rxq[i]);
1080 rte_free(hw->fkq_data.rx_queues);
1081 hw->fkq_data.rx_queues = NULL;
1084 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1090 hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1092 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1096 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1097 /* first time configuration */
1099 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1100 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1101 RTE_CACHE_LINE_SIZE);
1102 if (hw->fkq_data.tx_queues == NULL) {
1103 hw->fkq_data.nb_fake_tx_queues = 0;
1106 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1108 txq = hw->fkq_data.tx_queues;
1109 for (i = nb_queues; i < old_nb_queues; i++)
1110 hns3_dev_tx_queue_release(txq[i]);
1111 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1112 RTE_CACHE_LINE_SIZE);
1115 if (nb_queues > old_nb_queues) {
1116 uint16_t new_qs = nb_queues - old_nb_queues;
1117 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1120 hw->fkq_data.tx_queues = txq;
1121 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1122 txq = hw->fkq_data.tx_queues;
1123 for (i = nb_queues; i < old_nb_queues; i++)
1124 hns3_dev_tx_queue_release(txq[i]);
1126 rte_free(hw->fkq_data.tx_queues);
1127 hw->fkq_data.tx_queues = NULL;
1129 hw->fkq_data.nb_fake_tx_queues = nb_queues;
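/*
 * The driver pads the smaller of the Rx/Tx queue counts with "fake"
 * queues so that both directions expose cfg_max_queues queues to the
 * hardware; presumably the hns3 engine requires the Rx and Tx queue
 * numbers of a function to match.
 */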
1135 hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1138 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1139 uint16_t rx_need_add_nb_q;
1140 uint16_t tx_need_add_nb_q;
1145 /* Setup new number of fake RX/TX queues and reconfigure device. */
1146 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
1147 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1148 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1149 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1151 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1152 goto cfg_fake_rx_q_fail;
1155 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1157 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1158 goto cfg_fake_tx_q_fail;
1161 /* Allocate and set up fake RX queue per Ethernet port. */
1162 port_id = hw->data->port_id;
1163 for (q = 0; q < rx_need_add_nb_q; q++) {
1164 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1165 rte_eth_dev_socket_id(port_id));
1167 goto setup_fake_rx_q_fail;
1170 /* Allocate and set up fake TX queue per Ethernet port. */
1171 for (q = 0; q < tx_need_add_nb_q; q++) {
1172 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1173 rte_eth_dev_socket_id(port_id));
1175 goto setup_fake_tx_q_fail;
1180 setup_fake_tx_q_fail:
1181 setup_fake_rx_q_fail:
1182 (void)hns3_fake_tx_queue_config(hw, 0);
1184 (void)hns3_fake_rx_queue_config(hw, 0);
1186 hw->cfg_max_queues = 0;
1192 hns3_dev_release_mbufs(struct hns3_adapter *hns)
1194 struct rte_eth_dev_data *dev_data = hns->hw.data;
1195 struct hns3_rx_queue *rxq;
1196 struct hns3_tx_queue *txq;
1199 if (dev_data->rx_queues)
1200 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1201 rxq = dev_data->rx_queues[i];
1202 if (rxq == NULL || rxq->rx_deferred_start)
1204 hns3_rx_queue_release_mbufs(rxq);
1207 if (dev_data->tx_queues)
1208 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1209 txq = dev_data->tx_queues[i];
1210 if (txq == NULL || txq->tx_deferred_start)
1212 hns3_tx_queue_release_mbufs(txq);
1217 hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1219 uint16_t vld_buf_size;
1220 uint16_t num_hw_specs;
1224 * The hns3 network engine supports only 4 typical buffer-size
1225 * specifications; the buffer size affects the maximum packet length
1226 * and the maximum number of segments when HW GRO is enabled on the
1227 * receive side. The relationship between them is as follows:
1228 * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
1229 * ---------------------|-------------------|----------------
1230 * HNS3_4K_BD_BUF_SIZE | 60KB | 15
1231 * HNS3_2K_BD_BUF_SIZE | 62KB | 31
1232 * HNS3_1K_BD_BUF_SIZE | 63KB | 63
1233 * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
1235 static const uint16_t hw_rx_buf_size[] = {
1236 HNS3_4K_BD_BUF_SIZE,
1237 HNS3_2K_BD_BUF_SIZE,
1238 HNS3_1K_BD_BUF_SIZE,
1239 HNS3_512_BD_BUF_SIZE
1242 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1243 RTE_PKTMBUF_HEADROOM);
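/*
 * Pick the largest hardware buffer-size specification that fits the
 * usable room, e.g. a mempool with RTE_PKTMBUF_HEADROOM + 3000 bytes of
 * data room yields vld_buf_size = 3000, which selects
 * HNS3_2K_BD_BUF_SIZE (assuming that constant is 2048).
 */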
1245 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1248 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1249 for (i = 0; i < num_hw_specs; i++) {
1250 if (vld_buf_size >= hw_rx_buf_size[i]) {
1251 *rx_buf_len = hw_rx_buf_size[i];
1259 hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1260 struct rte_mempool *mp, uint16_t nb_desc,
1263 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1264 nb_desc % HNS3_ALIGN_RING_DESC) {
1265 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1270 if (conf->rx_drop_en == 0)
1271 hns3_warn(hw, "if no descriptors available, packets are always "
1272 "dropped and rx_drop_en (1) is fixed on");
1274 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1275 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1276 "minimal data room size (%u).",
1277 rte_pktmbuf_data_room_size(mp),
1278 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1286 hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1287 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1288 struct rte_mempool *mp)
1290 struct hns3_adapter *hns = dev->data->dev_private;
1291 struct hns3_hw *hw = &hns->hw;
1292 struct hns3_queue_info q_info;
1293 struct hns3_rx_queue *rxq;
1294 uint16_t rx_buf_size;
1298 if (dev->data->dev_started) {
1299 hns3_err(hw, "rx_queue_setup after dev_start no supported");
1303 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1307 if (dev->data->rx_queues[idx]) {
1308 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1309 dev->data->rx_queues[idx] = NULL;
1313 q_info.socket_id = socket_id;
1314 q_info.nb_desc = nb_desc;
1315 q_info.type = "hns3 RX queue";
1316 q_info.ring_name = "rx_ring";
1318 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1321 "Failed to alloc mem and reserve DMA mem for rx ring!");
1326 rxq->ptype_tbl = &hns->ptype_tbl;
1328 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1329 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1330 rxq->rx_deferred_start = conf->rx_deferred_start;
1332 rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc;
1333 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1334 RTE_CACHE_LINE_SIZE, socket_id);
1335 if (rxq->sw_ring == NULL) {
1336 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1337 hns3_rx_queue_release(rxq);
1341 rxq->next_to_use = 0;
1342 rxq->rx_free_hold = 0;
1343 rxq->pkt_first_seg = NULL;
1344 rxq->pkt_last_seg = NULL;
1345 rxq->port_id = dev->data->port_id;
1346 rxq->pvid_state = hw->port_base_vlan_cfg.state;
1347 rxq->configured = true;
1348 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1349 idx * HNS3_TQP_REG_SIZE);
1350 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1351 HNS3_RING_RX_HEAD_REG);
1352 rxq->rx_buf_len = rx_buf_size;
1354 rxq->pkt_len_errors = 0;
1355 rxq->l3_csum_errors = 0;
1356 rxq->l4_csum_errors = 0;
1357 rxq->ol3_csum_errors = 0;
1358 rxq->ol4_csum_errors = 0;
1360 /* The CRC length set here is used to amend the packet length */
1361 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1362 rxq->crc_len = RTE_ETHER_CRC_LEN;
1366 rxq->bulk_mbuf_num = 0;
1368 rte_spinlock_lock(&hw->lock);
1369 dev->data->rx_queues[idx] = rxq;
1370 rte_spinlock_unlock(&hw->lock);
1376 hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1378 struct hns3_adapter *hns = dev->data->dev_private;
1379 struct hns3_hw *hw = &hns->hw;
1382 dev->data->scattered_rx = false;
1386 hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1388 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1389 struct hns3_adapter *hns = dev->data->dev_private;
1390 struct hns3_hw *hw = &hns->hw;
1391 struct hns3_rx_queue *rxq;
1394 if (dev->data->rx_queues == NULL)
1397 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1398 rxq = dev->data->rx_queues[queue_id];
1399 if (hw->rx_buf_len == 0)
1400 hw->rx_buf_len = rxq->rx_buf_len;
1402 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1406 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1407 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1408 dev->data->scattered_rx = true;
1412 hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1414 static const uint32_t ptypes[] = {
1416 RTE_PTYPE_L2_ETHER_VLAN,
1417 RTE_PTYPE_L2_ETHER_QINQ,
1418 RTE_PTYPE_L2_ETHER_LLDP,
1419 RTE_PTYPE_L2_ETHER_ARP,
1421 RTE_PTYPE_L3_IPV4_EXT,
1423 RTE_PTYPE_L3_IPV6_EXT,
1429 RTE_PTYPE_TUNNEL_GRE,
1433 if (dev->rx_pkt_burst == hns3_recv_pkts ||
1434 dev->rx_pkt_burst == hns3_recv_scattered_pkts)
1441 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
1443 struct hns3_adapter *hns = dev->data->dev_private;
1444 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
1446 memset(tbl, 0, sizeof(*tbl));
1448 tbl->l2table[0] = RTE_PTYPE_L2_ETHER;
1449 tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;
1450 tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;
1451 tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;
1453 tbl->l3table[0] = RTE_PTYPE_L3_IPV4;
1454 tbl->l3table[1] = RTE_PTYPE_L3_IPV6;
1455 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
1456 tbl->l3table[3] = RTE_PTYPE_L2_ETHER;
1457 tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1458 tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1459 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
1461 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
1462 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
1463 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1464 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
1465 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
1466 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
1468 tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
1469 tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
1470 tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
1472 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
1473 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
1474 tbl->inner_l3table[2] = 0;
1475 tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;
1476 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
1477 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
1479 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
1480 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
1481 tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;
1482 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
1483 tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;
1484 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
1486 tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
1487 tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
1488 tbl->ol3table[2] = 0;
1489 tbl->ol3table[3] = 0;
1490 tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
1491 tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
1493 tbl->ol4table[0] = 0;
1494 tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
1495 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
1499 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
1500 uint32_t l234_info, const struct hns3_desc *rxd)
1502 #define HNS3_STRP_STATUS_NUM 0x4
1504 #define HNS3_NO_STRP_VLAN_VLD 0x0
1505 #define HNS3_INNER_STRP_VLAN_VLD 0x1
1506 #define HNS3_OUTER_STRP_VLAN_VLD 0x2
1507 uint32_t strip_status;
1508 uint32_t report_mode;
1511 * Due to a hardware limitation, the VLAN tag is always written into
1512 * the Rx descriptor when it is stripped from the packet, so the driver
1513 * needs to determine which tag to report to the mbuf according to the
1514 * PVID configuration and the VLAN stripped status.
1516 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
1518 HNS3_NO_STRP_VLAN_VLD,
1519 HNS3_OUTER_STRP_VLAN_VLD,
1520 HNS3_INNER_STRP_VLAN_VLD,
1521 HNS3_OUTER_STRP_VLAN_VLD
1524 HNS3_NO_STRP_VLAN_VLD,
1525 HNS3_NO_STRP_VLAN_VLD,
1526 HNS3_NO_STRP_VLAN_VLD,
1527 HNS3_INNER_STRP_VLAN_VLD
1530 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
1531 HNS3_RXD_STRP_TAGP_S);
1532 report_mode = report_type[rxq->pvid_state][strip_status];
1533 switch (report_mode) {
1534 case HNS3_NO_STRP_VLAN_VLD:
1537 case HNS3_INNER_STRP_VLAN_VLD:
1538 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1539 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
1541 case HNS3_OUTER_STRP_VLAN_VLD:
1542 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1543 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
1549 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
1550 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
1553 uint8_t crc_len = rxq->crc_len;
1555 if (data_len <= crc_len) {
1556 rte_pktmbuf_free_seg(rxm);
1557 first_seg->nb_segs--;
1558 last_seg->data_len = (uint16_t)(last_seg->data_len -
1559 (crc_len - data_len));
1560 last_seg->next = NULL;
1562 rxm->data_len = (uint16_t)(data_len - crc_len);
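/*
 * Example, assuming the standard 4-byte CRC: if the last segment holds
 * only 2 bytes, they are pure CRC, so that segment is freed above and
 * the remaining 4 - 2 = 2 CRC bytes are trimmed from the previous
 * segment's data_len instead.
 */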
1565 static inline struct rte_mbuf *
1566 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
1570 if (likely(rxq->bulk_mbuf_num > 0))
1571 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1573 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
1574 HNS3_BULK_ALLOC_MBUF_NUM);
1575 if (likely(ret == 0)) {
1576 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
1577 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
1579 return rte_mbuf_raw_alloc(rxq->mb_pool);
1583 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1585 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1586 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1587 struct hns3_rx_queue *rxq; /* RX queue */
1588 struct hns3_entry *sw_ring;
1589 struct hns3_entry *rxe;
1590 struct hns3_desc rxd;
1591 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1592 struct rte_mbuf *rxm;
1593 uint32_t bd_base_info;
1606 rx_ring = rxq->rx_ring;
1607 sw_ring = rxq->sw_ring;
1608 rx_id = rxq->next_to_use;
1610 while (nb_rx < nb_pkts) {
1611 rxdp = &rx_ring[rx_id];
1612 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1613 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1616 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1617 (1u << HNS3_RXD_VLD_B)];
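/*
 * The indexed copy above turns the valid-bit check into a data
 * dependency, which on ARMv8 orders the BD read after the valid bit
 * without a memory barrier; see the detailed comment in
 * hns3_recv_scattered_pkts() below.
 */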
1619 nmb = hns3_rx_alloc_buffer(rxq);
1620 if (unlikely(nmb == NULL)) {
1623 port_id = rxq->port_id;
1624 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
1629 rxe = &sw_ring[rx_id];
1631 if (unlikely(rx_id == rxq->nb_rx_desc))
1634 rte_prefetch0(sw_ring[rx_id].mbuf);
1635 if ((rx_id & HNS3_RX_RING_PREFETCH_MASK) == 0) {
1636 rte_prefetch0(&rx_ring[rx_id]);
1637 rte_prefetch0(&sw_ring[rx_id]);
1643 dma_addr = rte_mbuf_data_iova_default(nmb);
1644 rxdp->addr = rte_cpu_to_le_64(dma_addr);
1645 rxdp->rx.bd_base_info = 0;
1647 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1648 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
1650 rxm->data_len = rxm->pkt_len;
1651 rxm->port = rxq->port_id;
1652 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1653 rxm->ol_flags = PKT_RX_RSS_HASH;
1654 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1656 rte_le_to_cpu_16(rxd.rx.fd_id);
1657 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1662 /* Load the remaining descriptor data and extract the necessary fields */
1663 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1664 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1665 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
1666 l234_info, &cksum_err);
1670 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
1672 if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
1673 hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
1675 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
1677 rx_pkts[nb_rx++] = rxm;
1680 rte_pktmbuf_free(rxm);
1683 rxq->next_to_use = rx_id;
1684 rxq->rx_free_hold += nb_rx_bd;
1685 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1686 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1687 rxq->rx_free_hold = 0;
1694 hns3_recv_scattered_pkts(void *rx_queue,
1695 struct rte_mbuf **rx_pkts,
1698 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
1699 volatile struct hns3_desc *rxdp; /* pointer of the current desc */
1700 struct hns3_rx_queue *rxq; /* RX queue */
1701 struct hns3_entry *sw_ring;
1702 struct hns3_entry *rxe;
1703 struct rte_mbuf *first_seg;
1704 struct rte_mbuf *last_seg;
1705 struct hns3_desc rxd;
1706 struct rte_mbuf *nmb; /* pointer of the new mbuf */
1707 struct rte_mbuf *rxm;
1708 struct rte_eth_dev *dev;
1709 uint32_t bd_base_info;
1724 rx_id = rxq->next_to_use;
1725 rx_ring = rxq->rx_ring;
1726 sw_ring = rxq->sw_ring;
1727 first_seg = rxq->pkt_first_seg;
1728 last_seg = rxq->pkt_last_seg;
1730 while (nb_rx < nb_pkts) {
1731 rxdp = &rx_ring[rx_id];
1732 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
1733 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
1737 * The interactive process between software and hardware of
1738 * receiving a new packet in hns3 network engine:
1739 * 1. Hardware network engine firstly writes the packet content
1740 * to the memory pointed to by the 'addr' field of the Rx Buffer
1741 * Descriptor, and secondly fills the result of parsing the
1742 * packet, including the valid field, into the Rx Buffer
1743 * Descriptor in one write operation.
1744 * 2. Driver reads the Rx BD's valid field in the loop to check
1745 * whether it's valid; if so, it assigns a new address to the
1746 * addr field, clears the valid field, gets the other packet
1747 * information by parsing the Rx BD's other fields, and finally
1748 * writes back the number of Rx BDs processed by the driver to
1749 * the HNS3_RING_RX_HEAD_REG register to inform the hardware.
1751 * In the above process, the ordering is very important. We must
1752 * make sure that the CPU reads the Rx BD's other fields only after
1753 * the Rx BD's valid bit has been checked.
1755 * There are two types of re-ordering: compiler re-ordering and
1756 * CPU re-ordering under the ARMv8 architecture.
1757 * 1. We use volatile to deal with compiler re-ordering, which is
1758 * why rx_ring/rxdp are defined as volatile.
1759 * 2. We commonly use a memory barrier to deal with CPU re-ordering,
1760 * but the cost is high.
1762 * In order to avoid the high cost of using a memory barrier, we use
1763 * the data dependency ordering of the ARMv8 architecture, e.g. given:
1765 * instr01: load A
1766 * instr02: load B <- A
1767 * instr02 will always execute after instr01.
1769 * To construct the data dependency ordering, we use the
1770 * following assignment:
1771 * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1772 * (1u<<HNS3_RXD_VLD_B)]
1773 * Using the gcc compiler under the ARMv8 architecture, a related
1774 * assembly code example is as follows:
1775 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
1776 * instr01: ldr w26, [x22, #28] --read bd_base_info
1777 * instr02: and w0, w26, #0x10 --calc bd_base_info & 0x10
1778 * instr03: sub w0, w0, #0x10 --calc (bd_base_info & 0x10) - 0x10
1780 * instr04: add x0, x22, x0, lsl #5 --calc copy source addr
1781 * instr05: ldp x2, x3, [x0]
1782 * instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
1783 * instr07: ldp x4, x5, [x0, #16]
1784 * instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
1785 * instr05~08 depend on x0's value, and x0 depends on w26's
1786 * value; w26 is the bd_base_info. This forms the data
1787 * dependency ordering.
1788 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
1789 * (1u<<HNS3_RXD_VLD_B) will always be zero, so the
1790 * assignment is correct.
1792 * So we use the data dependency ordering instead of memory
1793 * barrier to improve receive performance.
1795 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
1796 (1u << HNS3_RXD_VLD_B)];
1798 nmb = hns3_rx_alloc_buffer(rxq);
1799 if (unlikely(nmb == NULL)) {
1800 dev = &rte_eth_devices[rxq->port_id];
1801 dev->data->rx_mbuf_alloc_failed++;
1806 rxe = &sw_ring[rx_id];
1808 if (unlikely(rx_id == rxq->nb_rx_desc))
1811 rte_prefetch0(sw_ring[rx_id].mbuf);
1812 if ((rx_id & HNS3_RX_RING_PREFETCH_MASK) == 0) {
1813 rte_prefetch0(&rx_ring[rx_id]);
1814 rte_prefetch0(&sw_ring[rx_id]);
1820 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1821 rxdp->rx.bd_base_info = 0;
1822 rxdp->addr = dma_addr;
1824 if (first_seg == NULL) {
1826 first_seg->nb_segs = 1;
1828 first_seg->nb_segs++;
1829 last_seg->next = rxm;
1832 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1833 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
1835 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
1842 * This is the last buffer of the received packet. The packet length
1843 * from the buffer descriptor may contain the CRC length, so the CRC
1844 * length must be subtracted from it, and likewise from the data length.
1846 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
1849 * This is the last buffer of the received packet. If the CRC
1850 * is not stripped by the hardware:
1851 * - Subtract the CRC length from the total packet length.
1852 * - If the last buffer only contains the whole CRC or a part
1853 * of it, free the mbuf associated to the last buffer. If part
1854 * of the CRC is also contained in the previous mbuf, subtract
1855 * the length of that CRC part from the data length of the
1859 if (unlikely(rxq->crc_len > 0)) {
1860 first_seg->pkt_len -= rxq->crc_len;
1861 recalculate_data_len(first_seg, last_seg, rxm, rxq,
1865 first_seg->port = rxq->port_id;
1866 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
1867 first_seg->ol_flags = PKT_RX_RSS_HASH;
1868 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
1869 first_seg->hash.fdir.hi =
1870 rte_le_to_cpu_16(rxd.rx.fd_id);
1871 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1874 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
1875 HNS3_RXD_GRO_SIZE_S);
1876 if (gro_size != 0) {
1877 first_seg->ol_flags |= PKT_RX_LRO;
1878 first_seg->tso_segsz = gro_size;
1881 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
1882 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
1883 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
1884 l234_info, &cksum_err);
1888 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
1889 l234_info, ol_info);
1891 if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
1892 hns3_rx_set_cksum_flag(first_seg,
1893 first_seg->packet_type,
1895 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
1897 rx_pkts[nb_rx++] = first_seg;
1901 rte_pktmbuf_free(first_seg);
1905 rxq->next_to_use = rx_id;
1906 rxq->pkt_first_seg = first_seg;
1907 rxq->pkt_last_seg = last_seg;
1909 rxq->rx_free_hold += nb_rx_bd;
1910 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
1911 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
1912 rxq->rx_free_hold = 0;
1919 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
1920 struct rte_eth_burst_mode *mode)
1922 static const struct {
1923 eth_rx_burst_t pkt_burst;
1926 { hns3_recv_pkts, "Scalar" },
1927 { hns3_recv_scattered_pkts, "Scalar Scattered" },
1930 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1934 for (i = 0; i < RTE_DIM(burst_infos); i++) {
1935 if (pkt_burst == burst_infos[i].pkt_burst) {
1936 snprintf(mode->info, sizeof(mode->info), "%s",
1937 burst_infos[i].info);
1946 static eth_rx_burst_t
1947 hns3_get_rx_function(struct rte_eth_dev *dev)
1949 struct hns3_adapter *hns = dev->data->dev_private;
1950 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
1952 if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
1953 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
1954 return hns3_recv_pkts;
1956 return hns3_recv_scattered_pkts;
1960 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
1961 uint16_t nb_desc, uint16_t *tx_rs_thresh,
1962 uint16_t *tx_free_thresh, uint16_t idx)
1964 #define HNS3_TX_RS_FREE_THRESH_GAP 8
1965 uint16_t rs_thresh, free_thresh, fast_free_thresh;
1967 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1968 nb_desc % HNS3_ALIGN_RING_DESC) {
1969 hns3_err(hw, "number (%u) of tx descriptors is invalid",
1974 rs_thresh = (conf->tx_rs_thresh > 0) ?
1975 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
1976 free_thresh = (conf->tx_free_thresh > 0) ?
1977 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
1978 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
1979 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
1980 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
1981 hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
1982 "(%d) of tx descriptors for port=%d queue=%d check "
1984 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
1989 if (conf->tx_free_thresh == 0) {
1990 /* Fast free Tx memory buffer to improve cache hit rate */
1991 fast_free_thresh = nb_desc - rs_thresh;
1992 if (fast_free_thresh >=
1993 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
1994 free_thresh = fast_free_thresh -
1995 HNS3_TX_FAST_FREE_AHEAD;
1998 *tx_rs_thresh = rs_thresh;
1999 *tx_free_thresh = free_thresh;
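/*
 * A worked example with assumed defaults (rs_thresh = 32, free_thresh =
 * 8) and nb_desc = 1024: 32 + 8 <= 1024, 1024 % 32 == 0, and both
 * thresholds stay at least HNS3_TX_RS_FREE_THRESH_GAP descriptors below
 * the ring size, so the check passes.
 */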
2004 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2005 unsigned int socket_id, const struct rte_eth_txconf *conf)
2007 struct hns3_adapter *hns = dev->data->dev_private;
2008 uint16_t tx_rs_thresh, tx_free_thresh;
2009 struct hns3_hw *hw = &hns->hw;
2010 struct hns3_queue_info q_info;
2011 struct hns3_tx_queue *txq;
2015 if (dev->data->dev_started) {
2016 hns3_err(hw, "tx_queue_setup after dev_start no supported");
2020 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2021 &tx_rs_thresh, &tx_free_thresh, idx);
2025 if (dev->data->tx_queues[idx] != NULL) {
2026 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2027 dev->data->tx_queues[idx] = NULL;
2031 q_info.socket_id = socket_id;
2032 q_info.nb_desc = nb_desc;
2033 q_info.type = "hns3 TX queue";
2034 q_info.ring_name = "tx_ring";
2035 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2038 "Failed to alloc mem and reserve DMA mem for tx ring!");
2042 txq->tx_deferred_start = conf->tx_deferred_start;
2043 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
2044 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
2045 RTE_CACHE_LINE_SIZE, socket_id);
2046 if (txq->sw_ring == NULL) {
2047 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
2048 hns3_tx_queue_release(txq);
2053 txq->next_to_use = 0;
2054 txq->next_to_clean = 0;
2055 txq->tx_bd_ready = txq->nb_tx_desc - 1;
2056 txq->tx_free_thresh = tx_free_thresh;
2057 txq->tx_rs_thresh = tx_rs_thresh;
2058 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
2059 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
2060 RTE_CACHE_LINE_SIZE, socket_id);
2062 hns3_err(hw, "failed to allocate tx mbuf free array!");
2063 hns3_tx_queue_release(txq);
2067 txq->port_id = dev->data->port_id;
2068 txq->pvid_state = hw->port_base_vlan_cfg.state;
2069 txq->configured = true;
2070 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
2071 idx * HNS3_TQP_REG_SIZE);
2072 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
2073 HNS3_RING_TX_TAIL_REG);
2074 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
2075 txq->over_length_pkt_cnt = 0;
2076 txq->exceed_limit_bd_pkt_cnt = 0;
2077 txq->exceed_limit_bd_reassem_fail = 0;
2078 txq->unsupported_tunnel_pkt_cnt = 0;
2079 txq->queue_full_cnt = 0;
2080 txq->pkt_padding_fail_cnt = 0;
2081 rte_spinlock_lock(&hw->lock);
2082 dev->data->tx_queues[idx] = txq;
2083 rte_spinlock_unlock(&hw->lock);
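/*
 * Reclaim completed Tx BDs: starting from next_to_clean, walk the ring
 * while hardware has cleared the VLD bit and the slot is not the next
 * one to be used, freeing the mbuf segment attached to each reclaimed
 * BD and advancing tx_bd_ready accordingly.
 */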
2089 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
2091 uint16_t tx_next_clean = txq->next_to_clean;
2092 uint16_t tx_next_use = txq->next_to_use;
2093 uint16_t tx_bd_ready = txq->tx_bd_ready;
2094 uint16_t tx_bd_max = txq->nb_tx_desc;
2095 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
2096 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
2097 struct rte_mbuf *mbuf;
2099 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
2100 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
2101 tx_next_use != tx_next_clean) {
2102 mbuf = tx_bak_pkt->mbuf;
2104 rte_pktmbuf_free_seg(mbuf);
2105 tx_bak_pkt->mbuf = NULL;
2113 if (tx_next_clean >= tx_bd_max) {
2115 desc = txq->tx_ring;
2116 tx_bak_pkt = txq->sw_ring;
2120 txq->next_to_clean = tx_next_clean;
2121 txq->tx_bd_ready = tx_bd_ready;
2125 hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
2126 struct rte_mbuf *rxm, uint8_t *l2_len)
2132 tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
2136 otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
2137 switch (tun_flags) {
2138 case PKT_TX_TUNNEL_GENEVE:
2139 case PKT_TX_TUNNEL_VXLAN:
2140 *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
2142 case PKT_TX_TUNNEL_GRE:
2144 * OL4 header size, defined in 4 bytes; it contains the outer
2145 * L4 (GRE) length and the tunneling length.
2147 ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
2149 *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
2152 /* For tunneling other than UDP / GRE, drop the tunnel packet */
2155 hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2156 rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
2157 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
2163 hns3_config_gro(struct hns3_hw *hw, bool en)
2165 struct hns3_cfg_gro_status_cmd *req;
2166 struct hns3_cmd_desc desc;
2169 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
2170 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
2172 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
2174 ret = hns3_cmd_send(hw, &desc, 1);
2176 hns3_err(hw, "%s hardware GRO failed, ret = %d",
2177 en ? "enable" : "disable", ret);
2183 hns3_restore_gro_conf(struct hns3_hw *hw)
2189 offloads = hw->data->dev_conf.rxmode.offloads;
2190 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2191 ret = hns3_config_gro(hw, gro_en);
2193 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
2194 gro_en ? "enabled" : "disabled", ret);
2200 hns3_pkt_is_tso(struct rte_mbuf *m)
2202 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
2206 hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
2207 uint32_t paylen, struct rte_mbuf *rxm)
2209 uint8_t l2_len = rxm->l2_len;
2212 if (!hns3_pkt_is_tso(rxm))
2215 if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
2218 if (paylen <= rxm->tso_segsz)
2221 tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
2222 hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
2223 hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
2224 hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
2225 hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
2226 hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
2227 sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
2228 hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
2229 l2_len >> HNS3_L2_LEN_UNIT);
2230 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
2231 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
2235 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
2237 desc->addr = rte_mbuf_data_iova(rxm);
2238 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
2239 desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
2243 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
2244 struct rte_mbuf *rxm)
2246 uint64_t ol_flags = rxm->ol_flags;
2250 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
2251 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
2252 rxm->outer_l2_len + rxm->outer_l3_len : 0;
2253 paylen = rxm->pkt_len - hdr_len;
2254 desc->tx.paylen = rte_cpu_to_le_32(paylen);
2255 hns3_set_tso(desc, ol_flags, paylen, rxm);
2258 * Currently, the hns3 network engine's hardware doesn't support more
2259 * than two layers of VLAN offload in the Tx direction. So when the
2260 * number of VLANs in the packet represented by rxm plus the number of
2261 * VLANs offloaded by hardware (such as PVID) exceeds two, the packet
2262 * will be discarded or its original VLAN will be overwritten by the
2263 * hardware. When the PF PVID is enabled via rte_eth_dev_set_vlan_pvid,
2264 * or the VF PVID is enabled by the hns3 PF kernel ether driver, the
2265 * outer VLAN tag will always be the PVID. To avoid the VLAN of the Tx
2266 * descriptor being overwritten by the PVID, the VLAN should be placed
2267 * close to the IP header when PVID is enabled.
2269 if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
2271 desc->tx.ol_type_vlan_len_msec |=
2272 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
2273 if (ol_flags & PKT_TX_QINQ_PKT)
2274 desc->tx.outer_vlan_tag =
2275 rte_cpu_to_le_16(rxm->vlan_tci_outer);
2277 desc->tx.outer_vlan_tag =
2278 rte_cpu_to_le_16(rxm->vlan_tci);
2281 if (ol_flags & PKT_TX_QINQ_PKT ||
2282 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
2283 desc->tx.type_cs_vlan_tso_len |=
2284 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
2285 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
2290 hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
2291 uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
2293 struct rte_mbuf *new_mbuf = NULL;
2294 struct rte_eth_dev *dev;
2295 struct rte_mbuf *temp;
2299 /* Allocate enough mbufs */
2300 for (i = 0; i < nb_new_buf; i++) {
2301 temp = rte_pktmbuf_alloc(mb_pool);
2302 if (unlikely(temp == NULL)) {
2303 dev = &rte_eth_devices[txq->port_id];
2304 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305 hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
2306 "queue_id=%d in reassemble tx pkts.",
2307 txq->port_id, txq->queue_id);
2308 rte_pktmbuf_free(new_mbuf);
2311 temp->next = new_mbuf;
2315 if (new_mbuf == NULL)
2318 new_mbuf->nb_segs = nb_new_buf;
2319 *alloc_mbuf = new_mbuf;
2325 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
2327 new_pkt->ol_flags = old_pkt->ol_flags;
2328 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
2329 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
2330 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
2331 new_pkt->l2_len = old_pkt->l2_len;
2332 new_pkt->l3_len = old_pkt->l3_len;
2333 new_pkt->l4_len = old_pkt->l4_len;
2334 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
2335 new_pkt->vlan_tci = old_pkt->vlan_tci;
static int
hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
			struct rte_mbuf **new_pkt)
{
	struct hns3_tx_queue *txq = tx_queue;
	struct rte_mempool *mb_pool;
	struct rte_mbuf *new_mbuf;
	struct rte_mbuf *temp_new;
	struct rte_mbuf *temp;
	uint16_t last_buf_len;
	uint16_t nb_new_buf;
	char *s, *d;
	uint16_t buf_size;
	uint16_t buf_len;
	uint16_t len_s;
	uint16_t len_d;
	uint16_t len;
	uint16_t i;
	int ret;

	mb_pool = tx_pkt->pool;
	buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
	nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
	if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
		return -EINVAL;

	last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
	if (last_buf_len == 0)
		last_buf_len = buf_size;

	/* Allocate enough mbufs */
	ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
	if (ret)
		return ret;

	/* Copy the original packet content to the new mbufs */
	temp = tx_pkt;
	s = rte_pktmbuf_mtod(temp, char *);
	len_s = rte_pktmbuf_data_len(temp);
	temp_new = new_mbuf;
	for (i = 0; i < nb_new_buf; i++) {
		d = rte_pktmbuf_mtod(temp_new, char *);
		if (i < nb_new_buf - 1)
			buf_len = buf_size;
		else
			buf_len = last_buf_len;
		len_d = buf_len;

		while (len_d) {
			len = RTE_MIN(len_s, len_d);
			memcpy(d, s, len);
			s = s + len;
			d = d + len;
			len_d = len_d - len;
			len_s = len_s - len;

			if (len_s == 0) {
				temp = temp->next;
				if (temp == NULL)
					break;
				s = rte_pktmbuf_mtod(temp, char *);
				len_s = rte_pktmbuf_data_len(temp);
			}
		}

		temp_new->data_len = buf_len;
		temp_new = temp_new->next;
	}
	hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);

	/* free original mbufs */
	rte_pktmbuf_free(tx_pkt);

	*new_pkt = new_mbuf;

	return 0;
}

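/*
 * Worked example (illustrative numbers): for a 9000-byte packet in a pool
 * whose mbufs have 2048 bytes of data room after RTE_PKTMBUF_HEADROOM,
 * nb_new_buf = (9000 - 1) / 2048 + 1 = 5 and last_buf_len = 9000 % 2048 =
 * 808, i.e. four full 2048-byte segments plus one 808-byte tail segment.
 */
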
static void
hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
{
	uint32_t tmp = *ol_type_vlan_len_msec;

	/* (outer) IP header type */
	if (ol_flags & PKT_TX_OUTER_IPV4) {
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
			hns3_set_field(tmp, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (ol_flags & PKT_TX_OUTER_IPV6) {
		hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
		/* OL3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
	}

	*ol_type_vlan_len_msec = tmp;
}

static int
hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
			struct rte_net_hdr_lens *hdr_lens)
{
	uint32_t tmp = *ol_type_vlan_len_msec;
	uint8_t l4_len;

	/* OL2 header size, defined in 2 bytes */
	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	/* L4TUNT: L4 Tunneling Type */
	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
	case PKT_TX_TUNNEL_GENEVE:
	case PKT_TX_TUNNEL_VXLAN:
		/* MAC-in-UDP tunnelling packet, including VXLAN */
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
		/*
		 * OL4 header size, defined in 4-byte units; it contains the
		 * outer L4 (UDP) length and the tunneling header length.
		 */
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
			       HNS3_L4_LEN_UNIT);
		break;
	case PKT_TX_TUNNEL_GRE:
		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
		/*
		 * OL4 header size, defined in 4-byte units; it contains the
		 * outer L4 (GRE) length and the tunneling header length.
		 */
		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       l4_len >> HNS3_L4_LEN_UNIT);
		break;
	default:
		/* For non-UDP/GRE tunneling packets, drop the packet */
		return -EINVAL;
	}

	*ol_type_vlan_len_msec = tmp;

	return 0;
}

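/*
 * Example of the OL4 length unit (illustrative): RTE_ETHER_VXLAN_HLEN is
 * 16 bytes (8-byte outer UDP header plus 8-byte VXLAN header), so the L4LEN
 * field above is written as 16 >> HNS3_L4_LEN_UNIT = 4, i.e. the length in
 * the 4-byte units expected by the hardware.
 */
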
static int
hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			    uint64_t ol_flags,
			    struct rte_net_hdr_lens *hdr_lens)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;
	int ret;

	hns3_parse_outer_params(ol_flags, &value);
	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
	if (ret)
		return ret;

	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);

	return 0;
}

static void
hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L3 checksum offloads */
	if (ol_flags & PKT_TX_IPV4) {
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
		if (ol_flags & PKT_TX_IP_CKSUM)
			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
		*type_cs_vlan_tso_len = tmp;
	} else if (ol_flags & PKT_TX_IPV6) {
		tmp = *type_cs_vlan_tso_len;
		/* L3T, IPv6 doesn't do checksum */
		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
		/* inner(/normal) L3 header size, defined in 4 bytes */
		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
	}
}

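/*
 * Example of the L3LEN encoding (illustrative): a plain 20-byte IPv4 header
 * is written as sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT =
 * 20 >> 2 = 5, i.e. the header length in 4-byte units. Note the fixed
 * header size is used here; IP options are not accounted for.
 */
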
static void
hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
{
	uint32_t tmp;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_UDP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	case PKT_TX_SCTP_CKSUM:
		tmp = *type_cs_vlan_tso_len;
		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
		*type_cs_vlan_tso_len = tmp;
		break;
	default:
		break;
	}
}

static void
hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
			 uint64_t ol_flags)
{
	struct hns3_desc *tx_ring = txq->tx_ring;
	struct hns3_desc *desc = &tx_ring[tx_desc_id];
	uint32_t value = 0;

	/* inner(/normal) L2 header size, defined in 2 bytes */
	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);

	hns3_parse_l3_cksum_params(ol_flags, &value);
	hns3_parse_l4_cksum_params(ol_flags, &value);

	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}

static bool
hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
{
	struct rte_mbuf *m_first = tx_pkts;
	struct rte_mbuf *m_last = tx_pkts;
	uint32_t tot_len = 0;
	uint32_t hdr_len;
	uint32_t i;

	/*
	 * Hardware requires that the sum of the data lengths of every 8
	 * consecutive buffers be greater than the MSS on the hns3 network
	 * engine. We simplify it by ensuring that pkt_headlen plus the first
	 * 8 consecutive fragments exceed the GSO header length plus the MSS,
	 * and that each subsequent window of 7 consecutive fragments exceeds
	 * the MSS, except for the last 7 fragments.
	 */
	if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
		return false;

	for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
	     i++, m_last = m_last->next)
		tot_len += m_last->data_len;

	if (!m_last)
		return true;

	/* ensure the first 8 frags are greater than mss + header */
	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
	hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
		return true;

	/*
	 * ensure the sum of the data lengths of every 7 consecutive buffers
	 * is greater than mss except the last one.
	 */
	for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
		tot_len -= m_first->data_len;
		tot_len += m_last->data_len;

		if (tot_len < tx_pkts->tso_segsz)
			return true;

		m_first = m_first->next;
		m_last = m_last->next;
	}

	return false;
}

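/*
 * Worked example for the function above (illustrative): with
 * HNS3_MAX_NON_TSO_BD_PER_PKT == 8, bd_num == 10 and tso_segsz == 1460,
 * the first loop sums data_len over segments 0..6, the header check then
 * requires segments 0..7 to hold at least hdr_len + 1460 bytes, and the
 * sliding-window loop requires the 7-segment windows 1..7 and 2..8 to each
 * hold at least 1460 bytes. Any failing window means the caller must
 * linearize the packet in software before it can be transmitted.
 */
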
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint32_t paylen, hdr_len;

	if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
		return;

	if (ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						   m->outer_l2_len);

		if (ol_flags & PKT_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
	    ol_flags & PKT_TX_TCP_SEG) {
		hdr_len = m->l2_len + m->l3_len + m->l4_len;
		hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
				m->outer_l2_len + m->outer_l3_len : 0;
		paylen = m->pkt_len - hdr_len;
		if (paylen <= m->tso_segsz)
			return;
		udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
						  (m->outer_l2_len +
						   m->outer_l3_len));
		udp_hdr->dgram_cksum = 0;
	}
}

static int
hns3_check_tso_pkt_valid(struct rte_mbuf *m)
{
	uint32_t tmp_data_len_sum = 0;
	uint16_t nb_buf = m->nb_segs;
	uint32_t paylen, hdr_len;
	struct rte_mbuf *m_seg;
	int i;

	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
		return -EINVAL;

	hdr_len = m->l2_len + m->l3_len + m->l4_len;
	hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
			m->outer_l2_len + m->outer_l3_len : 0;
	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
		return -EINVAL;

	paylen = m->pkt_len - hdr_len;
	if (paylen > HNS3_MAX_BD_PAYLEN)
		return -EINVAL;

	/*
	 * The TSO header (including the outer and inner L2, L3 and L4
	 * headers) must be provided by at most three descriptors on the hns3
	 * network engine.
	 */
	m_seg = m;
	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
	     i++, m_seg = m_seg->next) {
		tmp_data_len_sum += m_seg->data_len;
	}

	if (hdr_len > tmp_data_len_sum)
		return -EINVAL;

	return 0;
}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
static inline int
hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eh;
	struct rte_vlan_hdr *vh;

	if (!txq->pvid_state)
		return 0;

	/*
	 * Due to hardware limitations, we only support two-layer VLAN
	 * hardware offload in the Tx direction on the hns3 network engine,
	 * so when PVID is enabled, QinQ insert is no longer supported.
	 * Furthermore, when PVID is enabled, the following two kinds of
	 * packets:
	 *  i) packets with more than two VLAN tags.
	 *  ii) packets with one VLAN tag while the hardware VLAN insert is
	 *      enabled.
	 * are regarded as abnormal packets and discarded by hardware in the
	 * Tx direction. For debugging purposes, a validation check for these
	 * types of packets is added to the '.tx_pkt_prepare' ops
	 * implementation function named hns3_prep_pkts to inform users that
	 * these packets will be discarded.
	 */
	if (m->ol_flags & PKT_TX_QINQ_PKT)
		return -EINVAL;

	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		if (m->ol_flags & PKT_TX_VLAN_PKT)
			return -EINVAL;

		/* Ensure the incoming packet is not a QinQ packet */
		vh = (struct rte_vlan_hdr *)(eh + 1);
		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
			return -EINVAL;
	}

	return 0;
}
#endif

uint16_t
hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	struct rte_mbuf *m;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if (hns3_pkt_is_tso(m) &&
		    (hns3_pkt_need_linearized(m, m->nb_segs) ||
		     hns3_check_tso_pkt_valid(m))) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		if (hns3_vld_vlan_chk(tx_queue, m)) {
			rte_errno = EINVAL;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		hns3_outer_header_cksum_prepare(m);
	}

	return i;
}

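/*
 * Usage sketch (illustrative, not part of the driver): this function is
 * installed as the '.tx_pkt_prepare' ops, so an application typically runs
 * it through the generic ethdev API before transmitting, e.g.:
 *
 *	nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb < nb_pkts)
 *		handle_bad_pkt(pkts[nb], rte_errno);
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *
 * where port_id, queue_id and pkts are application-defined and
 * handle_bad_pkt is a hypothetical error handler.
 */
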
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
{
	/* Fill in tunneling parameters if necessary */
	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
						hdr_lens)) {
			txq->unsupported_tunnel_pkt_cnt++;
			return -EINVAL;
		}
	}
	/* Enable checksum offloading */
	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);

	return 0;
}

static int
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
		       struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
	struct rte_mbuf *new_pkt;
	int ret;

	if (hns3_pkt_is_tso(*m_seg))
		return 0;

	/*
	 * If the packet length is greater than HNS3_MAX_FRAME_LEN supported
	 * by the driver, the packet is dropped.
	 */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
		txq->over_length_pkt_cnt++;
		return -EINVAL;
	}

	if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
		txq->exceed_limit_bd_pkt_cnt++;
		ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
		if (ret) {
			txq->exceed_limit_bd_reassem_fail++;
			return ret;
		}
		*m_seg = new_pkt;
	}

	return 0;
}

static inline void
hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
{
	struct hns3_entry *tx_entry;
	struct hns3_desc *desc;
	uint16_t tx_next_clean;
	int i;

	while (1) {
		if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
			break;

		/*
		 * All mbufs can be released only when the VLD bits of all
		 * descriptors in a batch are cleared.
		 */
		tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
				txq->nb_tx_desc;
		desc = &txq->tx_ring[tx_next_clean];
		for (i = 0; i < txq->tx_rs_thresh; i++) {
			if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
					BIT(HNS3_TXD_VLD_B))
				return;
			desc--;
		}

		tx_entry = &txq->sw_ring[txq->next_to_clean];

		for (i = 0; i < txq->tx_rs_thresh; i++)
			rte_prefetch0((tx_entry + i)->mbuf);
		for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
			rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
			tx_entry->mbuf = NULL;
		}

		txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
		txq->tx_bd_ready += txq->tx_rs_thresh;
	}
}

static inline void
hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
{
	tx_entry->mbuf = pkts[0];
}

static inline void
hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
{
	hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
	hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
	hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
	hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
}

static inline void
hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
{
#define PER_LOOP_NUM	4
	const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
	uint64_t dma_addr;
	uint32_t i;

	for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
		dma_addr = rte_mbuf_data_iova(*pkts);
		txdp->addr = rte_cpu_to_le_64(dma_addr);
		txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
		txdp->tx.paylen = 0;
		txdp->tx.type_cs_vlan_tso_len = 0;
		txdp->tx.ol_type_vlan_len_msec = 0;
		txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
	}
}

static inline void
hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
{
	const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
	uint64_t dma_addr;

	dma_addr = rte_mbuf_data_iova(*pkts);
	txdp->addr = rte_cpu_to_le_64(dma_addr);
	txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
	txdp->tx.paylen = 0;
	txdp->tx.type_cs_vlan_tso_len = 0;
	txdp->tx.ol_type_vlan_len_msec = 0;
	txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
}

static inline void
hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
		     struct rte_mbuf **pkts,
		     uint16_t nb_pkts)
{
#define PER_LOOP_NUM	4
#define PER_LOOP_MASK	(PER_LOOP_NUM - 1)
	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
	const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
	const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
	uint32_t i;

	for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
		hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
		hns3_tx_setup_4bd(txdp + i, pkts + i);
	}
	if (unlikely(leftover > 0)) {
		for (i = 0; i < leftover; i++) {
			hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
					     pkts + mainpart + i);
			hns3_tx_setup_1bd(txdp + mainpart + i,
					  pkts + mainpart + i);
		}
	}
}

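/*
 * Worked example (illustrative): for nb_pkts == 10, mainpart == (10 & ~3)
 * == 8 and leftover == (10 & 3) == 2, so two unrolled 4-descriptor
 * iterations handle packets 0..7 and the scalar tail loop handles packets
 * 8 and 9.
 */
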
uint16_t
hns3_xmit_pkts_simple(void *tx_queue,
		      struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = tx_queue;
	uint16_t nb_tx = 0;

	hns3_tx_free_buffer_simple(txq);

	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
	if (unlikely(nb_pkts == 0)) {
		if (txq->tx_bd_ready == 0)
			txq->queue_full_cnt++;
		return 0;
	}

	txq->tx_bd_ready -= nb_pkts;
	if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
		nb_tx = txq->nb_tx_desc - txq->next_to_use;
		hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
		txq->next_to_use = 0;
	}

	hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
	txq->next_to_use += nb_pkts - nb_tx;

	hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);

	return nb_pkts;
}

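/*
 * Ring wrap-around example (illustrative numbers): with nb_tx_desc == 512,
 * next_to_use == 510 and nb_pkts == 4, the first hns3_tx_fill_hw_ring()
 * call writes descriptors 510..511 (nb_tx == 2), next_to_use resets to 0,
 * and the second call writes descriptors 0..1, before the doorbell write
 * reports all 4 packets to hardware.
 */
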
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens = {0};
	struct hns3_tx_queue *txq = tx_queue;
	struct hns3_entry *tx_bak_pkt;
	struct hns3_desc *tx_ring;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	struct hns3_desc *desc;
	uint32_t nb_hold = 0;
	uint16_t tx_next_use;
	uint16_t tx_pkt_num;
	uint16_t tx_bd_max;
	uint16_t nb_buf;
	uint16_t nb_tx;
	uint16_t i;

	/* free useless buffer */
	hns3_tx_free_useless_buffer(txq);

	tx_next_use = txq->next_to_use;
	tx_bd_max = txq->nb_tx_desc;
	tx_pkt_num = nb_pkts;
	tx_ring = txq->tx_ring;

	/* send packets */
	tx_bak_pkt = &txq->sw_ring[tx_next_use];
	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
		tx_pkt = *tx_pkts++;

		nb_buf = tx_pkt->nb_segs;

		if (nb_buf > txq->tx_bd_ready) {
			txq->queue_full_cnt++;
			if (nb_tx == 0)
				return 0;

			goto end_of_tx;
		}

		/*
		 * If the packet length is less than the minimum packet length
		 * supported by hardware in the Tx direction, the driver needs
		 * to pad it to avoid an error.
		 */
		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
						txq->min_tx_pkt_len)) {
			uint16_t add_len;
			char *appended;

			add_len = txq->min_tx_pkt_len -
				  rte_pktmbuf_pkt_len(tx_pkt);
			appended = rte_pktmbuf_append(tx_pkt, add_len);
			if (appended == NULL) {
				txq->pkt_padding_fail_cnt++;
				break;
			}

			memset(appended, 0, add_len);
		}

		m_seg = tx_pkt;

		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
			goto end_of_tx;

		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
			goto end_of_tx;

		i = 0;
		desc = &tx_ring[tx_next_use];

		/*
		 * If the packet is divided into multiple Tx Buffer
		 * Descriptors, only the first Tx Buffer Descriptor needs the
		 * vlan, paylen and tso fields filled in.
		 */
		hns3_fill_first_desc(txq, desc, m_seg);

		do {
			desc = &tx_ring[tx_next_use];
			/*
			 * Fill valid bits, DMA address and data length for
			 * each Tx Buffer Descriptor.
			 */
			hns3_fill_per_desc(desc, m_seg);
			tx_bak_pkt->mbuf = m_seg;
			m_seg = m_seg->next;
			tx_next_use++;
			tx_bak_pkt++;
			if (tx_next_use >= tx_bd_max) {
				tx_next_use = 0;
				tx_bak_pkt = txq->sw_ring;
			}

			i++;
		} while (m_seg != NULL);

		/* Add end flag for the last Tx Buffer Descriptor */
		desc->tx.tp_fe_sc_vld_ra_ri |=
				rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));

		nb_hold += i;
		txq->next_to_use = tx_next_use;
		txq->tx_bd_ready -= i;
	}

end_of_tx:

	if (likely(nb_tx))
		hns3_write_reg_opt(txq->io_tail_reg, nb_hold);

	return nb_tx;
}

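/*
 * Note (summary of the loop above): tx_bd_ready is decremented and nb_hold
 * accumulated per packet as descriptors are consumed, so a single doorbell
 * write at 'end_of_tx' is enough to report every descriptor filled in this
 * burst to the hardware.
 */
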
int __rte_weak
hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
{
	return -ENOTSUP;
}

uint16_t __rte_weak
hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
		   __rte_unused struct rte_mbuf **tx_pkts,
		   __rte_unused uint16_t nb_pkts)
{
	return 0;
}

int
hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		       struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	const char *info = NULL;

	if (pkt_burst == hns3_xmit_pkts_simple)
		info = "Scalar Simple";
	else if (pkt_burst == hns3_xmit_pkts)
		info = "Scalar";
	else if (pkt_burst == hns3_xmit_pkts_vec)
		info = "Vector Neon";

	if (info == NULL)
		return -EINVAL;

	snprintf(mode->info, sizeof(mode->info), "%s", info);

	return 0;
}

static eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
	struct hns3_adapter *hns = dev->data->dev_private;

	if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
		*prep = NULL;
		return hns3_xmit_pkts_vec;
	}

	if (hns->tx_simple_allowed &&
	    offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
		*prep = NULL;
		return hns3_xmit_pkts_simple;
	}

	*prep = hns3_prep_pkts;
	return hns3_xmit_pkts;
}

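/*
 * Selection example (illustrative): a port configured with only
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE (and tx_simple_allowed set) gets
 * hns3_xmit_pkts_simple with no prepare stage, while any other offload,
 * such as DEV_TX_OFFLOAD_TCP_CKSUM, falls through to the full-featured
 * hns3_xmit_pkts paired with hns3_prep_pkts.
 */
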
static uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
		      struct rte_mbuf **pkts __rte_unused,
		      uint16_t pkts_n __rte_unused)
{
	return 0;
}

void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	eth_tx_prep_t prep = NULL;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
		eth_dev->tx_pkt_prepare = prep;
	} else {
		eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
	}
}

void
hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->nb_desc = rxq->nb_rx_desc;
	qinfo->scattered_rx = dev->data->scattered_rx;

	/*
	 * If there are no available Rx buffer descriptors, incoming packets
	 * are always dropped by hardware on the hns3 network engine.
	 */
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}

void
hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}