X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=c76e635ff112e0497bbbaa760475fe82c27ccf0b;hb=135155a8363d4279b4684ae4723e70bb6554b9e3;hp=dc09ea00f1573a8672221975455d5ae15744c217;hpb=7ef933908f04983ce6e8d1a930d59263b84d0485;p=dpdk.git diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index dc09ea00f1..c76e635ff1 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -2,27 +2,17 @@ * Copyright(c) 2018-2019 Hisilicon Limited. */ -#include -#include -#include -#include -#include -#include #include -#include #include #include -#include -#include -#include #include #include #include -#include -#include #include #include -#include +#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) +#include +#endif #include "hns3_ethdev.h" #include "hns3_rxtx.h" @@ -41,9 +31,23 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) if (rxq->sw_ring == NULL) return; - for (i = 0; i < rxq->nb_rx_desc; i++) - if (rxq->sw_ring[i].mbuf) - rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + if (rxq->rx_rearm_nb == 0) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } else { + for (i = rxq->next_to_use; + i != rxq->rx_rearm_start; + i = (i + 1) % rxq->nb_rx_desc) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } for (i = 0; i < rxq->bulk_mbuf_num; i++) rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]); @@ -60,7 +64,7 @@ hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq) { uint16_t i; - /* Note: Fake rx queue will not enter here */ + /* Note: Fake tx queue will not enter here */ if (txq->sw_ring) { for (i = 0; i < txq->nb_tx_desc; i++) { if (txq->sw_ring[i].mbuf) { @@ -95,6 +99,8 @@ hns3_tx_queue_release(void *queue) rte_memzone_free(txq->mz); if (txq->sw_ring) rte_free(txq->sw_ring); + if (txq->free) + rte_free(txq->free); rte_free(txq); } } @@ -250,7 +256,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) for (i = 0; i < rxq->nb_rx_desc; i++) { mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(mbuf == NULL)) { - hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!", + hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!", i); hns3_rx_queue_release_mbufs(rxq); return -ENOMEM; @@ -323,26 +329,26 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq) } void -hns3_update_all_queues_pvid_state(struct hns3_hw *hw) +hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw) { uint16_t nb_rx_q = hw->data->nb_rx_queues; uint16_t nb_tx_q = hw->data->nb_tx_queues; struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; - int pvid_state; + bool pvid_en; int i; - pvid_state = hw->port_base_vlan_cfg.state; + pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE; for (i = 0; i < hw->cfg_max_queues; i++) { if (i < nb_rx_q) { rxq = hw->data->rx_queues[i]; if (rxq != NULL) - rxq->pvid_state = pvid_state; + rxq->pvid_sw_discard_en = pvid_en; } if (i < nb_tx_q) { txq = hw->data->tx_queues[i]; if (txq != NULL) - txq->pvid_state = pvid_state; + txq->pvid_sw_shift_en = pvid_en; } } } @@ -355,27 +361,179 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en) struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; uint32_t rcb_reg; + void *tqp_base; int i; for (i = 0; i < hw->cfg_max_queues; i++) { - if (i < nb_rx_q) - rxq = hw->data->rx_queues[i]; - else - rxq = hw->fkq_data.rx_queues[i - nb_rx_q]; - if (i < nb_tx_q) - 
txq = hw->data->tx_queues[i]; - else - txq = hw->fkq_data.tx_queues[i - nb_tx_q]; - if (rxq == NULL || txq == NULL || - (en && (rxq->rx_deferred_start || txq->tx_deferred_start))) - continue; + if (hns3_dev_indep_txrx_supported(hw)) { + rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL; + txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL; + /* + * After initialization, rxq and txq won't be NULL at + * the same time. + */ + if (rxq != NULL) + tqp_base = rxq->io_base; + else if (txq != NULL) + tqp_base = txq->io_base; + else + return; + } else { + rxq = i < nb_rx_q ? hw->data->rx_queues[i] : + hw->fkq_data.rx_queues[i - nb_rx_q]; - rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG); + tqp_base = rxq->io_base; + } + /* + * This is the master switch that used to control the enabling + * of a pair of Tx and Rx queues. Both the Rx and Tx point to + * the same register + */ + rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG); if (en) rcb_reg |= BIT(HNS3_RING_EN_B); else rcb_reg &= ~BIT(HNS3_RING_EN_B); - hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg); + hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg); + } +} + +static void +hns3_enable_txq(struct hns3_tx_queue *txq, bool en) +{ + struct hns3_hw *hw = &txq->hns->hw; + uint32_t reg; + + if (hns3_dev_indep_txrx_supported(hw)) { + reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); + else + reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg); + } + txq->enabled = en; +} + +static void +hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en) +{ + struct hns3_hw *hw = &rxq->hns->hw; + uint32_t reg; + + if (hns3_dev_indep_txrx_supported(hw)) { + reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); + else + reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg); + } + rxq->enabled = en; +} + +int +hns3_start_all_txqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq; + uint16_t i, j; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (!txq) { + hns3_err(hw, "Tx queue %u not available or setup.", i); + goto start_txqs_fail; + } + /* + * Tx queue is enabled by default. Therefore, the Tx queues + * needs to be disabled when deferred_start is set. There is + * another master switch used to control the enabling of a pair + * of Tx and Rx queues. And the master switch is disabled by + * default. + */ + if (txq->tx_deferred_start) + hns3_enable_txq(txq, false); + else + hns3_enable_txq(txq, true); + } + return 0; + +start_txqs_fail: + for (j = 0; j < i; j++) { + txq = hw->data->tx_queues[j]; + hns3_enable_txq(txq, false); + } + return -EINVAL; +} + +int +hns3_start_all_rxqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq; + uint16_t i, j; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (!rxq) { + hns3_err(hw, "Rx queue %u not available or setup.", i); + goto start_rxqs_fail; + } + /* + * Rx queue is enabled by default. Therefore, the Rx queues + * needs to be disabled when deferred_start is set. There is + * another master switch used to control the enabling of a pair + * of Tx and Rx queues. And the master switch is disabled by + * default. 
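+ * In other words, traffic flows on a queue only when both its
+ * per-queue HNS3_RING_RX_EN_REG bit and the pair-level
+ * HNS3_RING_EN_REG bit (set later via hns3_start_tqps() ->
+ * hns3_enable_all_queues()) are turned on.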
+ */ + if (rxq->rx_deferred_start) + hns3_enable_rxq(rxq, false); + else + hns3_enable_rxq(rxq, true); + } + return 0; + +start_rxqs_fail: + for (j = 0; j < i; j++) { + rxq = hw->data->rx_queues[j]; + hns3_enable_rxq(rxq, false); + } + return -EINVAL; +} + +void +hns3_restore_tqp_enable_state(struct hns3_hw *hw) +{ + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint16_t i; + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (rxq != NULL) + hns3_enable_rxq(rxq, rxq->enabled); + } + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (txq != NULL) + hns3_enable_txq(txq, txq->enabled); + } +} + +void +hns3_stop_all_txqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq; + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (!txq) + continue; + hns3_enable_txq(txq, false); } } @@ -389,7 +547,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false); - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); req->stream_id = 0; hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0); @@ -410,18 +568,19 @@ hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable) hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false); req = (struct hns3_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0); - ret = hns3_cmd_send(hw, &desc, 1); if (ret) - hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret); + hns3_err(hw, "send tqp reset cmd error, queue_id = %u, " + "ret = %d", queue_id, ret); return ret; } static int -hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id) +hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id, + uint8_t *reset_status) { struct hns3_reset_tqp_queue_cmd *req; struct hns3_cmd_desc desc; @@ -430,23 +589,24 @@ hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id) hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true); req = (struct hns3_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hns3_err(hw, "Get reset status error, ret =%d", ret); + hns3_err(hw, "get tqp reset status error, queue_id = %u, " + "ret = %d.", queue_id, ret); return ret; } - - return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + return ret; } static int -hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) +hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) { #define HNS3_TQP_RESET_TRY_MS 200 + uint8_t reset_status; uint64_t end; - int reset_status; int ret; ret = hns3_tqp_enable(hw, queue_id, false); @@ -463,21 +623,23 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret); return ret; } - ret = -ETIMEDOUT; end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS; do { /* Wait for tqp hw reset */ rte_delay_ms(HNS3_POLL_RESPONE_MS); - reset_status = hns3_get_reset_status(hw, queue_id); - if (reset_status) { - ret = 0; + ret = 
hns3_get_tqp_reset_status(hw, queue_id, &reset_status); + if (ret) + goto tqp_reset_fail; + + if (reset_status) break; - } } while (get_timeofday_ms() < end); - if (ret) { - hns3_err(hw, "Reset TQP fail, ret = %d", ret); - return ret; + if (!reset_status) { + ret = -ETIMEDOUT; + hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d", + queue_id, ret); + goto tqp_reset_fail; } ret = hns3_send_reset_tqp_cmd(hw, queue_id, false); @@ -485,6 +647,10 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret); return ret; + +tqp_reset_fail: + hns3_send_reset_tqp_cmd(hw, queue_id, false); + return ret; } static int @@ -500,28 +666,33 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) memcpy(msg_data, &queue_id, sizeof(uint16_t)); - return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, + ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, sizeof(msg_data), true, NULL, 0); + if (ret) + hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", + queue_id, ret); + return ret; } static int -hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id) +hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id) { struct hns3_hw *hw = &hns->hw; + if (hns->is_vf) return hns3vf_reset_tqp(hw, queue_id); else - return hns3_reset_tqp(hw, queue_id); + return hns3pf_reset_tqp(hw, queue_id); } int -hns3_reset_all_queues(struct hns3_adapter *hns) +hns3_reset_all_tqps(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; int ret, i; for (i = 0; i < hw->cfg_max_queues; i++) { - ret = hns3_reset_queue(hns, i); + ret = hns3_reset_tqp(hns, i); if (ret) { hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret); return ret; @@ -530,6 +701,121 @@ hns3_reset_all_queues(struct hns3_adapter *hns) return 0; } +static int +hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type, bool enable) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int queue_direction; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id); + queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1; + req->queue_direction = rte_cpu_to_le_16(queue_direction); + hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "send queue reset cmd error, queue_id = %u, " + "queue_type = %s, ret = %d.", queue_id, + queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret); + return ret; +} + +static int +hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type, + uint8_t *reset_status) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int queue_direction; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id); + queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1; + req->queue_direction = rte_cpu_to_le_16(queue_direction); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "get queue reset status error, queue_id = %u " + "queue_type = %s, ret = %d.", queue_id, + queue_type == HNS3_RING_TYPE_TX ? 
"Tx" : "Rx", ret); + return ret; + } + + *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + return ret; +} + +static int +hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type) +{ +#define HNS3_QUEUE_RESET_TRY_MS 200 + struct hns3_tx_queue *txq; + struct hns3_rx_queue *rxq; + uint32_t reset_wait_times; + uint32_t max_wait_times; + uint8_t reset_status; + int ret; + + if (queue_type == HNS3_RING_TYPE_TX) { + txq = hw->data->tx_queues[queue_id]; + hns3_enable_txq(txq, false); + } else { + rxq = hw->data->rx_queues[queue_id]; + hns3_enable_rxq(rxq, false); + } + + ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true); + if (ret) { + hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret); + return ret; + } + + reset_wait_times = 0; + max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS; + while (reset_wait_times < max_wait_times) { + /* Wait for queue hw reset */ + rte_delay_ms(HNS3_POLL_RESPONE_MS); + ret = hns3_get_queue_reset_status(hw, queue_id, + queue_type, &reset_status); + if (ret) + goto queue_reset_fail; + + if (reset_status) + break; + reset_wait_times++; + } + + if (!reset_status) { + hns3_err(hw, "reset queue timeout, queue_id = %u, " + "queue_type = %s", queue_id, + queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx"); + ret = -ETIMEDOUT; + goto queue_reset_fail; + } + + ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); + if (ret) + hns3_err(hw, "deassert queue reset fail, ret = %d.", ret); + + return ret; + +queue_reset_fail: + hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); + return ret; +} + + void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, uint8_t gl_idx, uint16_t gl_value) @@ -572,7 +858,12 @@ hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value) { uint32_t addr; - if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL) + /* + * int_ql_max == 0 means the hardware does not support QL, + * QL regs config is not permitted if QL is not supported, + * here just return. 
+ */ + if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE) return; addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; @@ -642,7 +933,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) } static int -hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; @@ -653,22 +944,25 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx]; ret = hns3_alloc_rx_queue_mbufs(hw, rxq); if (ret) { - hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d", + hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.", idx, ret); return ret; } rxq->next_to_use = 0; + rxq->rx_rearm_start = 0; rxq->rx_free_hold = 0; + rxq->rx_rearm_nb = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; hns3_init_rx_queue_hw(rxq); + hns3_rxq_vec_setup(rxq); return 0; } static void -hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; @@ -676,13 +970,14 @@ hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx]; rxq->next_to_use = 0; rxq->rx_free_hold = 0; + rxq->rx_rearm_start = 0; + rxq->rx_rearm_nb = 0; hns3_init_rx_queue_hw(rxq); } static void -hns3_init_tx_queue(struct hns3_tx_queue *queue) +hns3_init_txq(struct hns3_tx_queue *txq) { - struct hns3_tx_queue *txq = queue; struct hns3_desc *desc; int i; @@ -699,26 +994,6 @@ hns3_init_tx_queue(struct hns3_tx_queue *queue) hns3_init_tx_queue_hw(txq); } -static void -hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) -{ - struct hns3_hw *hw = &hns->hw; - struct hns3_tx_queue *txq; - - txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx]; - hns3_init_tx_queue(txq); -} - -static void -hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) -{ - struct hns3_hw *hw = &hns->hw; - struct hns3_tx_queue *txq; - - txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx]; - hns3_init_tx_queue(txq); -} - static void hns3_init_tx_ring_tc(struct hns3_adapter *hns) { @@ -745,38 +1020,41 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns) } static int -hns3_start_rx_queues(struct hns3_adapter *hns) +hns3_init_rx_queues(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; - int i, j; + uint16_t i, j; int ret; /* Initialize RSS for queues */ ret = hns3_config_rss(hns); if (ret) { - hns3_err(hw, "Failed to configure rss %d", ret); + hns3_err(hw, "failed to configure rss, ret = %d.", ret); return ret; } for (i = 0; i < hw->data->nb_rx_queues; i++) { rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) + if (!rxq) { + hns3_err(hw, "Rx queue %u not available or setup.", i); + goto out; + } + + if (rxq->rx_deferred_start) continue; - ret = hns3_dev_rx_queue_start(hns, i); + + ret = hns3_init_rxq(hns, i); if (ret) { - hns3_err(hw, "Failed to start No.%d rx queue: %d", i, + hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i, ret); goto out; } } - for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) { - rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) - continue; - hns3_fake_rx_queue_start(hns, i); - } + for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) + hns3_init_fake_rxq(hns, i); + return 0; out: @@ -788,73 
+1066,137 @@ out: return ret; } -static void -hns3_start_tx_queues(struct hns3_adapter *hns) +static int +hns3_init_tx_queues(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; struct hns3_tx_queue *txq; - int i; + uint16_t i; for (i = 0; i < hw->data->nb_tx_queues; i++) { txq = (struct hns3_tx_queue *)hw->data->tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) + if (!txq) { + hns3_err(hw, "Tx queue %u not available or setup.", i); + return -EINVAL; + } + + if (txq->tx_deferred_start) continue; - hns3_dev_tx_queue_start(hns, i); + hns3_init_txq(txq); } for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) { txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) - continue; - hns3_fake_tx_queue_start(hns, i); + hns3_init_txq(txq); } - hns3_init_tx_ring_tc(hns); + + return 0; } /* - * Start all queues. - * Note: just init and setup queues, and don't enable queue rx&tx. + * Init all queues. + * Note: just init and setup queues, and don't enable tqps. */ int -hns3_start_queues(struct hns3_adapter *hns, bool reset_queue) +hns3_init_queues(struct hns3_adapter *hns, bool reset_queue) { struct hns3_hw *hw = &hns->hw; int ret; if (reset_queue) { - ret = hns3_reset_all_queues(hns); + ret = hns3_reset_all_tqps(hns); if (ret) { - hns3_err(hw, "Failed to reset all queues %d", ret); + hns3_err(hw, "failed to reset all queues, ret = %d.", + ret); return ret; } } - ret = hns3_start_rx_queues(hns); + ret = hns3_init_rx_queues(hns); if (ret) { - hns3_err(hw, "Failed to start rx queues: %d", ret); + hns3_err(hw, "failed to init rx queues, ret = %d.", ret); return ret; } - hns3_start_tx_queues(hns); + ret = hns3_init_tx_queues(hns); + if (ret) { + hns3_dev_release_mbufs(hns); + hns3_err(hw, "failed to init tx queues, ret = %d.", ret); + } + + return ret; +} - return 0; +void +hns3_start_tqps(struct hns3_hw *hw) +{ + struct hns3_tx_queue *txq; + struct hns3_rx_queue *rxq; + uint16_t i; + + hns3_enable_all_queues(hw, true); + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (txq->enabled) + hw->data->tx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (rxq->enabled) + hw->data->rx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } +} + +void +hns3_stop_tqps(struct hns3_hw *hw) +{ + uint16_t i; + + hns3_enable_all_queues(hw, false); + + for (i = 0; i < hw->data->nb_tx_queues; i++) + hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + for (i = 0; i < hw->data->nb_rx_queues; i++) + hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } +/* + * Iterate over all Rx Queue, and call the callback() function for each Rx + * queue. + * + * @param[in] dev + * The target eth dev. + * @param[in] callback + * The function to call for each queue. + * if callback function return nonzero will stop iterate and return it's value + * @param[in] arg + * The arguments to provide the callback function with. + * + * @return + * 0 on success, otherwise with errno set. 
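+ *
+ * A minimal usage sketch; the callback name below is
+ * hypothetical and shown for illustration only:
+ *
+ *	static int
+ *	count_rxqs(struct hns3_rx_queue *rxq, void *arg)
+ *	{
+ *		RTE_SET_USED(rxq);
+ *		(*(uint32_t *)arg)++;
+ *		return 0;
+ *	}
+ *
+ *	uint32_t nb_rxqs = 0;
+ *	int ret = hns3_rxq_iterate(dev, count_rxqs, &nb_rxqs);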
+ */ int -hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue) +hns3_rxq_iterate(struct rte_eth_dev *dev, + int (*callback)(struct hns3_rx_queue *, void *), void *arg) { - struct hns3_hw *hw = &hns->hw; + uint32_t i; int ret; - hns3_enable_all_queues(hw, false); - if (reset_queue) { - ret = hns3_reset_all_queues(hns); - if (ret) { - hns3_err(hw, "Failed to reset all queues %d", ret); + if (dev->data->rx_queues == NULL) + return -EINVAL; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ret = callback(dev->data->rx_queues[i], arg); + if (ret != 0) return ret; - } } + return 0; } @@ -870,7 +1212,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (rxq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d rx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u rx ring!", q_info->idx); return NULL; } @@ -878,12 +1220,18 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, /* Allocate rx ring hardware descriptors. */ rxq->queue_id = q_info->idx; rxq->nb_rx_desc = q_info->nb_desc; - rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc); + + /* + * Allocate a litter more memory because rx vector functions + * don't check boundaries each time. + */ + rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) * + sizeof(struct hns3_desc); rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, rx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (rx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!", q_info->idx); hns3_rx_queue_release(rxq); return NULL; @@ -892,7 +1240,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; rxq->rx_ring_phys_addr = rx_mz->iova; - hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx, rxq->rx_ring_phys_addr); return rxq; @@ -920,7 +1268,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "rx_fake_ring"; rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); if (rxq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx); return -ENOMEM; } @@ -957,7 +1305,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (txq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d tx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u tx ring!", q_info->idx); return NULL; } @@ -970,7 +1318,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, tx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (tx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!", q_info->idx); hns3_tx_queue_release(txq); return NULL; @@ -979,7 +1327,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq->tx_ring = (struct hns3_desc *)tx_mz->addr; txq->tx_ring_phys_addr = tx_mz->iova; - hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx, txq->tx_ring_phys_addr); /* Clear tx bd */ @@ -1014,12 +1362,13 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "tx_fake_ring"; txq = 
hns3_alloc_txq_and_dma_zone(dev, &q_info); if (txq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx); return -ENOMEM; } /* Don't need alloc sw_ring, because upper applications don't use it */ txq->sw_ring = NULL; + txq->free = NULL; txq->hns = hns; txq->tx_deferred_start = false; @@ -1041,7 +1390,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) { uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues; void **rxq; - uint8_t i; + uint16_t i; if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) { /* first time configuration */ @@ -1088,7 +1437,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) { uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues; void **txq; - uint8_t i; + uint16_t i; if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) { /* first time configuration */ @@ -1140,13 +1489,12 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, int ret; /* Setup new number of fake RX/TX queues and reconfigure device. */ - hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q; tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q; ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q); if (ret) { hns3_err(hw, "Fail to configure fake rx queues: %d", ret); - goto cfg_fake_rx_q_fail; + return ret; } ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); @@ -1179,8 +1527,6 @@ setup_fake_rx_q_fail: (void)hns3_fake_tx_queue_config(hw, 0); cfg_fake_tx_q_fail: (void)hns3_fake_rx_queue_config(hw, 0); -cfg_fake_rx_q_fail: - hw->cfg_max_queues = 0; return ret; } @@ -1196,7 +1542,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns) if (dev_data->rx_queues) for (i = 0; i < dev_data->nb_rx_queues; i++) { rxq = dev_data->rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) + if (rxq == NULL) continue; hns3_rx_queue_release_mbufs(rxq); } @@ -1204,7 +1550,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns) if (dev_data->tx_queues) for (i = 0; i < dev_data->nb_tx_queues; i++) { txq = dev_data->tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) + if (txq == NULL) continue; hns3_tx_queue_release_mbufs(txq); } @@ -1238,7 +1584,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len) vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); - if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE) return -EINVAL; @@ -1253,14 +1598,53 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len) } static int -hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp, uint16_t nb_desc, - uint16_t *buf_size) +hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, + uint16_t nb_desc) { - if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || - nb_desc % HNS3_ALIGN_RING_DESC) { - hns3_err(hw, "Number (%u) of rx descriptors is invalid", - nb_desc); + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode; + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + uint16_t min_vec_bds; + + /* + * HNS3 hardware network engine set scattered as default. If the driver + * is not work in scattered mode and the pkts greater than buf_size + * but smaller than max_rx_pkt_len will be distributed to multiple BDs. + * Driver cannot handle this situation. 
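+ * For example, with an rx_buf_len of 2048 bytes and a
+ * max_rx_pkt_len of 3000 bytes, a 2500-byte packet would span
+ * two BDs; the check below therefore rejects any
+ * max_rx_pkt_len larger than buf_size when scattered Rx is
+ * off.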
+ */ + if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) { + hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater " + "than rx_buf_len if scattered is off."); + return -EINVAL; + } + + if (pkt_burst == hns3_recv_pkts_vec) { + min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH + + HNS3_DEFAULT_RX_BURST; + if (nb_desc < min_vec_bds || + nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) { + hns3_err(hw, "if Rx burst mode is vector, " + "number of descriptor is required to be " + "bigger than min vector bds:%u, and could be " + "divided by rxq rearm thresh:%u.", + min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH); + return -EINVAL; + } + } + return 0; +} + +static int +hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp, uint16_t nb_desc, + uint16_t *buf_size) +{ + int ret; + + if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || + nb_desc % HNS3_ALIGN_RING_DESC) { + hns3_err(hw, "Number (%u) of rx descriptors is invalid", + nb_desc); return -EINVAL; } @@ -1276,9 +1660,33 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, return -EINVAL; } + if (hw->data->dev_started) { + ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc); + if (ret) { + hns3_err(hw, "Rx queue runtime setup fail."); + return ret; + } + } + return 0; } +uint32_t +hns3_get_tqp_reg_offset(uint16_t queue_id) +{ + uint32_t reg_offset; + + /* Need an extend offset to config queue > 1024 */ + if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID) + reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE; + else + reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET + + (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) * + HNS3_TQP_REG_SIZE; + + return reg_offset; +} + int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_rxconf *conf, @@ -1292,11 +1700,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, int rx_entry_len; int ret; - if (dev->data->dev_started) { - hns3_err(hw, "rx_queue_setup after dev_start no supported"); - return -EINVAL; - } - ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size); if (ret) return ret; @@ -1324,9 +1727,15 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, rxq->mb_pool = mp; rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ? conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH; + rxq->rx_deferred_start = conf->rx_deferred_start; + if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { + hns3_warn(hw, "deferred start is not supported."); + rxq->rx_deferred_start = false; + } - rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc; + rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) * + sizeof(struct hns3_entry); rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len, RTE_CACHE_LINE_SIZE, socket_id); if (rxq->sw_ring == NULL) { @@ -1337,13 +1746,30 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, rxq->next_to_use = 0; rxq->rx_free_hold = 0; + rxq->rx_rearm_start = 0; + rxq->rx_rearm_nb = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; rxq->port_id = dev->data->port_id; - rxq->pvid_state = hw->port_base_vlan_cfg.state; + /* + * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, + * the pvid_sw_discard_en in the queue struct should not be changed, + * because PVID-related operations do not need to be processed by PMD + * driver. 
For hns3 VF device, whether it needs to process PVID depends + * on the configuration of PF kernel mode netdevice driver. And the + * related PF configuration is delivered through the mailbox and finally + * reflectd in port_base_vlan_cfg. + */ + if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) + rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == + HNS3_PORT_BASE_VLAN_ENABLE; + else + rxq->pvid_sw_discard_en = false; rxq->configured = true; rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + idx * HNS3_TQP_REG_SIZE); + rxq->io_base = (void *)((char *)hw->io_base + + hns3_get_tqp_reg_offset(idx)); rxq->io_head_reg = (volatile void *)((char *)rxq->io_base + HNS3_RING_RX_HEAD_REG); rxq->rx_buf_len = rx_buf_size; @@ -1424,36 +1850,60 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L2_ETHER_QINQ, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, RTE_PTYPE_UNKNOWN }; if (dev->rx_pkt_burst == hns3_recv_pkts || - dev->rx_pkt_burst == hns3_recv_scattered_pkts) + dev->rx_pkt_burst == hns3_recv_scattered_pkts || + dev->rx_pkt_burst == hns3_recv_pkts_vec || + dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) return ptypes; return NULL; } -void -hns3_init_rx_ptype_tble(struct rte_eth_dev *dev) -{ - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_ptype_table *tbl = &hns->ptype_tbl; - - memset(tbl, 0, sizeof(*tbl)); - - tbl->l2table[0] = RTE_PTYPE_L2_ETHER; - tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ; - tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN; - tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN; - - tbl->l3table[0] = RTE_PTYPE_L3_IPV4; - tbl->l3table[1] = RTE_PTYPE_L3_IPV6; - tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP; - tbl->l3table[3] = RTE_PTYPE_L2_ETHER; - tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT; - tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT; - tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP; +static void +hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) +{ + tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER; + tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; + tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER; + + tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN; + tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT; + tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN; + + tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ; + tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | 
RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT; + tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ; tbl->l4table[0] = RTE_PTYPE_L4_UDP; tbl->l4table[1] = RTE_PTYPE_L4_TCP; @@ -1461,37 +1911,60 @@ hns3_init_rx_ptype_tble(struct rte_eth_dev *dev) tbl->l4table[3] = RTE_PTYPE_L4_SCTP; tbl->l4table[4] = RTE_PTYPE_L4_IGMP; tbl->l4table[5] = RTE_PTYPE_L4_ICMP; +} +static void +hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) +{ tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER; tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN; tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ; tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4; tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6; - tbl->inner_l3table[2] = 0; - tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER; + /* There is not a ptype for inner ARP/RARP */ + tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN; + tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN; tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT; tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT; tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP; tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP; - tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE; + /* There is not a ptype for inner GRE */ + tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN; tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP; - tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP; + /* There is not a ptype for inner IGMP */ + tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN; tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP; + tbl->ol2table[0] = RTE_PTYPE_L2_ETHER; + tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN; + tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ; + tbl->ol3table[0] = RTE_PTYPE_L3_IPV4; tbl->ol3table[1] = RTE_PTYPE_L3_IPV6; - tbl->ol3table[2] = 0; - tbl->ol3table[3] = 0; + tbl->ol3table[2] = RTE_PTYPE_UNKNOWN; + tbl->ol3table[3] = RTE_PTYPE_UNKNOWN; tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT; tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT; - tbl->ol4table[0] = 0; + tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN; tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; } +void +hns3_init_rx_ptype_tble(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_ptype_table *tbl = &hns->ptype_tbl; + + memset(tbl, 0, sizeof(*tbl)); + + hns3_init_non_tunnel_ptype_tbl(tbl); + hns3_init_tunnel_ptype_tbl(tbl); +} + static inline void hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, uint32_t l234_info, const struct hns3_desc *rxd) @@ -1526,7 +1999,7 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, }; strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S); - report_mode = report_type[rxq->pvid_state][strip_status]; + report_mode = report_type[rxq->pvid_sw_discard_en][strip_status]; switch (report_mode) { case HNS3_NO_STRP_VLAN_VLD: mb->vlan_tci = 0; @@ -1539,6 +2012,9 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag); return; + default: + mb->vlan_tci = 0; + return; } } @@ -1912,6 +2388,33 @@ pkt_err: return nb_rx; } +void __rte_weak +hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq) +{ +} + +int __rte_weak +hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev) +{ + return -ENOTSUP; +} + +uint16_t __rte_weak +hns3_recv_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + 
__rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __rte_weak +hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + int hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) @@ -1922,6 +2425,8 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, } burst_infos[] = { { hns3_recv_pkts, "Scalar" }, { hns3_recv_scattered_pkts, "Scalar Scattered" }, + { hns3_recv_pkts_vec, "Vector Neon" }, + { hns3_recv_pkts_vec_sve, "Vector Sve" }, }; eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; @@ -1940,12 +2445,26 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, return ret; } +static bool +hns3_check_sve_support(void) +{ +#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE)) + return true; +#endif + return false; +} + static eth_rx_burst_t hns3_get_rx_function(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; uint64_t offloads = dev->data->dev_conf.rxmode.offloads; + if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0) + return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve : + hns3_recv_pkts_vec; + if (hns->rx_simple_allowed && !dev->data->scattered_rx && (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0) return hns3_recv_pkts; @@ -1975,8 +2494,8 @@ hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf, if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh || rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP || free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) { - hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc " - "(%d) of tx descriptors for port=%d queue=%d check " + hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc " + "(%u) of tx descriptors for port=%u queue=%u check " "fail!", rs_thresh, free_thresh, nb_desc, hw->data->port_id, idx); @@ -2009,11 +2528,6 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, int tx_entry_len; int ret; - if (dev->data->dev_started) { - hns3_err(hw, "tx_queue_setup after dev_start no supported"); - return -EINVAL; - } - ret = hns3_tx_queue_conf_check(hw, conf, nb_desc, &tx_rs_thresh, &tx_free_thresh, idx); if (ret) @@ -2037,6 +2551,11 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, } txq->tx_deferred_start = conf->tx_deferred_start; + if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { + hns3_warn(hw, "deferred start is not supported."); + txq->tx_deferred_start = false; + } + tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc; txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len, RTE_CACHE_LINE_SIZE, socket_id); @@ -2052,14 +2571,38 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, txq->tx_bd_ready = txq->nb_tx_desc - 1; txq->tx_free_thresh = tx_free_thresh; txq->tx_rs_thresh = tx_rs_thresh; + txq->free = rte_zmalloc_socket("hns3 TX mbuf free array", + sizeof(struct rte_mbuf *) * txq->tx_rs_thresh, + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq->free) { + hns3_err(hw, "failed to allocate tx mbuf free array!"); + hns3_tx_queue_release(txq); + return -ENOMEM; + } + txq->port_id = dev->data->port_id; - txq->pvid_state = hw->port_base_vlan_cfg.state; + /* + * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, + * the pvid_sw_shift_en in the queue struct should not be changed, + 
* because PVID-related operations do not need to be processed by PMD + * driver. For hns3 VF device, whether it needs to process PVID depends + * on the configuration of PF kernel mode netdev driver. And the + * related PF configuration is delivered through the mailbox and finally + * reflectd in port_base_vlan_cfg. + */ + if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) + txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == + HNS3_PORT_BASE_VLAN_ENABLE; + else + txq->pvid_sw_shift_en = false; + txq->max_non_tso_bd_num = hw->max_non_tso_bd_num; txq->configured = true; - txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + - idx * HNS3_TQP_REG_SIZE); + txq->io_base = (void *)((char *)hw->io_base + + hns3_get_tqp_reg_offset(idx)); txq->io_tail_reg = (volatile void *)((char *)txq->io_base + HNS3_RING_TX_TAIL_REG); txq->min_tx_pkt_len = hw->min_tx_pkt_len; + txq->tso_mode = hw->tso_mode; txq->over_length_pkt_cnt = 0; txq->exceed_limit_bd_pkt_cnt = 0; txq->exceed_limit_bd_reassem_fail = 0; @@ -2109,44 +2652,6 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) txq->tx_bd_ready = tx_bd_ready; } -static int -hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags, - struct rte_mbuf *rxm, uint8_t *l2_len) -{ - uint64_t tun_flags; - uint8_t ol4_len; - uint32_t otmp; - - tun_flags = ol_flags & PKT_TX_TUNNEL_MASK; - if (tun_flags == 0) - return 0; - - otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec); - switch (tun_flags) { - case PKT_TX_TUNNEL_GENEVE: - case PKT_TX_TUNNEL_VXLAN: - *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN; - break; - case PKT_TX_TUNNEL_GRE: - /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(GRE) length and tunneling length. - */ - ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S); - *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT); - break; - default: - /* For non UDP / GRE tunneling, drop the tunnel packet */ - return -EINVAL; - } - hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - rxm->outer_l2_len >> HNS3_L2_LEN_UNIT); - desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp); - - return 0; -} - int hns3_config_gro(struct hns3_hw *hw, bool en) { @@ -2191,31 +2696,15 @@ hns3_pkt_is_tso(struct rte_mbuf *m) } static void -hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags, - uint32_t paylen, struct rte_mbuf *rxm) +hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm) { - uint8_t l2_len = rxm->l2_len; - uint32_t tmp; - if (!hns3_pkt_is_tso(rxm)) return; - if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len)) - return; - if (paylen <= rxm->tso_segsz) return; - tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len); - hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1); - hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); - hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - l2_len >> HNS3_L2_LEN_UNIT); - desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp); + desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B)); desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz); } @@ -2240,7 +2729,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, rxm->outer_l2_len + rxm->outer_l3_len : 0; paylen = rxm->pkt_len - hdr_len; desc->tx.paylen = rte_cpu_to_le_32(paylen); - hns3_set_tso(desc, ol_flags, paylen, rxm); + hns3_set_tso(desc, 
paylen, rxm); /* * Currently, hardware doesn't support more than two layers VLAN offload @@ -2254,7 +2743,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * To avoid the VLAN of Tx descriptor is overwritten by PVID, it should * be added to the position close to the IP header when PVID is enabled. */ - if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT | + if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B)); @@ -2267,44 +2756,31 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, } if (ol_flags & PKT_TX_QINQ_PKT || - ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) { + ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) { desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B)); desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci); } } -static int -hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool, - uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf) +static inline int +hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf, + struct rte_mbuf **alloc_mbuf) { - struct rte_mbuf *new_mbuf = NULL; - struct rte_eth_dev *dev; - struct rte_mbuf *temp; - struct hns3_hw *hw; +#define MAX_NON_TSO_BD_PER_PKT 18 + struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT]; uint16_t i; /* Allocate enough mbufs */ - for (i = 0; i < nb_new_buf; i++) { - temp = rte_pktmbuf_alloc(mb_pool); - if (unlikely(temp == NULL)) { - dev = &rte_eth_devices[txq->port_id]; - hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - hns3_err(hw, "Failed to alloc TX mbuf port_id=%d," - "queue_id=%d in reassemble tx pkts.", - txq->port_id, txq->queue_id); - rte_pktmbuf_free(new_mbuf); - return -ENOMEM; - } - temp->next = new_mbuf; - new_mbuf = temp; - } - - if (new_mbuf == NULL) + if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf)) return -ENOMEM; - new_mbuf->nb_segs = nb_new_buf; - *alloc_mbuf = new_mbuf; + for (i = 0; i < nb_new_buf - 1; i++) + pkt_segs[i]->next = pkt_segs[i + 1]; + + pkt_segs[nb_new_buf - 1]->next = NULL; + pkt_segs[0]->nb_segs = nb_new_buf; + *alloc_mbuf = pkt_segs[0]; return 0; } @@ -2324,10 +2800,9 @@ hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt) } static int -hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, - struct rte_mbuf **new_pkt) +hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt, + uint8_t max_non_tso_bd_num) { - struct hns3_tx_queue *txq = tx_queue; struct rte_mempool *mb_pool; struct rte_mbuf *new_mbuf; struct rte_mbuf *temp_new; @@ -2339,7 +2814,6 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, uint16_t len_s; uint16_t len_d; uint16_t len; - uint16_t i; int ret; char *s; char *d; @@ -2347,7 +2821,7 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, mb_pool = tx_pkt->pool; buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM; nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1; - if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT) + if (nb_new_buf > max_non_tso_bd_num) return -EINVAL; last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size; @@ -2355,7 +2829,7 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, last_buf_len = buf_size; /* Allocate enough mbufs */ - ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf); + ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf); if (ret) return ret; @@ -2364,12 +2838,9 @@ hns3_reassemble_tx_pkts(void 
*tx_queue, struct rte_mbuf *tx_pkt, s = rte_pktmbuf_mtod(temp, char *); len_s = rte_pktmbuf_data_len(temp); temp_new = new_mbuf; - for (i = 0; i < nb_new_buf; i++) { + while (temp != NULL && temp_new != NULL) { d = rte_pktmbuf_mtod(temp_new, char *); - if (i < nb_new_buf - 1) - buf_len = buf_size; - else - buf_len = last_buf_len; + buf_len = temp_new->next == NULL ? last_buf_len : buf_size; len_d = buf_len; while (len_d) { @@ -2403,186 +2874,220 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, } static void -hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec) +hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec) { uint32_t tmp = *ol_type_vlan_len_msec; + uint64_t ol_flags = m->ol_flags; /* (outer) IP header type */ if (ol_flags & PKT_TX_OUTER_IPV4) { - /* OL3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); if (ol_flags & PKT_TX_OUTER_IP_CKSUM) - hns3_set_field(tmp, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); else - hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); } else if (ol_flags & PKT_TX_OUTER_IPV6) { - hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV6); - /* OL3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); } - + /* OL3 header size, defined in 4 bytes */ + tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + m->outer_l3_len >> HNS3_L3_LEN_UNIT); *ol_type_vlan_len_msec = tmp; } static int -hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec, - struct rte_net_hdr_lens *hdr_lens) +hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, + uint32_t *type_cs_vlan_tso_len) { - uint32_t tmp = *ol_type_vlan_len_msec; - uint8_t l4_len; - - /* OL2 header size, defined in 2 bytes */ - hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); +#define HNS3_NVGRE_HLEN 8 + uint32_t tmp_outer = *ol_type_vlan_len_msec; + uint32_t tmp_inner = *type_cs_vlan_tso_len; + uint64_t ol_flags = m->ol_flags; + uint16_t inner_l2_len; - /* L4TUNT: L4 Tunneling Type */ switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_VXLAN_GPE: case PKT_TX_TUNNEL_GENEVE: case PKT_TX_TUNNEL_VXLAN: - /* MAC in UDP tunnelling packet, include VxLAN */ - hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */ + tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP); /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(UDP) length and tunneling length. + * The inner l2 length of mbuf is the sum of outer l4 length, + * tunneling header length and inner l2 length for a tunnel + * packect. But in hns3 tx descriptor, the tunneling header + * length is contained in the field of outer L4 length. + * Therefore, driver need to calculate the outer L4 length and + * inner L2 length. 
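+ * E.g. for a VxLAN packet the application sets mbuf->l2_len to
+ * 8 (outer UDP) + 8 (VxLAN) + 14 (inner Ethernet) = 30 bytes;
+ * RTE_ETHER_VXLAN_HLEN (16) is written to the descriptor's
+ * outer L4 length field and the remaining 14 bytes become the
+ * inner L2 length.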
*/ - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - (uint8_t)RTE_ETHER_VXLAN_HLEN >> - HNS3_L4_LEN_UNIT); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (uint8_t)RTE_ETHER_VXLAN_HLEN >> + HNS3_L4_LEN_UNIT); + + inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN; break; case PKT_TX_TUNNEL_GRE: - hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(GRE) length and tunneling length. + * For NVGRE tunnel packect, the outer L4 is empty. So only + * fill the NVGRE header length to the outer L4 field. */ - l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len; - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - l4_len >> HNS3_L4_LEN_UNIT); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT); + + inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN; break; default: /* For non UDP / GRE tunneling, drop the tunnel packet */ return -EINVAL; } - *ol_type_vlan_len_msec = tmp; + tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + inner_l2_len >> HNS3_L2_LEN_UNIT); + /* OL2 header size, defined in 2 bytes */ + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + m->outer_l2_len >> HNS3_L2_LEN_UNIT); + + *type_cs_vlan_tso_len = tmp_inner; + *ol_type_vlan_len_msec = tmp_outer; return 0; } static int -hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - uint64_t ol_flags, - struct rte_net_hdr_lens *hdr_lens) +hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, + uint16_t tx_desc_id) { struct hns3_desc *tx_ring = txq->tx_ring; struct hns3_desc *desc = &tx_ring[tx_desc_id]; - uint32_t value = 0; + uint32_t tmp_outer = 0; + uint32_t tmp_inner = 0; int ret; - hns3_parse_outer_params(ol_flags, &value); - ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens); - if (ret) - return -EINVAL; + /* + * The tunnel header is contained in the inner L2 header field of the + * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, + * there is a need that switching between them. To avoid multiple + * calculations, the length of the L2 header include the outer and + * inner, will be filled during the parsing of tunnel packects. + */ + if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) { + /* + * For non tunnel type the tunnel type id is 0, so no need to + * assign a value to it. Only the inner(normal) L2 header length + * is assigned. + */ + tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT); + } else { + /* + * If outer csum is not offload, the outer length may be filled + * with 0. And the length of the outer header is added to the + * inner l2_len. It would lead a cksum error. So driver has to + * calculate the header length. 
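+ * The fallback below re-parses the packet headers with
+ * rte_net_get_ptype() to recover the real outer L2/L3 lengths,
+ * then shrinks l2_len accordingly so the inner length fields
+ * are consistent again.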
+ */ + if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) && + m->outer_l2_len == 0)) { + struct rte_net_hdr_lens hdr_len; + (void)rte_net_get_ptype(m, &hdr_len, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + m->outer_l3_len = hdr_len.l3_len; + m->outer_l2_len = hdr_len.l2_len; + m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len; + } + hns3_parse_outer_params(m, &tmp_outer); + ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner); + if (ret) + return -EINVAL; + } - desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value); + desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer); + desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner); return 0; } static void -hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) { + uint64_t ol_flags = m->ol_flags; + uint32_t l3_type; uint32_t tmp; + tmp = *type_cs_vlan_tso_len; + if (ol_flags & PKT_TX_IPV4) + l3_type = HNS3_L3T_IPV4; + else if (ol_flags & PKT_TX_IPV6) + l3_type = HNS3_L3T_IPV6; + else + l3_type = HNS3_L3T_NONE; + + /* inner(/normal) L3 header size, defined in 4 bytes */ + tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + m->l3_len >> HNS3_L3_LEN_UNIT); + + tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type); + /* Enable L3 checksum offloads */ - if (ol_flags & PKT_TX_IPV4) { - tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, - HNS3_L3T_IPV4); - /* inner(/normal) L3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); - if (ol_flags & PKT_TX_IP_CKSUM) - hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); - *type_cs_vlan_tso_len = tmp; - } else if (ol_flags & PKT_TX_IPV6) { - tmp = *type_cs_vlan_tso_len; - /* L3T, IPv6 don't do checksum */ - hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, - HNS3_L3T_IPV6); - /* inner(/normal) L3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; - } + if (ol_flags & PKT_TX_IP_CKSUM) + tmp |= BIT(HNS3_TXD_L3CS_B); + *type_cs_vlan_tso_len = tmp; } static void -hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) { + uint64_t ol_flags = m->ol_flags; uint32_t tmp; - /* Enable L4 checksum offloads */ - switch (ol_flags & PKT_TX_L4_MASK) { + switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) { case PKT_TX_TCP_CKSUM: + case PKT_TX_TCP_SEG: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_TCP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case PKT_TX_UDP_CKSUM: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_UDP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case PKT_TX_SCTP_CKSUM: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); - hns3_set_bit(tmp, 
HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: - break; + return; } + tmp |= BIT(HNS3_TXD_L4CS_B); + tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + m->l4_len >> HNS3_L4_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; } static void -hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - uint64_t ol_flags) +hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m, + uint16_t tx_desc_id) { struct hns3_desc *tx_ring = txq->tx_ring; struct hns3_desc *desc = &tx_ring[tx_desc_id]; uint32_t value = 0; - /* inner(/normal) L2 header size, defined in 2 bytes */ - hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); - - hns3_parse_l3_cksum_params(ol_flags, &value); - hns3_parse_l4_cksum_params(ol_flags, &value); + hns3_parse_l3_cksum_params(m, &value); + hns3_parse_l4_cksum_params(m, &value); desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value); } static bool -hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) +hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, + uint32_t max_non_tso_bd_num) { struct rte_mbuf *m_first = tx_pkts; struct rte_mbuf *m_last = tx_pkts; @@ -2597,10 +3102,10 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) * frags greater than gso header len + mss, and the remaining 7 * consecutive frags greater than MSS except the last 7 frags. */ - if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT) + if (bd_num <= max_non_tso_bd_num) return false; - for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1; + for (i = 0; m_last && i < max_non_tso_bd_num - 1; i++, m_last = m_last->next) tot_len += m_last->data_len; @@ -2618,7 +3123,7 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) * ensure the sum of the data length of every 7 consecutive buffer * is greater than mss except the last one. */ - for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) { + for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) { tot_len -= m_first->data_len; tot_len += m_last->data_len; @@ -2636,26 +3141,29 @@ static void hns3_outer_header_cksum_prepare(struct rte_mbuf *m) { uint64_t ol_flags = m->ol_flags; - struct rte_ipv4_hdr *ipv4_hdr; - struct rte_udp_hdr *udp_hdr; - uint32_t paylen, hdr_len; + uint32_t paylen, hdr_len, l4_proto; if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) return; - if (ol_flags & PKT_TX_IPV4) { + if (ol_flags & PKT_TX_OUTER_IPV4) { + struct rte_ipv4_hdr *ipv4_hdr; ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->outer_l2_len); - - if (ol_flags & PKT_TX_IP_CKSUM) + l4_proto = ipv4_hdr->next_proto_id; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) ipv4_hdr->hdr_checksum = 0; + } else { + struct rte_ipv6_hdr *ipv6_hdr; + ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, + m->outer_l2_len); + l4_proto = ipv6_hdr->proto; } - - if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM && - ol_flags & PKT_TX_TCP_SEG) { + /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ + if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) { + struct rte_udp_hdr *udp_hdr; hdr_len = m->l2_len + m->l3_len + m->l4_len; - hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? 
- m->outer_l2_len + m->outer_l3_len : 0; + hdr_len += m->outer_l2_len + m->outer_l3_len; paylen = m->pkt_len - hdr_len; if (paylen <= m->tso_segsz) return; @@ -2712,7 +3220,7 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) struct rte_ether_hdr *eh; struct rte_vlan_hdr *vh; - if (!txq->pvid_state) + if (!txq->pvid_sw_shift_en) return 0; /* @@ -2747,43 +3255,66 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) } #endif -uint16_t -hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +static int +hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m) { - struct rte_mbuf *m; - uint16_t i; int ret; - for (i = 0; i < nb_pkts; i++) { - m = tx_pkts[i]; +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return ret; + } - if (hns3_pkt_is_tso(m) && - (hns3_pkt_need_linearized(m, m->nb_segs) || - hns3_check_tso_pkt_valid(m))) { + ret = hns3_vld_vlan_chk(tx_queue, m); + if (ret != 0) { + rte_errno = EINVAL; + return ret; + } +#endif + if (hns3_pkt_is_tso(m)) { + if (hns3_pkt_need_linearized(m, m->nb_segs, + tx_queue->max_non_tso_bd_num) || + hns3_check_tso_pkt_valid(m)) { rte_errno = EINVAL; - return i; + return -EINVAL; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG - ret = rte_validate_tx_offload(m); - if (ret != 0) { - rte_errno = -ret; - return i; + if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) { + /* + * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means + * hardware support recalculate the TCP pseudo header + * checksum of packets that need TSO, so network driver + * software not need to recalculate it. + */ + hns3_outer_header_cksum_prepare(m); + return 0; } + } - if (hns3_vld_vlan_chk(tx_queue, m)) { - rte_errno = EINVAL; - return i; - } -#endif - ret = rte_net_intel_cksum_prepare(m); - if (ret != 0) { - rte_errno = -ret; - return i; - } + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return ret; + } + + hns3_outer_header_cksum_prepare(m); + + return 0; +} + +uint16_t +hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *m; + uint16_t i; - hns3_outer_header_cksum_prepare(m); + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + if (hns3_prep_pkt_proc(tx_queue, m)) + return i; } return i; @@ -2791,20 +3322,25 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, static int hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens) + struct rte_mbuf *m) { - /* Fill in tunneling parameters if necessary */ - if (m->ol_flags & PKT_TX_TUNNEL_MASK) { - (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK); - if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags, - hdr_lens)) { + struct hns3_desc *tx_ring = txq->tx_ring; + struct hns3_desc *desc = &tx_ring[tx_desc_id]; + + /* Enable checksum offloading */ + if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) { + /* Fill in tunneling parameters if necessary */ + if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) { txq->unsupported_tunnel_pkt_cnt++; - return -EINVAL; + return -EINVAL; } + + hns3_txd_enable_checksum(txq, m, tx_desc_id); + } else { + /* clear the control bit */ + desc->tx.type_cs_vlan_tso_len = 0; + desc->tx.ol_type_vlan_len_msec = 0; } - /* Enable checksum offloading */ - if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) - hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags); return 0; } @@ -2813,6 +3349,7 @@ 
static int hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq) { + uint8_t max_non_tso_bd_num; struct rte_mbuf *new_pkt; int ret; @@ -2828,9 +3365,11 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, return -EINVAL; } - if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) { + max_non_tso_bd_num = txq->max_non_tso_bd_num; + if (unlikely(nb_buf > max_non_tso_bd_num)) { txq->exceed_limit_bd_pkt_cnt++; - ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt); + ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt, + max_non_tso_bd_num); if (ret) { txq->exceed_limit_bd_reassem_fail++; return ret; @@ -2992,7 +3531,6 @@ hns3_xmit_pkts_simple(void *tx_queue, uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct rte_net_hdr_lens hdr_lens = {0}; struct hns3_tx_queue *txq = tx_queue; struct hns3_entry *tx_bak_pkt; struct hns3_desc *tx_ring; @@ -3056,7 +3594,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq)) goto end_of_tx; - if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens)) + if (hns3_parse_cksum(txq, tx_next_use, m_seg)) goto end_of_tx; i = 0; @@ -3105,6 +3643,28 @@ end_of_tx: return nb_tx; } +int __rte_weak +hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev) +{ + return -ENOTSUP; +} + +uint16_t __rte_weak +hns3_xmit_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __rte_weak +hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) @@ -3116,6 +3676,10 @@ hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, info = "Scalar Simple"; else if (pkt_burst == hns3_xmit_pkts) info = "Scalar"; + else if (pkt_burst == hns3_xmit_pkts_vec) + info = "Vector Neon"; + else if (pkt_burst == hns3_xmit_pkts_vec_sve) + info = "Vector Sve"; if (info == NULL) return -EINVAL; @@ -3131,6 +3695,12 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) uint64_t offloads = dev->data->dev_conf.txmode.offloads; struct hns3_adapter *hns = dev->data->dev_private; + if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) { + *prep = NULL; + return hns3_check_sve_support() ? 
hns3_xmit_pkts_vec_sve : + hns3_xmit_pkts_vec; + } + if (hns->tx_simple_allowed && offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) { *prep = NULL; @@ -3175,6 +3745,8 @@ hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->mp = rxq->mb_pool; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->scattered_rx = dev->data->scattered_rx; + /* Report the HW Rx buffer length to user */ + qinfo->rx_buf_size = rxq->rx_buf_len; /* * If there are no available Rx buffer descriptors, incoming packets @@ -3198,3 +3770,139 @@ hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } + +int +hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + int ret; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); + if (ret) { + hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", + rx_queue_id, ret); + return ret; + } + + ret = hns3_init_rxq(hns, rx_queue_id); + if (ret) { + hns3_err(hw, "fail to init Rx queue %u, ret = %d.", + rx_queue_id, ret); + return ret; + } + + hns3_enable_rxq(rxq, true); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +static void +hns3_reset_sw_rxq(struct hns3_rx_queue *rxq) +{ + rxq->next_to_use = 0; + rxq->rx_rearm_start = 0; + rxq->rx_free_hold = 0; + rxq->rx_rearm_nb = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc)); + hns3_rxq_vec_setup(rxq); +} + +int +hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + hns3_enable_rxq(rxq, false); + + hns3_rx_queue_release_mbufs(rxq); + + hns3_reset_sw_rxq(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; + int ret; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); + if (ret) { + hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", + tx_queue_id, ret); + return ret; + } + + hns3_init_txq(txq); + hns3_enable_txq(txq, true); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +int +hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + hns3_enable_txq(txq, false); + hns3_tx_queue_release_mbufs(txq); + /* + * All the mbufs in sw_ring are released and all the pointers in sw_ring + * are set to NULL. If this queue is still called by upper layer, + * residual SW status of this txq may cause these pointers in sw_ring + * which have been set to NULL to be released again. To avoid it, + * reinit the txq. 
+ */ + hns3_init_txq(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +uint32_t +hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + /* + * Number of BDs that have been processed by the driver + * but have not been notified to the hardware. + */ + uint32_t driver_hold_bd_num; + struct hns3_rx_queue *rxq; + uint32_t fbd_num; + + rxq = dev->data->rx_queues[rx_queue_id]; + fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG); + if (dev->rx_pkt_burst == hns3_recv_pkts_vec || + dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) + driver_hold_bd_num = rxq->rx_rearm_nb; + else + driver_hold_bd_num = rxq->rx_free_hold; + + if (fbd_num <= driver_hold_bd_num) + return 0; + else + return fbd_num - driver_hold_bd_num; +}
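
Note on the hns3_rx_queue_count() hunk above: the value it returns is the number of BDs the hardware has filled (HNS3_RING_RX_FBDNUM_REG) minus the BDs the PMD still holds back (rx_rearm_nb on the vector paths, rx_free_hold otherwise). Applications would normally reach it through the generic ethdev occupancy query; the following is a minimal sketch under that assumption, with an illustrative helper name and caller-supplied port/queue ids.

#include <stdio.h>
#include <rte_ethdev.h>

static void
example_log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	/*
	 * For hns3 this resolves to hns3_rx_queue_count(): FBDNUM read from
	 * the ring register minus the descriptors the driver has processed
	 * but not yet returned to hardware.
	 */
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used < 0)
		return;	/* negative errno, e.g. queue not set up or op missing */

	printf("port %u rxq %u: %d descriptors filled by HW\n",
	       port_id, queue_id, used);
}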
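
The reworked hns3_parse_tunneling_params()/hns3_parse_inner_params() in the earlier hunks rely on the mbuf convention that, for a tunnel packet, l2_len covers outer L4 + tunnel header + inner L2, and they split that value back into the descriptor's outer-L4 and inner-L2 length fields. As a minimal illustration of the metadata an application would set for a VXLAN-in-IPv4 TCP packet (assuming the corresponding Tx offloads are enabled on the port; the helper name and the use of default header sizes are purely illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_vxlan.h>
#include <rte_mbuf.h>

static void
example_fill_vxlan_tx_offload(struct rte_mbuf *m)
{
	/* Outer Ethernet and outer IPv4 header lengths. */
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	/*
	 * For a tunnel packet, mbuf l2_len is outer L4 + tunnel header +
	 * inner L2: here UDP(8) + VXLAN(8) + inner Ethernet(14). The PMD
	 * derives inner_l2_len = l2_len - RTE_ETHER_VXLAN_HLEN and fills
	 * RTE_ETHER_VXLAN_HLEN into the outer L4 length descriptor field.
	 */
	m->l2_len = RTE_ETHER_VXLAN_HLEN + sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_TUNNEL_VXLAN | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}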
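
Finally, the per-packet checks factored into hns3_prep_pkt_proc() (BD-count limits for TSO, optional VLAN validation, pseudo header checksum preparation) run in the tx_prepare stage rather than in the burst itself, so callers are expected to pair the two calls. A minimal caller sketch, again with an illustrative helper name:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
example_tx_with_prepare(uint16_t port_id, uint16_t queue_id,
			struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/*
	 * rte_eth_tx_prepare() invokes the driver's tx_pkt_prepare callback
	 * (hns3_prep_pkts() here); packets from index nb_prep onward were
	 * rejected and rte_errno holds the reason.
	 */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}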