X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=c76e635ff112e0497bbbaa760475fe82c27ccf0b;hb=135155a8363d4279b4684ae4723e70bb6554b9e3;hp=fc1a256f3d9c2d423743f665b4ed3db82a68f094;hpb=091a0f95b5cd7f1159a6b23ac8fda97b53088b06;p=dpdk.git diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index fc1a256f3d..c76e635ff1 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -2,27 +2,17 @@ * Copyright(c) 2018-2019 Hisilicon Limited. */ -#include -#include -#include -#include -#include -#include #include -#include #include #include -#include -#include -#include #include #include #include -#include -#include #include #include -#include +#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) +#include +#endif #include "hns3_ethdev.h" #include "hns3_rxtx.h" @@ -30,7 +20,7 @@ #include "hns3_logs.h" #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1) -#define DEFAULT_RX_FREE_THRESH 32 +#define HNS3_RX_RING_PREFETCTH_MASK 3 static void hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) @@ -38,14 +28,35 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) uint16_t i; /* Note: Fake rx queue will not enter here */ - if (rxq->sw_ring) { + if (rxq->sw_ring == NULL) + return; + + if (rxq->rx_rearm_nb == 0) { for (i = 0; i < rxq->nb_rx_desc; i++) { - if (rxq->sw_ring[i].mbuf) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } else { + for (i = rxq->next_to_use; + i != rxq->rx_rearm_start; + i = (i + 1) % rxq->nb_rx_desc) { + if (rxq->sw_ring[i].mbuf != NULL) { rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); rxq->sw_ring[i].mbuf = NULL; } } } + + for (i = 0; i < rxq->bulk_mbuf_num; i++) + rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]); + rxq->bulk_mbuf_num = 0; + + if (rxq->pkt_first_seg) { + rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + } } static void @@ -53,7 +64,7 @@ hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq) { uint16_t i; - /* Note: Fake rx queue will not enter here */ + /* Note: Fake tx queue will not enter here */ if (txq->sw_ring) { for (i = 0; i < txq->nb_tx_desc; i++) { if (txq->sw_ring[i].mbuf) { @@ -88,6 +99,8 @@ hns3_tx_queue_release(void *queue) rte_memzone_free(txq->mz); if (txq->sw_ring) rte_free(txq->sw_ring); + if (txq->free) + rte_free(txq->free); rte_free(txq); } } @@ -243,7 +256,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) for (i = 0; i < rxq->nb_rx_desc; i++) { mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(mbuf == NULL)) { - hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!", + hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!", i); hns3_rx_queue_release_mbufs(rxq); return -ENOMEM; @@ -316,26 +329,26 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq) } void -hns3_update_all_queues_pvid_state(struct hns3_hw *hw) +hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw) { uint16_t nb_rx_q = hw->data->nb_rx_queues; uint16_t nb_tx_q = hw->data->nb_tx_queues; struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; - int pvid_state; + bool pvid_en; int i; - pvid_state = hw->port_base_vlan_cfg.state; + pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE; for (i = 0; i < hw->cfg_max_queues; i++) { if (i < nb_rx_q) { rxq = hw->data->rx_queues[i]; if (rxq != NULL) - rxq->pvid_state = pvid_state; + rxq->pvid_sw_discard_en = pvid_en; } if (i < nb_tx_q) { txq = hw->data->tx_queues[i]; if (txq != NULL) - txq->pvid_state = pvid_state; + 
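/*
 * The single pvid_state flag is split here into two direction-specific
 * booleans derived from the same port-based VLAN state. On the Rx side the
 * flag is consumed as a table index when reporting stripped VLAN tags, as in
 * the hns3_rxd_to_vlan_tci() change later in this patch:
 *
 *   report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
 *
 * while pvid_sw_shift_en gates software VLAN tag handling on the Tx side
 * (see the HNS3_SW_SHIFT_AND_DISCARD_MODE comments added in the queue setup
 * paths below).
 */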
txq->pvid_sw_shift_en = pvid_en; } } } @@ -348,27 +361,179 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en) struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; uint32_t rcb_reg; + void *tqp_base; int i; for (i = 0; i < hw->cfg_max_queues; i++) { - if (i < nb_rx_q) - rxq = hw->data->rx_queues[i]; - else - rxq = hw->fkq_data.rx_queues[i - nb_rx_q]; - if (i < nb_tx_q) - txq = hw->data->tx_queues[i]; - else - txq = hw->fkq_data.tx_queues[i - nb_tx_q]; - if (rxq == NULL || txq == NULL || - (en && (rxq->rx_deferred_start || txq->tx_deferred_start))) - continue; + if (hns3_dev_indep_txrx_supported(hw)) { + rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL; + txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL; + /* + * After initialization, rxq and txq won't be NULL at + * the same time. + */ + if (rxq != NULL) + tqp_base = rxq->io_base; + else if (txq != NULL) + tqp_base = txq->io_base; + else + return; + } else { + rxq = i < nb_rx_q ? hw->data->rx_queues[i] : + hw->fkq_data.rx_queues[i - nb_rx_q]; - rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG); + tqp_base = rxq->io_base; + } + /* + * This is the master switch that used to control the enabling + * of a pair of Tx and Rx queues. Both the Rx and Tx point to + * the same register + */ + rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG); if (en) rcb_reg |= BIT(HNS3_RING_EN_B); else rcb_reg &= ~BIT(HNS3_RING_EN_B); - hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg); + hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg); + } +} + +static void +hns3_enable_txq(struct hns3_tx_queue *txq, bool en) +{ + struct hns3_hw *hw = &txq->hns->hw; + uint32_t reg; + + if (hns3_dev_indep_txrx_supported(hw)) { + reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); + else + reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg); + } + txq->enabled = en; +} + +static void +hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en) +{ + struct hns3_hw *hw = &rxq->hns->hw; + uint32_t reg; + + if (hns3_dev_indep_txrx_supported(hw)) { + reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); + else + reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg); + } + rxq->enabled = en; +} + +int +hns3_start_all_txqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq; + uint16_t i, j; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (!txq) { + hns3_err(hw, "Tx queue %u not available or setup.", i); + goto start_txqs_fail; + } + /* + * Tx queue is enabled by default. Therefore, the Tx queues + * needs to be disabled when deferred_start is set. There is + * another master switch used to control the enabling of a pair + * of Tx and Rx queues. And the master switch is disabled by + * default. + */ + if (txq->tx_deferred_start) + hns3_enable_txq(txq, false); + else + hns3_enable_txq(txq, true); + } + return 0; + +start_txqs_fail: + for (j = 0; j < i; j++) { + txq = hw->data->tx_queues[j]; + hns3_enable_txq(txq, false); + } + return -EINVAL; +} + +int +hns3_start_all_rxqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq; + uint16_t i, j; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (!rxq) { + hns3_err(hw, "Rx queue %u not available or setup.", i); + goto start_rxqs_fail; + } + /* + * Rx queue is enabled by default. 
Therefore, the Rx queues + * needs to be disabled when deferred_start is set. There is + * another master switch used to control the enabling of a pair + * of Tx and Rx queues. And the master switch is disabled by + * default. + */ + if (rxq->rx_deferred_start) + hns3_enable_rxq(rxq, false); + else + hns3_enable_rxq(rxq, true); + } + return 0; + +start_rxqs_fail: + for (j = 0; j < i; j++) { + rxq = hw->data->rx_queues[j]; + hns3_enable_rxq(rxq, false); + } + return -EINVAL; +} + +void +hns3_restore_tqp_enable_state(struct hns3_hw *hw) +{ + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint16_t i; + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (rxq != NULL) + hns3_enable_rxq(rxq, rxq->enabled); + } + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (txq != NULL) + hns3_enable_txq(txq, txq->enabled); + } +} + +void +hns3_stop_all_txqs(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq; + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (!txq) + continue; + hns3_enable_txq(txq, false); } } @@ -382,7 +547,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false); - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); req->stream_id = 0; hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0); @@ -403,18 +568,19 @@ hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable) hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false); req = (struct hns3_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 
1 : 0); - ret = hns3_cmd_send(hw, &desc, 1); if (ret) - hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret); + hns3_err(hw, "send tqp reset cmd error, queue_id = %u, " + "ret = %d", queue_id, ret); return ret; } static int -hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id) +hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id, + uint8_t *reset_status) { struct hns3_reset_tqp_queue_cmd *req; struct hns3_cmd_desc desc; @@ -423,23 +589,24 @@ hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id) hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true); req = (struct hns3_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->tqp_id = rte_cpu_to_le_16(queue_id); ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hns3_err(hw, "Get reset status error, ret =%d", ret); + hns3_err(hw, "get tqp reset status error, queue_id = %u, " + "ret = %d.", queue_id, ret); return ret; } - - return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + return ret; } static int -hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) +hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) { #define HNS3_TQP_RESET_TRY_MS 200 + uint8_t reset_status; uint64_t end; - int reset_status; int ret; ret = hns3_tqp_enable(hw, queue_id, false); @@ -456,21 +623,23 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret); return ret; } - ret = -ETIMEDOUT; end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS; do { /* Wait for tqp hw reset */ rte_delay_ms(HNS3_POLL_RESPONE_MS); - reset_status = hns3_get_reset_status(hw, queue_id); - if (reset_status) { - ret = 0; + ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status); + if (ret) + goto tqp_reset_fail; + + if (reset_status) break; - } } while (get_timeofday_ms() < end); - if (ret) { - hns3_err(hw, "Reset TQP fail, ret = %d", ret); - return ret; + if (!reset_status) { + ret = -ETIMEDOUT; + hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d", + queue_id, ret); + goto tqp_reset_fail; } ret = hns3_send_reset_tqp_cmd(hw, queue_id, false); @@ -478,6 +647,10 @@ hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret); return ret; + +tqp_reset_fail: + hns3_send_reset_tqp_cmd(hw, queue_id, false); + return ret; } static int @@ -493,28 +666,33 @@ hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) memcpy(msg_data, &queue_id, sizeof(uint16_t)); - return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, + ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, sizeof(msg_data), true, NULL, 0); + if (ret) + hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", + queue_id, ret); + return ret; } static int -hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id) +hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id) { struct hns3_hw *hw = &hns->hw; + if (hns->is_vf) return hns3vf_reset_tqp(hw, queue_id); else - return hns3_reset_tqp(hw, queue_id); + return hns3pf_reset_tqp(hw, queue_id); } int -hns3_reset_all_queues(struct hns3_adapter *hns) +hns3_reset_all_tqps(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; int ret, i; for (i = 0; i < hw->cfg_max_queues; i++) { - ret = hns3_reset_queue(hns, i); + ret = hns3_reset_tqp(hns, i); if (ret) { hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret); return ret; @@ -523,6 +701,121 @@ hns3_reset_all_queues(struct hns3_adapter *hns) 
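/*
 * Both the whole-TQP reset above and the per-direction queue reset added
 * below use the same poll-until-ready pattern instead of a fixed sleep.
 * A minimal sketch with only names that appear in this patch (total budget
 * HNS3_TQP_RESET_TRY_MS / HNS3_QUEUE_RESET_TRY_MS, polled every
 * HNS3_POLL_RESPONE_MS):
 *
 *   uint8_t status = 0;
 *   int ret = 0;
 *   uint64_t end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
 *   do {
 *       rte_delay_ms(HNS3_POLL_RESPONE_MS);
 *       ret = hns3_get_tqp_reset_status(hw, queue_id, &status);
 *       if (ret != 0 || status != 0)
 *           break;
 *   } while (get_timeofday_ms() < end);
 *   if (ret == 0 && status == 0)
 *       ret = -ETIMEDOUT;    \/\* hardware never reported ready to reset \*\/
 */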
return 0; } +static int +hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type, bool enable) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int queue_direction; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id); + queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1; + req->queue_direction = rte_cpu_to_le_16(queue_direction); + hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "send queue reset cmd error, queue_id = %u, " + "queue_type = %s, ret = %d.", queue_id, + queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret); + return ret; +} + +static int +hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type, + uint8_t *reset_status) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int queue_direction; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id); + queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1; + req->queue_direction = rte_cpu_to_le_16(queue_direction); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "get queue reset status error, queue_id = %u " + "queue_type = %s, ret = %d.", queue_id, + queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret); + return ret; + } + + *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); + return ret; +} + +static int +hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id, + enum hns3_ring_type queue_type) +{ +#define HNS3_QUEUE_RESET_TRY_MS 200 + struct hns3_tx_queue *txq; + struct hns3_rx_queue *rxq; + uint32_t reset_wait_times; + uint32_t max_wait_times; + uint8_t reset_status; + int ret; + + if (queue_type == HNS3_RING_TYPE_TX) { + txq = hw->data->tx_queues[queue_id]; + hns3_enable_txq(txq, false); + } else { + rxq = hw->data->rx_queues[queue_id]; + hns3_enable_rxq(rxq, false); + } + + ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true); + if (ret) { + hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret); + return ret; + } + + reset_wait_times = 0; + max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS; + while (reset_wait_times < max_wait_times) { + /* Wait for queue hw reset */ + rte_delay_ms(HNS3_POLL_RESPONE_MS); + ret = hns3_get_queue_reset_status(hw, queue_id, + queue_type, &reset_status); + if (ret) + goto queue_reset_fail; + + if (reset_status) + break; + reset_wait_times++; + } + + if (!reset_status) { + hns3_err(hw, "reset queue timeout, queue_id = %u, " + "queue_type = %s", queue_id, + queue_type == HNS3_RING_TYPE_TX ? 
"Tx" : "Rx"); + ret = -ETIMEDOUT; + goto queue_reset_fail; + } + + ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); + if (ret) + hns3_err(hw, "deassert queue reset fail, ret = %d.", ret); + + return ret; + +queue_reset_fail: + hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); + return ret; +} + + void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, uint8_t gl_idx, uint16_t gl_value) @@ -536,7 +829,10 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, return; addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE; - value = HNS3_GL_USEC_TO_REG(gl_value); + if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US) + value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US; + else + value = HNS3_GL_USEC_TO_REG(gl_value); hns3_write_dev(hw, addr, value); } @@ -557,6 +853,26 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value) hns3_write_dev(hw, addr, value); } +void +hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value) +{ + uint32_t addr; + + /* + * int_ql_max == 0 means the hardware does not support QL, + * QL regs config is not permitted if QL is not supported, + * here just return. + */ + if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE) + return; + + addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + hns3_write_dev(hw, addr, ql_value); + + addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + hns3_write_dev(hw, addr, ql_value); +} + static void hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en) { @@ -617,7 +933,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) } static int -hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; @@ -628,36 +944,40 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx]; ret = hns3_alloc_rx_queue_mbufs(hw, rxq); if (ret) { - hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d", + hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.", idx, ret); return ret; } rxq->next_to_use = 0; - rxq->next_to_clean = 0; - rxq->nb_rx_hold = 0; + rxq->rx_rearm_start = 0; + rxq->rx_free_hold = 0; + rxq->rx_rearm_nb = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; hns3_init_rx_queue_hw(rxq); + hns3_rxq_vec_setup(rxq); return 0; } static void -hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx]; rxq->next_to_use = 0; - rxq->next_to_clean = 0; - rxq->nb_rx_hold = 0; + rxq->rx_free_hold = 0; + rxq->rx_rearm_start = 0; + rxq->rx_rearm_nb = 0; hns3_init_rx_queue_hw(rxq); } static void -hns3_init_tx_queue(struct hns3_tx_queue *queue) +hns3_init_txq(struct hns3_tx_queue *txq) { - struct hns3_tx_queue *txq = queue; struct hns3_desc *desc; int i; @@ -674,26 +994,6 @@ hns3_init_tx_queue(struct hns3_tx_queue *queue) hns3_init_tx_queue_hw(txq); } -static void -hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) -{ - struct hns3_hw *hw = &hns->hw; - struct hns3_tx_queue *txq; - - txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx]; - hns3_init_tx_queue(txq); -} - -static void -hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) -{ - struct hns3_hw *hw = &hns->hw; - struct 
hns3_tx_queue *txq; - - txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx]; - hns3_init_tx_queue(txq); -} - static void hns3_init_tx_ring_tc(struct hns3_adapter *hns) { @@ -720,38 +1020,41 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns) } static int -hns3_start_rx_queues(struct hns3_adapter *hns) +hns3_init_rx_queues(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; struct hns3_rx_queue *rxq; - int i, j; + uint16_t i, j; int ret; /* Initialize RSS for queues */ ret = hns3_config_rss(hns); if (ret) { - hns3_err(hw, "Failed to configure rss %d", ret); + hns3_err(hw, "failed to configure rss, ret = %d.", ret); return ret; } for (i = 0; i < hw->data->nb_rx_queues; i++) { rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) + if (!rxq) { + hns3_err(hw, "Rx queue %u not available or setup.", i); + goto out; + } + + if (rxq->rx_deferred_start) continue; - ret = hns3_dev_rx_queue_start(hns, i); + + ret = hns3_init_rxq(hns, i); if (ret) { - hns3_err(hw, "Failed to start No.%d rx queue: %d", i, + hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i, ret); goto out; } } - for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) { - rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) - continue; - hns3_fake_rx_queue_start(hns, i); - } + for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) + hns3_init_fake_rxq(hns, i); + return 0; out: @@ -763,73 +1066,137 @@ out: return ret; } -static void -hns3_start_tx_queues(struct hns3_adapter *hns) +static int +hns3_init_tx_queues(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; struct hns3_tx_queue *txq; - int i; + uint16_t i; for (i = 0; i < hw->data->nb_tx_queues; i++) { txq = (struct hns3_tx_queue *)hw->data->tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) + if (!txq) { + hns3_err(hw, "Tx queue %u not available or setup.", i); + return -EINVAL; + } + + if (txq->tx_deferred_start) continue; - hns3_dev_tx_queue_start(hns, i); + hns3_init_txq(txq); } for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) { txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) - continue; - hns3_fake_tx_queue_start(hns, i); + hns3_init_txq(txq); } - hns3_init_tx_ring_tc(hns); + + return 0; } /* - * Start all queues. - * Note: just init and setup queues, and don't enable queue rx&tx. + * Init all queues. + * Note: just init and setup queues, and don't enable tqps. 
*/ int -hns3_start_queues(struct hns3_adapter *hns, bool reset_queue) +hns3_init_queues(struct hns3_adapter *hns, bool reset_queue) { struct hns3_hw *hw = &hns->hw; int ret; if (reset_queue) { - ret = hns3_reset_all_queues(hns); + ret = hns3_reset_all_tqps(hns); if (ret) { - hns3_err(hw, "Failed to reset all queues %d", ret); + hns3_err(hw, "failed to reset all queues, ret = %d.", + ret); return ret; } } - ret = hns3_start_rx_queues(hns); + ret = hns3_init_rx_queues(hns); if (ret) { - hns3_err(hw, "Failed to start rx queues: %d", ret); + hns3_err(hw, "failed to init rx queues, ret = %d.", ret); return ret; } - hns3_start_tx_queues(hns); + ret = hns3_init_tx_queues(hns); + if (ret) { + hns3_dev_release_mbufs(hns); + hns3_err(hw, "failed to init tx queues, ret = %d.", ret); + } - return 0; + return ret; +} + +void +hns3_start_tqps(struct hns3_hw *hw) +{ + struct hns3_tx_queue *txq; + struct hns3_rx_queue *rxq; + uint16_t i; + + hns3_enable_all_queues(hw, true); + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (txq->enabled) + hw->data->tx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (rxq->enabled) + hw->data->rx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } +} + +void +hns3_stop_tqps(struct hns3_hw *hw) +{ + uint16_t i; + + hns3_enable_all_queues(hw, false); + + for (i = 0; i < hw->data->nb_tx_queues; i++) + hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + for (i = 0; i < hw->data->nb_rx_queues; i++) + hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } +/* + * Iterate over all Rx Queue, and call the callback() function for each Rx + * queue. + * + * @param[in] dev + * The target eth dev. + * @param[in] callback + * The function to call for each queue. + * if callback function return nonzero will stop iterate and return it's value + * @param[in] arg + * The arguments to provide the callback function with. + * + * @return + * 0 on success, otherwise with errno set. + */ int -hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue) +hns3_rxq_iterate(struct rte_eth_dev *dev, + int (*callback)(struct hns3_rx_queue *, void *), void *arg) { - struct hns3_hw *hw = &hns->hw; + uint32_t i; int ret; - hns3_enable_all_queues(hw, false); - if (reset_queue) { - ret = hns3_reset_all_queues(hns); - if (ret) { - hns3_err(hw, "Failed to reset all queues %d", ret); + if (dev->data->rx_queues == NULL) + return -EINVAL; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ret = callback(dev->data->rx_queues[i], arg); + if (ret != 0) return ret; - } } + return 0; } @@ -845,7 +1212,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (rxq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d rx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u rx ring!", q_info->idx); return NULL; } @@ -853,12 +1220,18 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, /* Allocate rx ring hardware descriptors. */ rxq->queue_id = q_info->idx; rxq->nb_rx_desc = q_info->nb_desc; - rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc); - rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, - rx_desc, HNS3_RING_BASE_ALIGN, + + /* + * Allocate a litter more memory because rx vector functions + * don't check boundaries each time. 
+ */ + rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) * + sizeof(struct hns3_desc); + rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, + rx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (rx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!", q_info->idx); hns3_rx_queue_release(rxq); return NULL; @@ -867,7 +1240,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; rxq->rx_ring_phys_addr = rx_mz->iova; - hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx, rxq->rx_ring_phys_addr); return rxq; @@ -895,7 +1268,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "rx_fake_ring"; rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); if (rxq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx); return -ENOMEM; } @@ -932,7 +1305,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (txq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d tx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u tx ring!", q_info->idx); return NULL; } @@ -945,7 +1318,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, tx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (tx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!", q_info->idx); hns3_tx_queue_release(txq); return NULL; @@ -954,7 +1327,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq->tx_ring = (struct hns3_desc *)tx_mz->addr; txq->tx_ring_phys_addr = tx_mz->iova; - hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx, txq->tx_ring_phys_addr); /* Clear tx bd */ @@ -989,12 +1362,13 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "tx_fake_ring"; txq = hns3_alloc_txq_and_dma_zone(dev, &q_info); if (txq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx); return -ENOMEM; } /* Don't need alloc sw_ring, because upper applications don't use it */ txq->sw_ring = NULL; + txq->free = NULL; txq->hns = hns; txq->tx_deferred_start = false; @@ -1016,7 +1390,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) { uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues; void **rxq; - uint8_t i; + uint16_t i; if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) { /* first time configuration */ @@ -1063,7 +1437,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) { uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues; void **txq; - uint8_t i; + uint16_t i; if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) { /* first time configuration */ @@ -1115,13 +1489,12 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, int ret; /* Setup new number of fake RX/TX queues and reconfigure device. 
*/ - hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q; tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q; ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q); if (ret) { hns3_err(hw, "Fail to configure fake rx queues: %d", ret); - goto cfg_fake_rx_q_fail; + return ret; } ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); @@ -1154,8 +1527,6 @@ setup_fake_rx_q_fail: (void)hns3_fake_tx_queue_config(hw, 0); cfg_fake_tx_q_fail: (void)hns3_fake_rx_queue_config(hw, 0); -cfg_fake_rx_q_fail: - hw->cfg_max_queues = 0; return ret; } @@ -1171,7 +1542,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns) if (dev_data->rx_queues) for (i = 0; i < dev_data->nb_rx_queues; i++) { rxq = dev_data->rx_queues[i]; - if (rxq == NULL || rxq->rx_deferred_start) + if (rxq == NULL) continue; hns3_rx_queue_release_mbufs(rxq); } @@ -1179,7 +1550,7 @@ hns3_dev_release_mbufs(struct hns3_adapter *hns) if (dev_data->tx_queues) for (i = 0; i < dev_data->nb_tx_queues; i++) { txq = dev_data->tx_queues[i]; - if (txq == NULL || txq->tx_deferred_start) + if (txq == NULL) continue; hns3_tx_queue_release_mbufs(txq); } @@ -1213,7 +1584,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len) vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); - if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE) return -EINVAL; @@ -1227,23 +1597,50 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len) return 0; } -int -hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, - unsigned int socket_id, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +static int +hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, + uint16_t nb_desc) { - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; - struct hns3_queue_info q_info; - struct hns3_rx_queue *rxq; - uint16_t rx_buf_size; - int rx_entry_len; + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode; + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + uint16_t min_vec_bds; - if (dev->data->dev_started) { - hns3_err(hw, "rx_queue_setup after dev_start no supported"); + /* + * HNS3 hardware network engine set scattered as default. If the driver + * is not work in scattered mode and the pkts greater than buf_size + * but smaller than max_rx_pkt_len will be distributed to multiple BDs. + * Driver cannot handle this situation. 
+ */ + if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) { + hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater " + "than rx_buf_len if scattered is off."); return -EINVAL; } + if (pkt_burst == hns3_recv_pkts_vec) { + min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH + + HNS3_DEFAULT_RX_BURST; + if (nb_desc < min_vec_bds || + nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) { + hns3_err(hw, "if Rx burst mode is vector, " + "number of descriptor is required to be " + "bigger than min vector bds:%u, and could be " + "divided by rxq rearm thresh:%u.", + min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH); + return -EINVAL; + } + } + return 0; +} + +static int +hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp, uint16_t nb_desc, + uint16_t *buf_size) +{ + int ret; + if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || nb_desc % HNS3_ALIGN_RING_DESC) { hns3_err(hw, "Number (%u) of rx descriptors is invalid", @@ -1252,10 +1649,60 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, } if (conf->rx_drop_en == 0) - hns3_warn(hw, "if there are no available Rx descriptors," - "incoming packets are always dropped. input parameter" - " conf->rx_drop_en(%u) is uneffective.", - conf->rx_drop_en); + hns3_warn(hw, "if no descriptors available, packets are always " + "dropped and rx_drop_en (1) is fixed on"); + + if (hns3_rx_buf_len_calc(mp, buf_size)) { + hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! " + "minimal data room size (%u).", + rte_pktmbuf_data_room_size(mp), + HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM); + return -EINVAL; + } + + if (hw->data->dev_started) { + ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc); + if (ret) { + hns3_err(hw, "Rx queue runtime setup fail."); + return ret; + } + } + + return 0; +} + +uint32_t +hns3_get_tqp_reg_offset(uint16_t queue_id) +{ + uint32_t reg_offset; + + /* Need an extend offset to config queue > 1024 */ + if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID) + reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE; + else + reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET + + (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) * + HNS3_TQP_REG_SIZE; + + return reg_offset; +} + +int +hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + unsigned int socket_id, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_queue_info q_info; + struct hns3_rx_queue *rxq; + uint16_t rx_buf_size; + int rx_entry_len; + int ret; + + ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size); + if (ret) + return ret; if (dev->data->rx_queues[idx]) { hns3_rx_queue_release(dev->data->rx_queues[idx]); @@ -1268,14 +1715,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, q_info.type = "hns3 RX queue"; q_info.ring_name = "rx_ring"; - if (hns3_rx_buf_len_calc(mp, &rx_buf_size)) { - hns3_err(hw, "rxq mbufs' data room size:%u is not enough! 
" - "minimal data room size:%u.", - rte_pktmbuf_data_room_size(mp), - HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM); - return -EINVAL; - } - rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); if (rxq == NULL) { hns3_err(hw, @@ -1284,14 +1723,19 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, } rxq->hns = hns; + rxq->ptype_tbl = &hns->ptype_tbl; rxq->mb_pool = mp; - if (conf->rx_free_thresh <= 0) - rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH; - else - rxq->rx_free_thresh = conf->rx_free_thresh; + rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ? + conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH; + rxq->rx_deferred_start = conf->rx_deferred_start; + if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { + hns3_warn(hw, "deferred start is not supported."); + rxq->rx_deferred_start = false; + } - rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc; + rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) * + sizeof(struct hns3_entry); rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len, RTE_CACHE_LINE_SIZE, socket_id); if (rxq->sw_ring == NULL) { @@ -1301,22 +1745,40 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, } rxq->next_to_use = 0; - rxq->next_to_clean = 0; - rxq->nb_rx_hold = 0; + rxq->rx_free_hold = 0; + rxq->rx_rearm_start = 0; + rxq->rx_rearm_nb = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; rxq->port_id = dev->data->port_id; - rxq->pvid_state = hw->port_base_vlan_cfg.state; + /* + * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, + * the pvid_sw_discard_en in the queue struct should not be changed, + * because PVID-related operations do not need to be processed by PMD + * driver. For hns3 VF device, whether it needs to process PVID depends + * on the configuration of PF kernel mode netdevice driver. And the + * related PF configuration is delivered through the mailbox and finally + * reflectd in port_base_vlan_cfg. 
+ */ + if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) + rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == + HNS3_PORT_BASE_VLAN_ENABLE; + else + rxq->pvid_sw_discard_en = false; rxq->configured = true; rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + idx * HNS3_TQP_REG_SIZE); + rxq->io_base = (void *)((char *)hw->io_base + + hns3_get_tqp_reg_offset(idx)); + rxq->io_head_reg = (volatile void *)((char *)rxq->io_base + + HNS3_RING_RX_HEAD_REG); rxq->rx_buf_len = rx_buf_size; rxq->l2_errors = 0; rxq->pkt_len_errors = 0; - rxq->l3_csum_erros = 0; - rxq->l4_csum_erros = 0; - rxq->ol3_csum_erros = 0; - rxq->ol4_csum_erros = 0; + rxq->l3_csum_errors = 0; + rxq->l4_csum_errors = 0; + rxq->ol3_csum_errors = 0; + rxq->ol4_csum_errors = 0; /* CRC len set here is used for amending packet length */ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) @@ -1324,6 +1786,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, else rxq->crc_len = 0; + rxq->bulk_mbuf_num = 0; + rte_spinlock_lock(&hw->lock); dev->data->rx_queues[idx] = rxq; rte_spinlock_unlock(&hw->lock); @@ -1331,104 +1795,40 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, return 0; } -static inline uint32_t -rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info) +void +hns3_rx_scattered_reset(struct rte_eth_dev *dev) { -#define HNS3_L2TBL_NUM 4 -#define HNS3_L3TBL_NUM 16 -#define HNS3_L4TBL_NUM 16 -#define HNS3_OL3TBL_NUM 16 -#define HNS3_OL4TBL_NUM 16 - uint32_t pkt_type = 0; - uint32_t l2id, l3id, l4id; - uint32_t ol3id, ol4id; - - static const uint32_t l2table[HNS3_L2TBL_NUM] = { - RTE_PTYPE_L2_ETHER, - RTE_PTYPE_L2_ETHER_QINQ, - RTE_PTYPE_L2_ETHER_VLAN, - RTE_PTYPE_L2_ETHER_VLAN - }; - - static const uint32_t l3table[HNS3_L3TBL_NUM] = { - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_L2_ETHER_ARP, - RTE_PTYPE_L2_ETHER, - RTE_PTYPE_L3_IPV4_EXT, - RTE_PTYPE_L3_IPV6_EXT, - RTE_PTYPE_L2_ETHER_LLDP, - 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - - static const uint32_t l4table[HNS3_L4TBL_NUM] = { - RTE_PTYPE_L4_UDP, - RTE_PTYPE_L4_TCP, - RTE_PTYPE_TUNNEL_GRE, - RTE_PTYPE_L4_SCTP, - RTE_PTYPE_L4_IGMP, - RTE_PTYPE_L4_ICMP, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - - static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = { - RTE_PTYPE_INNER_L2_ETHER, - RTE_PTYPE_INNER_L2_ETHER_VLAN, - RTE_PTYPE_INNER_L2_ETHER_QINQ, - 0 - }; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; - static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = { - RTE_PTYPE_INNER_L3_IPV4, - RTE_PTYPE_INNER_L3_IPV6, - 0, - RTE_PTYPE_INNER_L2_ETHER, - RTE_PTYPE_INNER_L3_IPV4_EXT, - RTE_PTYPE_INNER_L3_IPV6_EXT, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; + hw->rx_buf_len = 0; + dev->data->scattered_rx = false; +} - static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = { - RTE_PTYPE_INNER_L4_UDP, - RTE_PTYPE_INNER_L4_TCP, - RTE_PTYPE_TUNNEL_GRE, - RTE_PTYPE_INNER_L4_SCTP, - RTE_PTYPE_L4_IGMP, - RTE_PTYPE_INNER_L4_ICMP, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; +void +hns3_rx_scattered_calc(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_queue *rxq; + uint32_t queue_id; - static const uint32_t ol3table[HNS3_OL3TBL_NUM] = { - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV6, - 0, 0, - RTE_PTYPE_L3_IPV4_EXT, - RTE_PTYPE_L3_IPV6_EXT, - 0, 0, 0, 0, 0, 0, 0, 0, 0, - RTE_PTYPE_UNKNOWN - }; + if (dev->data->rx_queues == NULL) + 
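/*
 * What this function computes, in short: hw->rx_buf_len becomes the smallest
 * rx_buf_len across all configured Rx queues, and
 *
 *   scattered_rx = (offloads & DEV_RX_OFFLOAD_SCATTER) ||
 *                  (max_rx_pkt_len > hw->rx_buf_len);
 *
 * Illustrative numbers only: with 2048-byte Rx buffers on every queue and
 * max_rx_pkt_len = 9000, a frame cannot fit in a single BD, so
 * dev->data->scattered_rx is set and hns3_get_rx_function() later selects a
 * burst function that can merge multi-BD packets.
 */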
return; - static const uint32_t ol4table[HNS3_OL4TBL_NUM] = { - 0, - RTE_PTYPE_TUNNEL_VXLAN, - RTE_PTYPE_TUNNEL_NVGRE, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) { + rxq = dev->data->rx_queues[queue_id]; + if (hw->rx_buf_len == 0) + hw->rx_buf_len = rxq->rx_buf_len; + else + hw->rx_buf_len = RTE_MIN(hw->rx_buf_len, + rxq->rx_buf_len); + } - l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M, - HNS3_RXD_STRP_TAGP_S); - l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); - l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S); - ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S); - ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); - - if (ol4table[ol4id]) - pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] | - inner_l4table[l4id] | ol3table[ol3id] | - ol4table[ol4id]); - else - pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]); - return pkt_type; + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER || + dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len) + dev->data->scattered_rx = true; } const uint32_t * @@ -1450,94 +1850,119 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L2_ETHER_QINQ, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, RTE_PTYPE_UNKNOWN }; - if (dev->rx_pkt_burst == hns3_recv_pkts) + if (dev->rx_pkt_burst == hns3_recv_pkts || + dev->rx_pkt_burst == hns3_recv_scattered_pkts || + dev->rx_pkt_burst == hns3_recv_pkts_vec || + dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) return ptypes; return NULL; } static void -hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count) -{ - rxq->next_to_use += count; - if (rxq->next_to_use >= rxq->nb_rx_desc) - rxq->next_to_use -= rxq->nb_rx_desc; +hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) +{ + tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER; + tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; + tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER; + + tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN; + tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT; + tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN; + + tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4; + tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6; + tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP; + tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ; + tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT; + tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT; + 
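/*
 * These tables replace the removed rxd_pkt_info_to_pkt_type(): each Rx queue
 * now carries a pointer to them (rxq->ptype_tbl, set in hns3_rx_queue_setup)
 * and the lookup helper hns3_rx_calc_ptype() is referenced but not defined in
 * this diff. A minimal sketch of the intended indexing, assuming the same
 * descriptor fields the removed function parsed:
 *
 *   l2id = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
 *                         HNS3_RXD_STRP_TAGP_S);
 *   l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
 *   l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
 *   ptype = tbl->l2l3table[l2id][l3id] | tbl->l4table[l4id];
 */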
tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP; + tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ; + + tbl->l4table[0] = RTE_PTYPE_L4_UDP; + tbl->l4table[1] = RTE_PTYPE_L4_TCP; + tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE; + tbl->l4table[3] = RTE_PTYPE_L4_SCTP; + tbl->l4table[4] = RTE_PTYPE_L4_IGMP; + tbl->l4table[5] = RTE_PTYPE_L4_ICMP; +} - hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count); +static void +hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) +{ + tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER; + tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN; + tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ; + + tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4; + tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6; + /* There is not a ptype for inner ARP/RARP */ + tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN; + tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN; + tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT; + tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT; + + tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP; + tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP; + /* There is not a ptype for inner GRE */ + tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN; + tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP; + /* There is not a ptype for inner IGMP */ + tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN; + tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP; + + tbl->ol2table[0] = RTE_PTYPE_L2_ETHER; + tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN; + tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ; + + tbl->ol3table[0] = RTE_PTYPE_L3_IPV4; + tbl->ol3table[1] = RTE_PTYPE_L3_IPV6; + tbl->ol3table[2] = RTE_PTYPE_UNKNOWN; + tbl->ol3table[3] = RTE_PTYPE_UNKNOWN; + tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT; + tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT; + + tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; + tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN; + tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; } -static int -hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm, - uint32_t bd_base_info, uint32_t l234_info, - uint32_t *cksum_err) +void +hns3_init_rx_ptype_tble(struct rte_eth_dev *dev) { - uint32_t tmp = 0; - - if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) { - rxq->l2_errors++; - return -EINVAL; - } - - if (unlikely(rxm->pkt_len == 0 || - (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) { - rxq->pkt_len_errors++; - return -EINVAL; - } - - if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) { - if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) { - rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; - rxq->l3_csum_erros++; - tmp |= HNS3_L3_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) { - rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; - rxq->l4_csum_erros++; - tmp |= HNS3_L4_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) { - rxq->ol3_csum_erros++; - tmp |= HNS3_OUTER_L3_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) { - rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; - rxq->ol4_csum_erros++; - tmp |= HNS3_OUTER_L4_CKSUM_ERR; - } - } - *cksum_err = tmp; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_ptype_table *tbl = &hns->ptype_tbl; - return 0; -} + memset(tbl, 0, sizeof(*tbl)); -static void -hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type, - const uint32_t cksum_err) -{ - if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) { - if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) && - (cksum_err & HNS3_L3_CKSUM_ERR) == 0) - rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) && - (cksum_err & HNS3_L4_CKSUM_ERR) == 0) - rxm->ol_flags |= 
PKT_RX_L4_CKSUM_GOOD; - if (likely(packet_type & RTE_PTYPE_L4_MASK) && - (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0) - rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; - } else { - if (likely(packet_type & RTE_PTYPE_L3_MASK) && - (cksum_err & HNS3_L3_CKSUM_ERR) == 0) - rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - if (likely(packet_type & RTE_PTYPE_L4_MASK) && - (cksum_err & HNS3_L4_CKSUM_ERR) == 0) - rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD; - } + hns3_init_non_tunnel_ptype_tbl(tbl); + hns3_init_tunnel_ptype_tbl(tbl); } static inline void @@ -1574,7 +1999,7 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, }; strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S); - report_mode = report_type[rxq->pvid_state][strip_status]; + report_mode = report_type[rxq->pvid_sw_discard_en][strip_status]; switch (report_mode) { case HNS3_NO_STRP_VLAN_VLD: mb->vlan_tci = 0; @@ -1587,6 +2012,9 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag); return; + default: + mb->vlan_tci = 0; + return; } } @@ -1607,6 +2035,23 @@ recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg, rxm->data_len = (uint16_t)(data_len - crc_len); } +static inline struct rte_mbuf * +hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) +{ + int ret; + + if (likely(rxq->bulk_mbuf_num > 0)) + return rxq->bulk_mbuf[--rxq->bulk_mbuf_num]; + + ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf, + HNS3_BULK_ALLOC_MBUF_NUM); + if (likely(ret == 0)) { + rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM; + return rxq->bulk_mbuf[--rxq->bulk_mbuf_num]; + } else + return rte_mbuf_raw_alloc(rxq->mb_pool); +} + uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -1615,21 +2060,15 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) struct hns3_rx_queue *rxq; /* RX queue */ struct hns3_entry *sw_ring; struct hns3_entry *rxe; - struct rte_mbuf *first_seg; - struct rte_mbuf *last_seg; struct hns3_desc rxd; struct rte_mbuf *nmb; /* pointer of the new mbuf */ struct rte_mbuf *rxm; - struct rte_eth_dev *dev; uint32_t bd_base_info; uint32_t cksum_err; uint32_t l234_info; - uint32_t gro_size; uint32_t ol_info; uint64_t dma_addr; - uint16_t data_len; uint16_t nb_rx_bd; - uint16_t pkt_len; uint16_t nb_rx; uint16_t rx_id; int ret; @@ -1637,81 +2076,199 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) nb_rx = 0; nb_rx_bd = 0; rxq = rx_queue; - - rx_id = rxq->next_to_clean; rx_ring = rxq->rx_ring; - first_seg = rxq->pkt_first_seg; - last_seg = rxq->pkt_last_seg; sw_ring = rxq->sw_ring; + rx_id = rxq->next_to_use; while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info); - if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) break; - /* - * The interactive process between software and hardware of - * receiving a new packet in hns3 network engine: - * 1. Hardware network engine firstly writes the packet content - * to the memory pointed by the 'addr' field of the Rx Buffer - * Descriptor, secondly fills the result of parsing the - * packet include the valid field into the Rx Buffer - * Descriptor in one write operation. - * 2. 
Driver reads the Rx BD's valid field in the loop to check - * whether it's valid, if valid then assign a new address to - * the addr field, clear the valid field, get the other - * information of the packet by parsing Rx BD's other fields, - * finally write back the number of Rx BDs processed by the - * driver to the HNS3_RING_RX_HEAD_REG register to inform - * hardware. - * In the above process, the ordering is very important. We must - * make sure that CPU read Rx BD's other fields only after the - * Rx BD is valid. - * - * There are two type of re-ordering: compiler re-ordering and - * CPU re-ordering under the ARMv8 architecture. - * 1. we use volatile to deal with compiler re-ordering, so you - * can see that rx_ring/rxdp defined with volatile. - * 2. we commonly use memory barrier to deal with CPU - * re-ordering, but the cost is high. - * - * In order to solve the high cost of using memory barrier, we - * use the data dependency order under the ARMv8 architecture, - * for example: - * instr01: load A - * instr02: load B <- A - * the instr02 will always execute after instr01. - * - * To construct the data dependency ordering, we use the - * following assignment: - * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) - - * (1u<port_id; + rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_rx_bd++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + rte_prefetch0(sw_ring[rx_id].mbuf); + if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + + dma_addr = rte_mbuf_data_iova_default(nmb); + rxdp->addr = rte_cpu_to_le_64(dma_addr); + rxdp->rx.bd_base_info = 0; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) - + rxq->crc_len; + rxm->data_len = rxm->pkt_len; + rxm->port = rxq->port_id; + rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); + rxm->ol_flags = PKT_RX_RSS_HASH; + if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { + rxm->hash.fdir.hi = + rte_le_to_cpu_16(rxd.rx.fd_id); + rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + } + rxm->nb_segs = 1; + rxm->next = NULL; + + /* Load remained descriptor data and extract necessary fields */ + l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); + ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); + ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, + l234_info, &cksum_err); + if (unlikely(ret)) + goto pkt_err; + + rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info); + + if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) + hns3_rx_set_cksum_flag(rxm, rxm->packet_type, + cksum_err); + hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd); + + rx_pkts[nb_rx++] = rxm; + continue; +pkt_err: + rte_pktmbuf_free(rxm); + } + + rxq->next_to_use = rx_id; + rxq->rx_free_hold += nb_rx_bd; + if (rxq->rx_free_hold > rxq->rx_free_thresh) { + hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold); + rxq->rx_free_hold = 0; + } + + return nb_rx; +} + +uint16_t +hns3_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile struct hns3_desc *rx_ring; /* RX ring (desc) */ + volatile struct hns3_desc *rxdp; /* pointer of the current desc */ + struct hns3_rx_queue *rxq; /* RX queue */ + struct hns3_entry *sw_ring; + struct hns3_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct hns3_desc rxd; + struct rte_mbuf *nmb; /* pointer of the new mbuf */ + struct rte_mbuf *rxm; + struct 
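/*
 * A worked example of the data-dependency trick described in the comment
 * below: let vld = 1u << HNS3_RXD_VLD_B. The loop only reaches the copy when
 * the VLD bit is set, so (bd_base_info & vld) == vld and the index
 * (bd_base_info & vld) - vld evaluates to 0, i.e. rxd = rxdp[0]. The point is
 * not the value but the address dependency: the index is computed from the
 * just-loaded bd_base_info, so on ARMv8 the loads of the remaining descriptor
 * fields cannot be reordered before the load of bd_base_info, and no explicit
 * memory barrier has to be paid for.
 */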
rte_eth_dev *dev; + uint32_t bd_base_info; + uint32_t cksum_err; + uint32_t l234_info; + uint32_t gro_size; + uint32_t ol_info; + uint64_t dma_addr; + uint16_t nb_rx_bd; + uint16_t nb_rx; + uint16_t rx_id; + int ret; + + nb_rx = 0; + nb_rx_bd = 0; + rxq = rx_queue; + + rx_id = rxq->next_to_use; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info); + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) + break; + + /* + * The interactive process between software and hardware of + * receiving a new packet in hns3 network engine: + * 1. Hardware network engine firstly writes the packet content + * to the memory pointed by the 'addr' field of the Rx Buffer + * Descriptor, secondly fills the result of parsing the + * packet include the valid field into the Rx Buffer + * Descriptor in one write operation. + * 2. Driver reads the Rx BD's valid field in the loop to check + * whether it's valid, if valid then assign a new address to + * the addr field, clear the valid field, get the other + * information of the packet by parsing Rx BD's other fields, + * finally write back the number of Rx BDs processed by the + * driver to the HNS3_RING_RX_HEAD_REG register to inform + * hardware. + * In the above process, the ordering is very important. We must + * make sure that CPU read Rx BD's other fields only after the + * Rx BD is valid. + * + * There are two type of re-ordering: compiler re-ordering and + * CPU re-ordering under the ARMv8 architecture. + * 1. we use volatile to deal with compiler re-ordering, so you + * can see that rx_ring/rxdp defined with volatile. + * 2. we commonly use memory barrier to deal with CPU + * re-ordering, but the cost is high. + * + * In order to solve the high cost of using memory barrier, we + * use the data dependency order under the ARMv8 architecture, + * for example: + * instr01: load A + * instr02: load B <- A + * the instr02 will always execute after instr01. + * + * To construct the data dependency ordering, we use the + * following assignment: + * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) - + * (1u<mb_pool); + nmb = hns3_rx_alloc_buffer(rxq); if (unlikely(nmb == NULL)) { dev = &rte_eth_devices[rxq->port_id]; dev->data->rx_mbuf_alloc_failed++; @@ -1725,7 +2282,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_id = 0; rte_prefetch0(sw_ring[rx_id].mbuf); - if ((rx_id & 0x3) == 0) { + if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) { rte_prefetch0(&rx_ring[rx_id]); rte_prefetch0(&sw_ring[rx_id]); } @@ -1737,15 +2294,6 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxdp->rx.bd_base_info = 0; rxdp->addr = dma_addr; - /* - * Load remained descriptor data and extract necessary fields. - * Data size from buffer description may contains CRC len, - * packet len should subtract it. 
- */ - data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size)); - l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); - ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); - if (first_seg == NULL) { first_seg = rxm; first_seg->nb_segs = 1; @@ -1755,10 +2303,11 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxm->data_len = data_len; + rxm->data_len = rte_le_to_cpu_16(rxd.rx.size); - if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { last_seg = rxm; + rxm->next = NULL; continue; } @@ -1767,8 +2316,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) * buffer description may contains CRC len, packet len should * subtract it, same as data len. */ - pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)); - first_seg->pkt_len = pkt_len; + first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len); /* * This is the last buffer of the received packet. If the CRC @@ -1784,15 +2332,15 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (unlikely(rxq->crc_len > 0)) { first_seg->pkt_len -= rxq->crc_len; recalculate_data_len(first_seg, last_seg, rxm, rxq, - data_len); + rxm->data_len); } first_seg->port = rxq->port_id; first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); first_seg->ol_flags = PKT_RX_RSS_HASH; - if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) { + if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { first_seg->hash.fdir.hi = - rte_le_to_cpu_32(rxd.rx.fd_id); + rte_le_to_cpu_16(rxd.rx.fd_id); first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; } @@ -1803,13 +2351,15 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) first_seg->tso_segsz = gro_size; } + l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); + ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info, l234_info, &cksum_err); if (unlikely(ret)) goto pkt_err; - first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info, - ol_info); + first_seg->packet_type = hns3_rx_calc_ptype(rxq, + l234_info, ol_info); if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) hns3_rx_set_cksum_flag(first_seg, @@ -1825,41 +2375,163 @@ pkt_err: first_seg = NULL; } - rxq->next_to_clean = rx_id; + rxq->next_to_use = rx_id; rxq->pkt_first_seg = first_seg; rxq->pkt_last_seg = last_seg; - nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold; - if (nb_rx_bd > rxq->rx_free_thresh) { - hns3_clean_rx_buffers(rxq, nb_rx_bd); - nb_rx_bd = 0; + rxq->rx_free_hold += nb_rx_bd; + if (rxq->rx_free_hold > rxq->rx_free_thresh) { + hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold); + rxq->rx_free_hold = 0; } - rxq->nb_rx_hold = nb_rx_bd; return nb_rx; } +void __rte_weak +hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq) +{ +} + +int __rte_weak +hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev) +{ + return -ENOTSUP; +} + +uint16_t __rte_weak +hns3_recv_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __rte_weak +hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + static const struct { + eth_rx_burst_t pkt_burst; + const char *info; + } burst_infos[] = { + { hns3_recv_pkts, "Scalar" }, + { hns3_recv_scattered_pkts, "Scalar 
Scattered" }, + { hns3_recv_pkts_vec, "Vector Neon" }, + { hns3_recv_pkts_vec_sve, "Vector Sve" }, + }; + + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + int ret = -EINVAL; + unsigned int i; + + for (i = 0; i < RTE_DIM(burst_infos); i++) { + if (pkt_burst == burst_infos[i].pkt_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", + burst_infos[i].info); + ret = 0; + break; + } + } + + return ret; +} + +static bool +hns3_check_sve_support(void) +{ +#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE)) + return true; +#endif + return false; +} + +static eth_rx_burst_t +hns3_get_rx_function(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + uint64_t offloads = dev->data->dev_conf.rxmode.offloads; + + if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0) + return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve : + hns3_recv_pkts_vec; + + if (hns->rx_simple_allowed && !dev->data->scattered_rx && + (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0) + return hns3_recv_pkts; + + return hns3_recv_scattered_pkts; +} + +static int +hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf, + uint16_t nb_desc, uint16_t *tx_rs_thresh, + uint16_t *tx_free_thresh, uint16_t idx) +{ +#define HNS3_TX_RS_FREE_THRESH_GAP 8 + uint16_t rs_thresh, free_thresh, fast_free_thresh; + + if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || + nb_desc % HNS3_ALIGN_RING_DESC) { + hns3_err(hw, "number (%u) of tx descriptors is invalid", + nb_desc); + return -EINVAL; + } + + rs_thresh = (conf->tx_rs_thresh > 0) ? + conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH; + free_thresh = (conf->tx_free_thresh > 0) ? + conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH; + if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh || + rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP || + free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) { + hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc " + "(%u) of tx descriptors for port=%u queue=%u check " + "fail!", + rs_thresh, free_thresh, nb_desc, hw->data->port_id, + idx); + return -EINVAL; + } + + if (conf->tx_free_thresh == 0) { + /* Fast free Tx memory buffer to improve cache hit rate */ + fast_free_thresh = nb_desc - rs_thresh; + if (fast_free_thresh >= + HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH) + free_thresh = fast_free_thresh - + HNS3_TX_FAST_FREE_AHEAD; + } + + *tx_rs_thresh = rs_thresh; + *tx_free_thresh = free_thresh; + return 0; +} + int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *conf) { struct hns3_adapter *hns = dev->data->dev_private; + uint16_t tx_rs_thresh, tx_free_thresh; struct hns3_hw *hw = &hns->hw; struct hns3_queue_info q_info; struct hns3_tx_queue *txq; int tx_entry_len; + int ret; - if (dev->data->dev_started) { - hns3_err(hw, "tx_queue_setup after dev_start no supported"); - return -EINVAL; - } - - if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || - nb_desc % HNS3_ALIGN_RING_DESC) { - hns3_err(hw, "Number (%u) of tx descriptors is invalid", - nb_desc); - return -EINVAL; - } + ret = hns3_tx_queue_conf_check(hw, conf, nb_desc, + &tx_rs_thresh, &tx_free_thresh, idx); + if (ret) + return ret; if (dev->data->tx_queues[idx] != NULL) { hns3_tx_queue_release(dev->data->tx_queues[idx]); @@ -1879,6 +2551,11 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, } txq->tx_deferred_start = 
conf->tx_deferred_start; + if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { + hns3_warn(hw, "deferred start is not supported."); + txq->tx_deferred_start = false; + } + tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc; txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len, RTE_CACHE_LINE_SIZE, socket_id); @@ -1892,11 +2569,40 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, txq->next_to_use = 0; txq->next_to_clean = 0; txq->tx_bd_ready = txq->nb_tx_desc - 1; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_rs_thresh = tx_rs_thresh; + txq->free = rte_zmalloc_socket("hns3 TX mbuf free array", + sizeof(struct rte_mbuf *) * txq->tx_rs_thresh, + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq->free) { + hns3_err(hw, "failed to allocate tx mbuf free array!"); + hns3_tx_queue_release(txq); + return -ENOMEM; + } + txq->port_id = dev->data->port_id; - txq->pvid_state = hw->port_base_vlan_cfg.state; + /* + * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, + * the pvid_sw_shift_en in the queue struct should not be changed, + * because PVID-related operations do not need to be processed by PMD + * driver. For hns3 VF device, whether it needs to process PVID depends + * on the configuration of PF kernel mode netdev driver. And the + * related PF configuration is delivered through the mailbox and finally + * reflectd in port_base_vlan_cfg. + */ + if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) + txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == + HNS3_PORT_BASE_VLAN_ENABLE; + else + txq->pvid_sw_shift_en = false; + txq->max_non_tso_bd_num = hw->max_non_tso_bd_num; txq->configured = true; - txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + - idx * HNS3_TQP_REG_SIZE); + txq->io_base = (void *)((char *)hw->io_base + + hns3_get_tqp_reg_offset(idx)); + txq->io_tail_reg = (volatile void *)((char *)txq->io_base + + HNS3_RING_TX_TAIL_REG); + txq->min_tx_pkt_len = hw->min_tx_pkt_len; + txq->tso_mode = hw->tso_mode; txq->over_length_pkt_cnt = 0; txq->exceed_limit_bd_pkt_cnt = 0; txq->exceed_limit_bd_reassem_fail = 0; @@ -1910,12 +2616,6 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, return 0; } -static inline void -hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num) -{ - hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num); -} - static void hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) { @@ -1927,7 +2627,8 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; struct rte_mbuf *mbuf; - while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) && + while ((!(desc->tx.tp_fe_sc_vld_ra_ri & + rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) && tx_next_use != tx_next_clean) { mbuf = tx_bak_pkt->mbuf; if (mbuf) { @@ -1951,44 +2652,6 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) txq->tx_bd_ready = tx_bd_ready; } -static int -hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags, - struct rte_mbuf *rxm, uint8_t *l2_len) -{ - uint64_t tun_flags; - uint8_t ol4_len; - uint32_t otmp; - - tun_flags = ol_flags & PKT_TX_TUNNEL_MASK; - if (tun_flags == 0) - return 0; - - otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec); - switch (tun_flags) { - case PKT_TX_TUNNEL_GENEVE: - case PKT_TX_TUNNEL_VXLAN: - *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN; - break; - case PKT_TX_TUNNEL_GRE: - /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(GRE) length and 
tunneling length. - */ - ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S); - *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT); - break; - default: - /* For non UDP / GRE tunneling, drop the tunnel packet */ - return -EINVAL; - } - hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - rxm->outer_l2_len >> HNS3_L2_LEN_UNIT); - desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp); - - return 0; -} - int hns3_config_gro(struct hns3_hw *hw, bool en) { @@ -2033,31 +2696,15 @@ hns3_pkt_is_tso(struct rte_mbuf *m) } static void -hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags, - uint32_t paylen, struct rte_mbuf *rxm) +hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm) { - uint8_t l2_len = rxm->l2_len; - uint32_t tmp; - if (!hns3_pkt_is_tso(rxm)) return; - if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len)) - return; - if (paylen <= rxm->tso_segsz) return; - tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len); - hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1); - hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); - hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - l2_len >> HNS3_L2_LEN_UNIT); - desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp); + desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B)); desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz); } @@ -2082,7 +2729,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, rxm->outer_l2_len + rxm->outer_l3_len : 0; paylen = rxm->pkt_len - hdr_len; desc->tx.paylen = rte_cpu_to_le_32(paylen); - hns3_set_tso(desc, ol_flags, paylen, rxm); + hns3_set_tso(desc, paylen, rxm); /* * Currently, hardware doesn't support more than two layers VLAN offload @@ -2096,7 +2743,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * To avoid the VLAN of Tx descriptor is overwritten by PVID, it should * be added to the position close to the IP header when PVID is enabled. 
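 *
 * For example (illustrative only, not part of this patch):
 * - PVID disabled and PKT_TX_VLAN_PKT set: vlan_tci is written to the
 *   outer VLAN field of the descriptor (the HNS3_TXD_OVLAN_B branch below).
 * - PVID enabled (pvid_sw_shift_en) and PKT_TX_VLAN_PKT set: vlan_tci is
 *   written to the inner VLAN field (the HNS3_TXD_VLAN_B branch below), so
 *   the tag stays next to the IP header while the PVID takes the outer slot.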
*/ - if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT | + if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B)); @@ -2109,44 +2756,31 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, } if (ol_flags & PKT_TX_QINQ_PKT || - ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) { + ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) { desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B)); desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci); } } -static int -hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool, - uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf) +static inline int +hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf, + struct rte_mbuf **alloc_mbuf) { - struct rte_mbuf *new_mbuf = NULL; - struct rte_eth_dev *dev; - struct rte_mbuf *temp; - struct hns3_hw *hw; +#define MAX_NON_TSO_BD_PER_PKT 18 + struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT]; uint16_t i; /* Allocate enough mbufs */ - for (i = 0; i < nb_new_buf; i++) { - temp = rte_pktmbuf_alloc(mb_pool); - if (unlikely(temp == NULL)) { - dev = &rte_eth_devices[txq->port_id]; - hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - hns3_err(hw, "Failed to alloc TX mbuf port_id=%d," - "queue_id=%d in reassemble tx pkts.", - txq->port_id, txq->queue_id); - rte_pktmbuf_free(new_mbuf); - return -ENOMEM; - } - temp->next = new_mbuf; - new_mbuf = temp; - } - - if (new_mbuf == NULL) + if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf)) return -ENOMEM; - new_mbuf->nb_segs = nb_new_buf; - *alloc_mbuf = new_mbuf; + for (i = 0; i < nb_new_buf - 1; i++) + pkt_segs[i]->next = pkt_segs[i + 1]; + + pkt_segs[nb_new_buf - 1]->next = NULL; + pkt_segs[0]->nb_segs = nb_new_buf; + *alloc_mbuf = pkt_segs[0]; return 0; } @@ -2166,10 +2800,9 @@ hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt) } static int -hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, - struct rte_mbuf **new_pkt) +hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt, + uint8_t max_non_tso_bd_num) { - struct hns3_tx_queue *txq = tx_queue; struct rte_mempool *mb_pool; struct rte_mbuf *new_mbuf; struct rte_mbuf *temp_new; @@ -2181,7 +2814,6 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, uint16_t len_s; uint16_t len_d; uint16_t len; - uint16_t i; int ret; char *s; char *d; @@ -2189,7 +2821,7 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, mb_pool = tx_pkt->pool; buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM; nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1; - if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT) + if (nb_new_buf > max_non_tso_bd_num) return -EINVAL; last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size; @@ -2197,7 +2829,7 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, last_buf_len = buf_size; /* Allocate enough mbufs */ - ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf); + ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf); if (ret) return ret; @@ -2206,12 +2838,9 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, s = rte_pktmbuf_mtod(temp, char *); len_s = rte_pktmbuf_data_len(temp); temp_new = new_mbuf; - for (i = 0; i < nb_new_buf; i++) { + while (temp != NULL && temp_new != NULL) { d = rte_pktmbuf_mtod(temp_new, char *); - if (i < nb_new_buf - 1) - buf_len = buf_size; - else - buf_len = 
last_buf_len; + buf_len = temp_new->next == NULL ? last_buf_len : buf_size; len_d = buf_len; while (len_d) { @@ -2245,186 +2874,220 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, } static void -hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec) +hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec) { uint32_t tmp = *ol_type_vlan_len_msec; + uint64_t ol_flags = m->ol_flags; /* (outer) IP header type */ if (ol_flags & PKT_TX_OUTER_IPV4) { - /* OL3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); if (ol_flags & PKT_TX_OUTER_IP_CKSUM) - hns3_set_field(tmp, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); else - hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); } else if (ol_flags & PKT_TX_OUTER_IPV6) { - hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV6); - /* OL3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); + tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); } - + /* OL3 header size, defined in 4 bytes */ + tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + m->outer_l3_len >> HNS3_L3_LEN_UNIT); *ol_type_vlan_len_msec = tmp; } static int -hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec, - struct rte_net_hdr_lens *hdr_lens) +hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, + uint32_t *type_cs_vlan_tso_len) { - uint32_t tmp = *ol_type_vlan_len_msec; - uint8_t l4_len; - - /* OL2 header size, defined in 2 bytes */ - hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); +#define HNS3_NVGRE_HLEN 8 + uint32_t tmp_outer = *ol_type_vlan_len_msec; + uint32_t tmp_inner = *type_cs_vlan_tso_len; + uint64_t ol_flags = m->ol_flags; + uint16_t inner_l2_len; - /* L4TUNT: L4 Tunneling Type */ switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_VXLAN_GPE: case PKT_TX_TUNNEL_GENEVE: case PKT_TX_TUNNEL_VXLAN: - /* MAC in UDP tunnelling packet, include VxLAN */ - hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */ + tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP); /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(UDP) length and tunneling length. + * The inner l2 length of mbuf is the sum of outer l4 length, + * tunneling header length and inner l2 length for a tunnel + * packect. But in hns3 tx descriptor, the tunneling header + * length is contained in the field of outer L4 length. + * Therefore, driver need to calculate the outer L4 length and + * inner L2 length. 
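 *
 * Worked example (illustrative only, not part of this patch): for a VXLAN
 * packet the application reports m->l2_len = 8 (outer UDP) + 8 (VXLAN) +
 * 14 (inner Ethernet) = 30 bytes; the driver then writes
 * RTE_ETHER_VXLAN_HLEN (16) into the outer L4 length field and
 * m->l2_len - RTE_ETHER_VXLAN_HLEN = 14 into the inner L2 length field.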
*/ - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - (uint8_t)RTE_ETHER_VXLAN_HLEN >> - HNS3_L4_LEN_UNIT); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (uint8_t)RTE_ETHER_VXLAN_HLEN >> + HNS3_L4_LEN_UNIT); + + inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN; break; case PKT_TX_TUNNEL_GRE: - hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* - * OL4 header size, defined in 4 Bytes, it contains outer - * L4(GRE) length and tunneling length. + * For NVGRE tunnel packect, the outer L4 is empty. So only + * fill the NVGRE header length to the outer L4 field. */ - l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len; - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - l4_len >> HNS3_L4_LEN_UNIT); + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT); + + inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN; break; default: /* For non UDP / GRE tunneling, drop the tunnel packet */ return -EINVAL; } - *ol_type_vlan_len_msec = tmp; + tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + inner_l2_len >> HNS3_L2_LEN_UNIT); + /* OL2 header size, defined in 2 bytes */ + tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + m->outer_l2_len >> HNS3_L2_LEN_UNIT); + + *type_cs_vlan_tso_len = tmp_inner; + *ol_type_vlan_len_msec = tmp_outer; return 0; } static int -hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - uint64_t ol_flags, - struct rte_net_hdr_lens *hdr_lens) +hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, + uint16_t tx_desc_id) { struct hns3_desc *tx_ring = txq->tx_ring; struct hns3_desc *desc = &tx_ring[tx_desc_id]; - uint32_t value = 0; + uint32_t tmp_outer = 0; + uint32_t tmp_inner = 0; int ret; - hns3_parse_outer_params(ol_flags, &value); - ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens); - if (ret) - return -EINVAL; + /* + * The tunnel header is contained in the inner L2 header field of the + * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, + * there is a need that switching between them. To avoid multiple + * calculations, the length of the L2 header include the outer and + * inner, will be filled during the parsing of tunnel packects. + */ + if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) { + /* + * For non tunnel type the tunnel type id is 0, so no need to + * assign a value to it. Only the inner(normal) L2 header length + * is assigned. + */ + tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT); + } else { + /* + * If outer csum is not offload, the outer length may be filled + * with 0. And the length of the outer header is added to the + * inner l2_len. It would lead a cksum error. So driver has to + * calculate the header length. 
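 *
 * For example (illustrative only, not part of this patch), an application
 * that does request outer checksum offload for a VXLAN/IPv4/TCP frame is
 * expected to fill the metadata itself, so this fallback is never taken:
 *	m->outer_l2_len = sizeof(struct rte_ether_hdr);
 *	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l2_len = RTE_ETHER_VXLAN_HLEN + sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
 *		       PKT_TX_TUNNEL_VXLAN | PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 *		       PKT_TX_TCP_CKSUM;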
+ */ + if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) && + m->outer_l2_len == 0)) { + struct rte_net_hdr_lens hdr_len; + (void)rte_net_get_ptype(m, &hdr_len, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + m->outer_l3_len = hdr_len.l3_len; + m->outer_l2_len = hdr_len.l2_len; + m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len; + } + hns3_parse_outer_params(m, &tmp_outer); + ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner); + if (ret) + return -EINVAL; + } - desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value); + desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer); + desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner); return 0; } static void -hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) { + uint64_t ol_flags = m->ol_flags; + uint32_t l3_type; uint32_t tmp; + tmp = *type_cs_vlan_tso_len; + if (ol_flags & PKT_TX_IPV4) + l3_type = HNS3_L3T_IPV4; + else if (ol_flags & PKT_TX_IPV6) + l3_type = HNS3_L3T_IPV6; + else + l3_type = HNS3_L3T_NONE; + + /* inner(/normal) L3 header size, defined in 4 bytes */ + tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + m->l3_len >> HNS3_L3_LEN_UNIT); + + tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type); + /* Enable L3 checksum offloads */ - if (ol_flags & PKT_TX_IPV4) { - tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, - HNS3_L3T_IPV4); - /* inner(/normal) L3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); - if (ol_flags & PKT_TX_IP_CKSUM) - hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); - *type_cs_vlan_tso_len = tmp; - } else if (ol_flags & PKT_TX_IPV6) { - tmp = *type_cs_vlan_tso_len; - /* L3T, IPv6 don't do checksum */ - hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, - HNS3_L3T_IPV6); - /* inner(/normal) L3 header size, defined in 4 bytes */ - hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, - sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; - } + if (ol_flags & PKT_TX_IP_CKSUM) + tmp |= BIT(HNS3_TXD_L3CS_B); + *type_cs_vlan_tso_len = tmp; } static void -hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) { + uint64_t ol_flags = m->ol_flags; uint32_t tmp; - /* Enable L4 checksum offloads */ - switch (ol_flags & PKT_TX_L4_MASK) { + switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) { case PKT_TX_TCP_CKSUM: + case PKT_TX_TCP_SEG: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_TCP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case PKT_TX_UDP_CKSUM: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_UDP); - hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case PKT_TX_SCTP_CKSUM: tmp = *type_cs_vlan_tso_len; - hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); - hns3_set_bit(tmp, 
HNS3_TXD_L4CS_B, 1); - hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT); - *type_cs_vlan_tso_len = tmp; + tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: - break; + return; } + tmp |= BIT(HNS3_TXD_L4CS_B); + tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + m->l4_len >> HNS3_L4_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; } static void -hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - uint64_t ol_flags) +hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m, + uint16_t tx_desc_id) { struct hns3_desc *tx_ring = txq->tx_ring; struct hns3_desc *desc = &tx_ring[tx_desc_id]; uint32_t value = 0; - /* inner(/normal) L2 header size, defined in 2 bytes */ - hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, - sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); - - hns3_parse_l3_cksum_params(ol_flags, &value); - hns3_parse_l4_cksum_params(ol_flags, &value); + hns3_parse_l3_cksum_params(m, &value); + hns3_parse_l4_cksum_params(m, &value); desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value); } static bool -hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) +hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, + uint32_t max_non_tso_bd_num) { struct rte_mbuf *m_first = tx_pkts; struct rte_mbuf *m_last = tx_pkts; @@ -2439,10 +3102,10 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) * frags greater than gso header len + mss, and the remaining 7 * consecutive frags greater than MSS except the last 7 frags. */ - if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT) + if (bd_num <= max_non_tso_bd_num) return false; - for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1; + for (i = 0; m_last && i < max_non_tso_bd_num - 1; i++, m_last = m_last->next) tot_len += m_last->data_len; @@ -2460,7 +3123,7 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) * ensure the sum of the data length of every 7 consecutive buffer * is greater than mss except the last one. */ - for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) { + for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) { tot_len -= m_first->data_len; tot_len += m_last->data_len; @@ -2478,26 +3141,29 @@ static void hns3_outer_header_cksum_prepare(struct rte_mbuf *m) { uint64_t ol_flags = m->ol_flags; - struct rte_ipv4_hdr *ipv4_hdr; - struct rte_udp_hdr *udp_hdr; - uint32_t paylen, hdr_len; + uint32_t paylen, hdr_len, l4_proto; if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) return; - if (ol_flags & PKT_TX_IPV4) { + if (ol_flags & PKT_TX_OUTER_IPV4) { + struct rte_ipv4_hdr *ipv4_hdr; ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->outer_l2_len); - - if (ol_flags & PKT_TX_IP_CKSUM) + l4_proto = ipv4_hdr->next_proto_id; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) ipv4_hdr->hdr_checksum = 0; + } else { + struct rte_ipv6_hdr *ipv6_hdr; + ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, + m->outer_l2_len); + l4_proto = ipv6_hdr->proto; } - - if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM && - ol_flags & PKT_TX_TCP_SEG) { + /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ + if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) { + struct rte_udp_hdr *udp_hdr; hdr_len = m->l2_len + m->l3_len + m->l4_len; - hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? 
- m->outer_l2_len + m->outer_l3_len : 0; + hdr_len += m->outer_l2_len + m->outer_l3_len; paylen = m->pkt_len - hdr_len; if (paylen <= m->tso_segsz) return; @@ -2554,7 +3220,7 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) struct rte_ether_hdr *eh; struct rte_vlan_hdr *vh; - if (!txq->pvid_state) + if (!txq->pvid_sw_shift_en) return 0; /* @@ -2589,43 +3255,66 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) } #endif -uint16_t -hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +static int +hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m) { - struct rte_mbuf *m; - uint16_t i; int ret; - for (i = 0; i < nb_pkts; i++) { - m = tx_pkts[i]; +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return ret; + } - if (hns3_pkt_is_tso(m) && - (hns3_pkt_need_linearized(m, m->nb_segs) || - hns3_check_tso_pkt_valid(m))) { + ret = hns3_vld_vlan_chk(tx_queue, m); + if (ret != 0) { + rte_errno = EINVAL; + return ret; + } +#endif + if (hns3_pkt_is_tso(m)) { + if (hns3_pkt_need_linearized(m, m->nb_segs, + tx_queue->max_non_tso_bd_num) || + hns3_check_tso_pkt_valid(m)) { rte_errno = EINVAL; - return i; + return -EINVAL; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG - ret = rte_validate_tx_offload(m); - if (ret != 0) { - rte_errno = -ret; - return i; + if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) { + /* + * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means + * hardware support recalculate the TCP pseudo header + * checksum of packets that need TSO, so network driver + * software not need to recalculate it. + */ + hns3_outer_header_cksum_prepare(m); + return 0; } + } - if (hns3_vld_vlan_chk(tx_queue, m)) { - rte_errno = EINVAL; - return i; - } -#endif - ret = rte_net_intel_cksum_prepare(m); - if (ret != 0) { - rte_errno = -ret; - return i; - } + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return ret; + } + + hns3_outer_header_cksum_prepare(m); + + return 0; +} + +uint16_t +hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *m; + uint16_t i; - hns3_outer_header_cksum_prepare(m); + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + if (hns3_prep_pkt_proc(tx_queue, m)) + return i; } return i; @@ -2633,20 +3322,25 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, static int hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, - const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens) + struct rte_mbuf *m) { - /* Fill in tunneling parameters if necessary */ - if (m->ol_flags & PKT_TX_TUNNEL_MASK) { - (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK); - if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags, - hdr_lens)) { + struct hns3_desc *tx_ring = txq->tx_ring; + struct hns3_desc *desc = &tx_ring[tx_desc_id]; + + /* Enable checksum offloading */ + if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) { + /* Fill in tunneling parameters if necessary */ + if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) { txq->unsupported_tunnel_pkt_cnt++; - return -EINVAL; + return -EINVAL; } + + hns3_txd_enable_checksum(txq, m, tx_desc_id); + } else { + /* clear the control bit */ + desc->tx.type_cs_vlan_tso_len = 0; + desc->tx.ol_type_vlan_len_msec = 0; } - /* Enable checksum offloading */ - if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) - hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags); return 0; } @@ -2655,6 +3349,7 @@ 
static int hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq) { + uint8_t max_non_tso_bd_num; struct rte_mbuf *new_pkt; int ret; @@ -2670,9 +3365,11 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, return -EINVAL; } - if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) { + max_non_tso_bd_num = txq->max_non_tso_bd_num; + if (unlikely(nb_buf > max_non_tso_bd_num)) { txq->exceed_limit_bd_pkt_cnt++; - ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt); + ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt, + max_non_tso_bd_num); if (ret) { txq->exceed_limit_bd_reassem_fail++; return ret; @@ -2683,10 +3380,157 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, return 0; } +static inline void +hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) +{ + struct hns3_entry *tx_entry; + struct hns3_desc *desc; + uint16_t tx_next_clean; + int i; + + while (1) { + if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh) + break; + + /* + * All mbufs can be released only when the VLD bits of all + * descriptors in a batch are cleared. + */ + tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) % + txq->nb_tx_desc; + desc = &txq->tx_ring[tx_next_clean]; + for (i = 0; i < txq->tx_rs_thresh; i++) { + if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) & + BIT(HNS3_TXD_VLD_B)) + return; + desc--; + } + + tx_entry = &txq->sw_ring[txq->next_to_clean]; + + for (i = 0; i < txq->tx_rs_thresh; i++) + rte_prefetch0((tx_entry + i)->mbuf); + for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) { + rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf); + tx_entry->mbuf = NULL; + } + + txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc; + txq->tx_bd_ready += txq->tx_rs_thresh; + } +} + +static inline void +hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts) +{ + tx_entry->mbuf = pkts[0]; +} + +static inline void +hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts) +{ + hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]); + hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]); + hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]); + hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]); +} + +static inline void +hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +{ +#define PER_LOOP_NUM 4 + const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint64_t dma_addr; + uint32_t i; + + for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) { + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->addr = rte_cpu_to_le_64(dma_addr); + txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len); + txdp->tx.paylen = 0; + txdp->tx.type_cs_vlan_tso_len = 0; + txdp->tx.ol_type_vlan_len_msec = 0; + txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); + } +} + +static inline void +hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +{ + const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint64_t dma_addr; + + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->addr = rte_cpu_to_le_64(dma_addr); + txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len); + txdp->tx.paylen = 0; + txdp->tx.type_cs_vlan_tso_len = 0; + txdp->tx.ol_type_vlan_len_msec = 0; + txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); +} + +static inline void +hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq, + struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ +#define PER_LOOP_NUM 4 +#define PER_LOOP_MASK (PER_LOOP_NUM - 1) + struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use]; + struct 
hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use]; + const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK)); + const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK)); + uint32_t i; + + for (i = 0; i < mainpart; i += PER_LOOP_NUM) { + hns3_tx_backup_4mbuf(tx_entry + i, pkts + i); + hns3_tx_setup_4bd(txdp + i, pkts + i); + } + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; i++) { + hns3_tx_backup_1mbuf(tx_entry + mainpart + i, + pkts + mainpart + i); + hns3_tx_setup_1bd(txdp + mainpart + i, + pkts + mainpart + i); + } + } +} + +uint16_t +hns3_xmit_pkts_simple(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct hns3_tx_queue *txq = tx_queue; + uint16_t nb_tx = 0; + + hns3_tx_free_buffer_simple(txq); + + nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts); + if (unlikely(nb_pkts == 0)) { + if (txq->tx_bd_ready == 0) + txq->queue_full_cnt++; + return 0; + } + + txq->tx_bd_ready -= nb_pkts; + if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) { + nb_tx = txq->nb_tx_desc - txq->next_to_use; + hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx); + txq->next_to_use = 0; + } + + hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); + txq->next_to_use += nb_pkts - nb_tx; + + hns3_write_reg_opt(txq->io_tail_reg, nb_pkts); + + return nb_pkts; +} + uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct rte_net_hdr_lens hdr_lens = {0}; struct hns3_tx_queue *txq = tx_queue; struct hns3_entry *tx_bak_pkt; struct hns3_desc *tx_ring; @@ -2725,14 +3569,16 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } /* - * If packet length is less than minimum packet size, driver - * need to pad it. + * If packet length is less than minimum packet length supported + * by hardware in Tx direction, driver need to pad it to avoid + * error. 
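 *
 * (Illustrative note, not part of this patch: rte_pktmbuf_append() only
 * succeeds when the last segment still has add_len bytes of tailroom, and
 * the appended area is not initialised by the mbuf API, so the padding
 * bytes also have to be zeroed before the descriptor is posted; if the
 * append fails the packet cannot be transmitted.)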
*/ - if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) { + if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < + txq->min_tx_pkt_len)) { uint16_t add_len; char *appended; - add_len = HNS3_MIN_PKT_SIZE - + add_len = txq->min_tx_pkt_len - rte_pktmbuf_pkt_len(tx_pkt); appended = rte_pktmbuf_append(tx_pkt, add_len); if (appended == NULL) { @@ -2748,7 +3594,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq)) goto end_of_tx; - if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens)) + if (hns3_parse_cksum(txq, tx_next_use, m_seg)) goto end_of_tx; i = 0; @@ -2792,11 +3638,79 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) end_of_tx: if (likely(nb_tx)) - hns3_queue_xmit(txq, nb_hold); + hns3_write_reg_opt(txq->io_tail_reg, nb_hold); return nb_tx; } +int __rte_weak +hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev) +{ + return -ENOTSUP; +} + +uint16_t __rte_weak +hns3_xmit_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __rte_weak +hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + const char *info = NULL; + + if (pkt_burst == hns3_xmit_pkts_simple) + info = "Scalar Simple"; + else if (pkt_burst == hns3_xmit_pkts) + info = "Scalar"; + else if (pkt_burst == hns3_xmit_pkts_vec) + info = "Vector Neon"; + else if (pkt_burst == hns3_xmit_pkts_vec_sve) + info = "Vector Sve"; + + if (info == NULL) + return -EINVAL; + + snprintf(mode->info, sizeof(mode->info), "%s", info); + + return 0; +} + +static eth_tx_burst_t +hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) +{ + uint64_t offloads = dev->data->dev_conf.txmode.offloads; + struct hns3_adapter *hns = dev->data->dev_private; + + if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) { + *prep = NULL; + return hns3_check_sve_support() ? 
hns3_xmit_pkts_vec_sve : + hns3_xmit_pkts_vec; + } + + if (hns->tx_simple_allowed && + offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) { + *prep = NULL; + return hns3_xmit_pkts_simple; + } + + *prep = hns3_prep_pkts; + return hns3_xmit_pkts; +} + static uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, struct rte_mbuf **pkts __rte_unused, @@ -2808,12 +3722,13 @@ hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) { struct hns3_adapter *hns = eth_dev->data->dev_private; + eth_tx_prep_t prep = NULL; if (hns->hw.adapter_state == HNS3_NIC_STARTED && rte_atomic16_read(&hns->hw.reset.resetting) == 0) { - eth_dev->rx_pkt_burst = hns3_recv_pkts; - eth_dev->tx_pkt_burst = hns3_xmit_pkts; - eth_dev->tx_pkt_prepare = hns3_prep_pkts; + eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev); + eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep); + eth_dev->tx_pkt_prepare = prep; } else { eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst; eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst; @@ -2830,6 +3745,8 @@ hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->mp = rxq->mb_pool; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->scattered_rx = dev->data->scattered_rx; + /* Report the HW Rx buffer length to user */ + qinfo->rx_buf_size = rxq->rx_buf_len; /* * If there are no available Rx buffer descriptors, incoming packets @@ -2849,5 +3766,143 @@ hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->nb_desc = txq->nb_tx_desc; qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } + +int +hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + int ret; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); + if (ret) { + hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", + rx_queue_id, ret); + return ret; + } + + ret = hns3_init_rxq(hns, rx_queue_id); + if (ret) { + hns3_err(hw, "fail to init Rx queue %u, ret = %d.", + rx_queue_id, ret); + return ret; + } + + hns3_enable_rxq(rxq, true); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +static void +hns3_reset_sw_rxq(struct hns3_rx_queue *rxq) +{ + rxq->next_to_use = 0; + rxq->rx_rearm_start = 0; + rxq->rx_free_hold = 0; + rxq->rx_rearm_nb = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc)); + hns3_rxq_vec_setup(rxq); +} + +int +hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + hns3_enable_rxq(rxq, false); + + hns3_rx_queue_release_mbufs(rxq); + + hns3_reset_sw_rxq(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; 
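	/*
	 * Illustrative usage, not part of this patch: with independent
	 * per-queue start/stop supported, an application may set
	 * tx_deferred_start = 1 in struct rte_eth_txconf when calling
	 * rte_eth_tx_queue_setup(), start the port with rte_eth_dev_start()
	 * and only later enable the queue via rte_eth_dev_tx_queue_start(),
	 * which is expected to reach this function through the ethdev
	 * tx_queue_start hook.
	 */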
+ int ret; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); + if (ret) { + hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", + tx_queue_id, ret); + return ret; + } + + hns3_init_txq(txq); + hns3_enable_txq(txq, true); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +int +hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; + + if (!hns3_dev_indep_txrx_supported(hw)) + return -ENOTSUP; + + hns3_enable_txq(txq, false); + hns3_tx_queue_release_mbufs(txq); + /* + * All the mbufs in sw_ring are released and all the pointers in sw_ring + * are set to NULL. If this queue is still called by upper layer, + * residual SW status of this txq may cause these pointers in sw_ring + * which have been set to NULL to be released again. To avoid it, + * reinit the txq. + */ + hns3_init_txq(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +uint32_t +hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + /* + * Number of BDs that have been processed by the driver + * but have not been notified to the hardware. + */ + uint32_t driver_hold_bd_num; + struct hns3_rx_queue *rxq; + uint32_t fbd_num; + + rxq = dev->data->rx_queues[rx_queue_id]; + fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG); + if (dev->rx_pkt_burst == hns3_recv_pkts_vec || + dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) + driver_hold_bd_num = rxq->rx_rearm_nb; + else + driver_hold_bd_num = rxq->rx_free_hold; + + if (fbd_num <= driver_hold_bd_num) + return 0; + else + return fbd_num - driver_hold_bd_num; +}
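
/*
 * Illustrative usage sketch, not part of this patch. It assumes
 * <rte_ethdev.h> and <stdio.h> are included and that hns3_rx_queue_count()
 * above is wired to the ethdev rx_queue_count callback: the value returned
 * to the application is fbd_num minus the BDs the driver still holds, so a
 * steadily growing number means the polling core is falling behind.
 */
static inline void
app_log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used >= 0)
		printf("port %u rxq %u: %d Rx descriptors pending\n",
		       port_id, queue_id, used);
}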