/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <inttypes.h>
#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
-#include <rte_dev.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
+#include <rte_geneve.h>
#include <rte_vxlan.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_io.h>
-#include <rte_ip.h>
-#include <rte_gre.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#include <rte_pci.h>
+#if defined(RTE_ARCH_ARM64)
+#include <rte_cpuflags.h>
+#include <rte_vect.h>
+#endif
-#include "hns3_ethdev.h"
+#include "hns3_common.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
+#include "hns3_mp.h"
#define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
#define HNS3_RX_RING_PREFETCTH_MASK 3
{
uint16_t i;
- /* Note: Fake rx queue will not enter here */
+ /* Note: Fake tx queue will not enter here */
if (txq->sw_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
if (txq->sw_ring[i].mbuf) {
}
}
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
{
struct hns3_rx_queue *rxq = queue;
struct hns3_adapter *hns;
}
void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
{
struct hns3_tx_queue *txq = queue;
struct hns3_adapter *hns;
rte_spinlock_unlock(&hns->hw.lock);
}
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(mbuf == NULL)) {
- hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+ hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
i);
hns3_rx_queue_release_mbufs(rxq);
return -ENOMEM;
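+	/*
+	 * The 64-bit ring base DMA address is programmed as two 32-bit
+	 * halves: the low word first, then the high word via
+	 * (uint32_t)(dma_addr >> 32).
+	 */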
hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
- (uint32_t)((dma_addr >> 31) >> 1));
+ (uint32_t)(dma_addr >> 32));
hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
hns3_buf_size2type(rx_buf_len));
hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
- (uint32_t)((dma_addr >> 31) >> 1));
+ (uint32_t)(dma_addr >> 32));
hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}
void
-hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
+hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
{
uint16_t nb_rx_q = hw->data->nb_rx_queues;
uint16_t nb_tx_q = hw->data->nb_tx_queues;
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
- int pvid_state;
+ bool pvid_en;
int i;
- pvid_state = hw->port_base_vlan_cfg.state;
+ pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
for (i = 0; i < hw->cfg_max_queues; i++) {
if (i < nb_rx_q) {
rxq = hw->data->rx_queues[i];
if (rxq != NULL)
- rxq->pvid_state = pvid_state;
+ rxq->pvid_sw_discard_en = pvid_en;
}
if (i < nb_tx_q) {
txq = hw->data->tx_queues[i];
if (txq != NULL)
- txq->pvid_state = pvid_state;
+ txq->pvid_sw_shift_en = pvid_en;
}
}
}
+static void
+hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
+{
+ uint32_t reg_offset;
+ uint32_t reg;
+
+ reg_offset = queue_type == HNS3_RING_TYPE_TX ?
+ HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
+ reg = hns3_read_reg(tqp_base, reg_offset);
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_reg(tqp_base, reg_offset, reg);
+}
+
void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
uint32_t rcb_reg;
+ void *tqp_base;
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (i < nb_rx_q)
- rxq = hw->data->rx_queues[i];
- else
- rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
- if (i < nb_tx_q)
- txq = hw->data->tx_queues[i];
- else
- txq = hw->fkq_data.tx_queues[i - nb_tx_q];
- if (rxq == NULL || txq == NULL ||
- (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
- continue;
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
+ txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+
+ tqp_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(i));
+			/*
+			 * If the queue struct is not initialized, the
+			 * related HW ring has not been initialized yet, so
+			 * such queues must be disabled before enabling the
+			 * TQPs to avoid a HW exception, since the queues are
+			 * enabled by default.
+			 */
+ if (rxq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_RX);
+ if (txq == NULL)
+ hns3_stop_unused_queue(tqp_base,
+ HNS3_RING_TYPE_TX);
+ } else {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
+ hw->fkq_data.rx_queues[i - nb_rx_q];
- rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
+ tqp_base = rxq->io_base;
+ }
+		/*
+		 * This is the master switch used to control the enabling of
+		 * a pair of Tx and Rx queues. Both the Rx and Tx point to
+		 * the same register.
+		 */
+ rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
if (en)
rcb_reg |= BIT(HNS3_RING_EN_B);
else
rcb_reg &= ~BIT(HNS3_RING_EN_B);
- hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
+ hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
+ }
+}
+
+static void
+hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
+{
+ struct hns3_hw *hw = &txq->hns->hw;
+ uint32_t reg;
+
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
+ reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
+ }
+ txq->enabled = en;
+}
+
+static void
+hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
+{
+ struct hns3_hw *hw = &rxq->hns->hw;
+ uint32_t reg;
+
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
+ reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
+ }
+ rxq->enabled = en;
+}
+
+int
+hns3_start_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq) {
+			hns3_err(hw, "Tx queue %u not available or not set up.", i);
+ goto start_txqs_fail;
+ }
+		/*
+		 * A Tx queue is enabled by default. Therefore, it needs to
+		 * be disabled when deferred_start is set. There is another
+		 * master switch used to control the enabling of a pair of
+		 * Tx and Rx queues, and that master switch is disabled by
+		 * default.
+		 */
+ if (txq->tx_deferred_start)
+ hns3_enable_txq(txq, false);
+ else
+ hns3_enable_txq(txq, true);
+ }
+ return 0;
+
+start_txqs_fail:
+ for (j = 0; j < i; j++) {
+ txq = hw->data->tx_queues[j];
+ hns3_enable_txq(txq, false);
+ }
+ return -EINVAL;
+}
+
+int
+hns3_start_all_rxqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (!rxq) {
+			hns3_err(hw, "Rx queue %u not available or not set up.", i);
+ goto start_rxqs_fail;
+ }
+		/*
+		 * An Rx queue is enabled by default. Therefore, it needs to
+		 * be disabled when deferred_start is set. There is another
+		 * master switch used to control the enabling of a pair of
+		 * Tx and Rx queues, and that master switch is disabled by
+		 * default.
+		 */
+ if (rxq->rx_deferred_start)
+ hns3_enable_rxq(rxq, false);
+ else
+ hns3_enable_rxq(rxq, true);
+ }
+ return 0;
+
+start_rxqs_fail:
+ for (j = 0; j < i; j++) {
+ rxq = hw->data->rx_queues[j];
+ hns3_enable_rxq(rxq, false);
+ }
+ return -EINVAL;
+}
+
+void
+hns3_restore_tqp_enable_state(struct hns3_hw *hw)
+{
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq != NULL)
+ hns3_enable_rxq(rxq, rxq->enabled);
+ }
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq != NULL)
+ hns3_enable_txq(txq, txq->enabled);
+ }
+}
+
+void
+hns3_stop_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq)
+ continue;
+ hns3_enable_txq(txq, false);
}
}
req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
req->stream_id = 0;
hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
-
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
+ hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
+ "ret = %d", queue_id, ret);
return ret;
}
static int
-hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
+hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t *reset_status)
{
struct hns3_reset_tqp_queue_cmd *req;
struct hns3_cmd_desc desc;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Get reset status error, ret =%d", ret);
+ hns3_err(hw, "get tqp reset status error, queue_id = %u, "
+ "ret = %d.", queue_id, ret);
return ret;
}
-
- return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
}
static int
-hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
+hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS 200
- uint64_t end;
- int reset_status;
+ uint16_t wait_time = 0;
+ uint8_t reset_status;
int ret;
- ret = hns3_tqp_enable(hw, queue_id, false);
- if (ret)
- return ret;
-
/*
* In current version VF is not supported when PF is driven by DPDK
* driver, all task queue pairs are mapped to PF function, so PF's queue
hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
return ret;
}
- ret = -ETIMEDOUT;
- end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
+
do {
/* Wait for tqp hw reset */
rte_delay_ms(HNS3_POLL_RESPONE_MS);
- reset_status = hns3_get_reset_status(hw, queue_id);
- if (reset_status) {
- ret = 0;
+ wait_time += HNS3_POLL_RESPONE_MS;
+ ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
+ if (ret)
+ goto tqp_reset_fail;
+
+ if (reset_status)
break;
- }
- } while (get_timeofday_ms() < end);
+ } while (wait_time < HNS3_TQP_RESET_TRY_MS);
- if (ret) {
- hns3_err(hw, "Reset TQP fail, ret = %d", ret);
- return ret;
+ if (!reset_status) {
+ ret = -ETIMEDOUT;
+ hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
+ queue_id, ret);
+ goto tqp_reset_fail;
}
ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
return ret;
+
+tqp_reset_fail:
+ hns3_send_reset_tqp_cmd(hw, queue_id, false);
+ return ret;
}
static int
uint8_t msg_data[2];
int ret;
- /* Disable VF's queue before send queue reset msg to PF */
- ret = hns3_tqp_enable(hw, queue_id, false);
+ memcpy(msg_data, &queue_id, sizeof(uint16_t));
+
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ sizeof(msg_data), true, NULL, 0);
+ if (ret)
+ hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
+ queue_id, ret);
+ return ret;
+}
+
+static int
+hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
+{
+ struct hns3_reset_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+ req = (struct hns3_reset_cmd *)desc.data;
+ hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);
+
+	/*
+	 * The start qid should be the global qid of the first tqp of the
+	 * function which should be reset in this port. Since our PF does not
+	 * support taking over VFs, we only need to reset function 0, and its
+	 * start qid is always 0.
+	 */
+ req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
+ req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
+ return ret;
+ }
+
+ *reset_status = req->fun_reset_rcb_return_status;
+ return 0;
+}
+
+static int
+hns3pf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3_RESET_RCB_NOT_SUPPORT 0U
+#define HNS3_RESET_ALL_TQP_SUCCESS 1U
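+	/*
+	 * Status codes reported by firmware in fun_reset_rcb_return_status:
+	 * 0 means this firmware does not support the rcb reset, 1 means all
+	 * tqps were reset successfully.
+	 */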
+ uint8_t reset_status;
+ int ret;
+ int i;
+
+ ret = hns3_reset_rcb_cmd(hw, &reset_status);
if (ret)
return ret;
- memcpy(msg_data, &queue_id, sizeof(uint16_t));
+	/*
+	 * If the firmware version is low, it may not support the rcb reset,
+	 * which resets all the tqps at a time. In this case, reset the tqps
+	 * one by one.
+	 */
+ if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
+ for (i = 0; i < hw->cfg_max_queues; i++) {
+ ret = hns3pf_reset_tqp(hw, i);
+ if (ret) {
+ hns3_err(hw,
+ "fail to reset tqp, queue_id = %d, ret = %d.",
+ i, ret);
+ return ret;
+ }
+ }
+ } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
+ hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
+ reset_status);
+ return -EIO;
+ }
- return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
- sizeof(msg_data), true, NULL, 0);
+ return 0;
}
static int
-hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
+hns3vf_reset_all_tqps(struct hns3_hw *hw)
{
- struct hns3_hw *hw = &hns->hw;
- if (hns->is_vf)
- return hns3vf_reset_tqp(hw, queue_id);
- else
- return hns3_reset_tqp(hw, queue_id);
+#define HNS3VF_RESET_ALL_TQP_DONE 1U
+ uint8_t reset_status;
+ uint8_t msg_data[2];
+ int ret;
+ int i;
+
+ memset(msg_data, 0, sizeof(uint16_t));
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ sizeof(msg_data), true, &reset_status,
+ sizeof(reset_status));
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
+ return ret;
+ }
+
+ if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
+ return 0;
+
+	/*
+	 * If the firmware version or kernel PF version is low, it may not
+	 * support the rcb reset, which resets all the tqps at a time. In
+	 * this case, reset the tqps one by one. Queue 0 has already been
+	 * reset by the mailbox request above (msg_data carries queue id 0),
+	 * so start from queue 1.
+	 */
+ for (i = 1; i < hw->cfg_max_queues; i++) {
+ ret = hns3vf_reset_tqp(hw, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int
-hns3_reset_all_queues(struct hns3_adapter *hns)
+hns3_reset_all_tqps(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
int ret, i;
+	/* Disable all queues before resetting them */
for (i = 0; i < hw->cfg_max_queues; i++) {
- ret = hns3_reset_queue(hns, i);
+ ret = hns3_tqp_enable(hw, i, false);
if (ret) {
- hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
+ hns3_err(hw,
+ "fail to disable tqps before tqps reset, ret = %d.",
+ ret);
return ret;
}
}
- return 0;
+
+ if (hns->is_vf)
+ return hns3vf_reset_all_tqps(hw);
+ else
+ return hns3pf_reset_all_tqps(hw);
+}
+
+static int
+hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type, bool enable)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+ hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+}
+
+static int
+hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type,
+ uint8_t *reset_status)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get queue reset status error, queue_id = %u "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+ }
+
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
+}
+
+static int
+hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type)
+{
+#define HNS3_QUEUE_RESET_TRY_MS 200
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint32_t reset_wait_times;
+ uint32_t max_wait_times;
+ uint8_t reset_status;
+ int ret;
+
+ if (queue_type == HNS3_RING_TYPE_TX) {
+ txq = hw->data->tx_queues[queue_id];
+ hns3_enable_txq(txq, false);
+ } else {
+ rxq = hw->data->rx_queues[queue_id];
+ hns3_enable_rxq(rxq, false);
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
+ if (ret) {
+ hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
+ return ret;
+ }
+
+ reset_wait_times = 0;
+ max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
+ while (reset_wait_times < max_wait_times) {
+ /* Wait for queue hw reset */
+ rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ ret = hns3_get_queue_reset_status(hw, queue_id,
+ queue_type, &reset_status);
+ if (ret)
+ goto queue_reset_fail;
+
+ if (reset_status)
+ break;
+ reset_wait_times++;
+ }
+
+ if (!reset_status) {
+ hns3_err(hw, "reset queue timeout, queue_id = %u, "
+ "queue_type = %s", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
+ ret = -ETIMEDOUT;
+ goto queue_reset_fail;
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ if (ret)
+ hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
+
+ return ret;
+
+queue_reset_fail:
+ hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ return ret;
+}
+
+uint32_t
+hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
+{
+ uint32_t reg_offset;
+
+	/* Need an extended offset to configure the interrupts of queues > 64 */
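+	/*
+	 * e.g. with HNS3_MIN_EXT_TQP_INTR_ID == 64, interrupt 70 lands at
+	 * HNS3_TQP_INTR_EXT_REG_BASE + (70 / 64) * HIGH_ORDER_OFFSET +
+	 * (70 % 64) * LOW_ORDER_OFFSET (macro values are in hns3_regs.h).
+	 */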
+ if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
+ reg_offset = HNS3_TQP_INTR_REG_BASE +
+ tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+ else
+ reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
+ tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
+ tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+
+ return reg_offset;
}
void
if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
return;
- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
else
if (rl_value > HNS3_TQP_INTR_RL_MAX)
return;
- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = HNS3_RL_USEC_TO_REG(rl_value);
if (value > 0)
value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
{
uint32_t addr;
- if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+	/*
+	 * int_ql_max == 0 means the hardware does not support QL.
+	 * Configuring the QL registers is not permitted when QL is
+	 * unsupported, so just return.
+	 */
+ if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
return;
- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
}
{
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (dev->data->dev_conf.intr_conf.rxq == 0)
}
static int
-hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
if (ret) {
- hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
+ hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
idx, ret);
return ret;
}
}
static void
-hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
}
static void
-hns3_init_tx_queue(struct hns3_tx_queue *queue)
+hns3_init_txq(struct hns3_tx_queue *txq)
{
- struct hns3_tx_queue *txq = queue;
struct hns3_desc *desc;
int i;
}
static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_tx_queue *txq;
+ int i, num;
- txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
-static void
-hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
-
- txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
-static void
-hns3_init_tx_ring_tc(struct hns3_adapter *hns)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
- int i, num;
-
- for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
- struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
- int j;
+ for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
+ int j;
if (!tc_queue->enable)
continue;
}
static int
-hns3_start_rx_queues(struct hns3_adapter *hns)
+hns3_init_rx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
- int i, j;
+ uint16_t i, j;
int ret;
/* Initialize RSS for queues */
ret = hns3_config_rss(hns);
if (ret) {
- hns3_err(hw, "Failed to configure rss %d", ret);
+ hns3_err(hw, "failed to configure rss, ret = %d.", ret);
return ret;
}
for (i = 0; i < hw->data->nb_rx_queues; i++) {
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+		if (!rxq) {
+			hns3_err(hw, "Rx queue %u not available or not set up.", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+ if (rxq->rx_deferred_start)
continue;
- ret = hns3_dev_rx_queue_start(hns, i);
+
+ ret = hns3_init_rxq(hns, i);
if (ret) {
- hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+ hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
ret);
goto out;
}
}
- for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
- rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
- continue;
- hns3_fake_rx_queue_start(hns, i);
- }
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
+ hns3_init_fake_rxq(hns, i);
+
return 0;
out:
return ret;
}
-static void
-hns3_start_tx_queues(struct hns3_adapter *hns)
+static int
+hns3_init_tx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_tx_queue *txq;
- int i;
+ uint16_t i;
for (i = 0; i < hw->data->nb_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (!txq) {
+			hns3_err(hw, "Tx queue %u not available or not set up.", i);
+ return -EINVAL;
+ }
+
+ if (txq->tx_deferred_start)
continue;
- hns3_dev_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
- continue;
- hns3_fake_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
-
hns3_init_tx_ring_tc(hns);
+
+ return 0;
}
/*
- * Start all queues.
- * Note: just init and setup queues, and don't enable queue rx&tx.
+ * Init all queues.
+ * Note: just init and setup queues, and don't enable tqps.
*/
int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
{
struct hns3_hw *hw = &hns->hw;
int ret;
if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
+ hns3_err(hw, "failed to reset all queues, ret = %d.",
+ ret);
return ret;
}
}
- ret = hns3_start_rx_queues(hns);
+ ret = hns3_init_rx_queues(hns);
if (ret) {
- hns3_err(hw, "Failed to start rx queues: %d", ret);
+ hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
return ret;
}
- hns3_start_tx_queues(hns);
+ ret = hns3_init_tx_queues(hns);
+ if (ret) {
+ hns3_dev_release_mbufs(hns);
+ hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
+ }
- return 0;
+ return ret;
}
-int
-hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
+void
+hns3_start_tqps(struct hns3_hw *hw)
{
- struct hns3_hw *hw = &hns->hw;
- int ret;
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint16_t i;
- hns3_enable_all_queues(hw, false);
- if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
- if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
- return ret;
- }
+ hns3_enable_all_queues(hw, true);
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq->enabled)
+ hw->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
}
- return 0;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq->enabled)
+ hw->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+}
+
+void
+hns3_stop_tqps(struct hns3_hw *hw)
+{
+ uint16_t i;
+
+ hns3_enable_all_queues(hw, false);
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++)
+ hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++)
+ hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
/*
rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
q_info->idx);
return NULL;
}
rx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
q_info->idx);
hns3_rx_queue_release(rxq);
return NULL;
rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
rxq->rx_ring_phys_addr = rx_mz->iova;
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
rxq->rx_ring_phys_addr);
return rxq;
q_info.ring_name = "rx_fake_ring";
rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
return -ENOMEM;
}
txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
q_info->idx);
return NULL;
}
tx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
q_info->idx);
hns3_tx_queue_release(txq);
return NULL;
txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
txq->tx_ring_phys_addr = tx_mz->iova;
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
txq->tx_ring_phys_addr);
/* Clear tx bd */
q_info.ring_name = "tx_fake_ring";
txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
return -ENOMEM;
}
{
uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
void **rxq;
- uint8_t i;
+ uint16_t i;
if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
/* first time configuration */
/* re-configure */
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rte_free(hw->fkq_data.rx_queues);
hw->fkq_data.rx_queues = NULL;
{
uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
void **txq;
- uint8_t i;
+ uint16_t i;
if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
/* first time configuration */
/* re-configure */
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
rte_free(hw->fkq_data.tx_queues);
hw->fkq_data.tx_queues = NULL;
uint16_t q;
int ret;
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
+ return 0;
+
/* Setup new number of fake RX/TX queues and reconfigure device. */
- hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
if (ret) {
hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
- goto cfg_fake_rx_q_fail;
+ return ret;
}
ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
(void)hns3_fake_tx_queue_config(hw, 0);
cfg_fake_tx_q_fail:
(void)hns3_fake_rx_queue_config(hw, 0);
-cfg_fake_rx_q_fail:
- hw->cfg_max_queues = 0;
return ret;
}
if (dev_data->rx_queues)
for (i = 0; i < dev_data->nb_rx_queues; i++) {
rxq = dev_data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ if (rxq == NULL)
continue;
hns3_rx_queue_release_mbufs(rxq);
}
if (dev_data->tx_queues)
for (i = 0; i < dev_data->nb_tx_queues; i++) {
txq = dev_data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (txq == NULL)
continue;
hns3_tx_queue_release_mbufs(txq);
}
vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
-
if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
return -EINVAL;
return 0;
}
+static int
+hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
+ uint16_t nb_desc)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
+ uint16_t min_vec_bds;
+
+	/*
+	 * The HNS3 hardware network engine enables scattered Rx by default.
+	 * If the driver does not work in scattered mode, packets larger than
+	 * buf_size but smaller than the frame size will be distributed across
+	 * multiple BDs, which the driver cannot handle.
+	 */
+ if (!hw->data->scattered_rx && frame_size > buf_size) {
+ hns3_err(hw, "frame size is not allowed to be set greater "
+ "than rx_buf_len if scattered is off.");
+ return -EINVAL;
+ }
+
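+	/*
+	 * The vector Rx path rearms descriptors in batches of
+	 * HNS3_DEFAULT_RXQ_REARM_THRESH and bursts up to
+	 * HNS3_DEFAULT_RX_BURST BDs per call, so the ring must hold at least
+	 * their sum and be a multiple of the rearm threshold.
+	 */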
+ if (pkt_burst == hns3_recv_pkts_vec) {
+ min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
+ HNS3_DEFAULT_RX_BURST;
+ if (nb_desc < min_vec_bds ||
+ nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
+			hns3_err(hw, "if Rx burst mode is vector, "
+				"the number of descriptors is required to be "
+				"no less than min vector bds:%u, and "
+				"divisible by rxq rearm thresh:%u.",
+				min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int
hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp, uint16_t nb_desc,
uint16_t *buf_size)
{
+ int ret;
+
if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
nb_desc % HNS3_ALIGN_RING_DESC) {
hns3_err(hw, "Number (%u) of rx descriptors is invalid",
return -EINVAL;
}
+ if (hw->data->dev_started) {
+ ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
+ if (ret) {
+ hns3_err(hw, "Rx queue runtime setup fail.");
+ return ret;
+ }
+ }
+
return 0;
}
+uint32_t
+hns3_get_tqp_reg_offset(uint16_t queue_id)
+{
+ uint32_t reg_offset;
+
+	/* Need an extended offset to configure queues > 1024 */
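+	/*
+	 * e.g. queue 1025 lands at HNS3_TQP_REG_OFFSET +
+	 * HNS3_TQP_EXT_REG_OFFSET + (1025 - HNS3_MIN_EXTEND_QUEUE_ID) *
+	 * HNS3_TQP_REG_SIZE, where HNS3_MIN_EXTEND_QUEUE_ID is 1024.
+	 */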
+ if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
+ reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
+ else
+ reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
+ (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
+ HNS3_TQP_REG_SIZE;
+
+ return reg_offset;
+}
+
int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_rxconf *conf,
int rx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "rx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
if (ret)
return ret;
rxq->mb_pool = mp;
rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
+
rxq->rx_deferred_start = conf->rx_deferred_start;
+ if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ rxq->rx_deferred_start = false;
+ }
rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
sizeof(struct hns3_entry);
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
rxq->port_id = dev->data->port_id;
- rxq->pvid_state = hw->port_base_vlan_cfg.state;
+	/*
+	 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+	 * the pvid_sw_discard_en in the queue struct should not be changed,
+	 * because PVID-related operations do not need to be processed by the
+	 * PMD. For hns3 VF device, whether it needs to process PVID depends
+	 * on the configuration of the PF kernel mode netdev driver, and the
+	 * related PF configuration is delivered through the mailbox and
+	 * finally reflected in port_base_vlan_cfg.
+	 */
+ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
+ HNS3_PORT_BASE_VLAN_ENABLE;
+ else
+ rxq->pvid_sw_discard_en = false;
+ rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
rxq->configured = true;
-	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
-			idx * HNS3_TQP_REG_SIZE);
+ rxq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
- rxq->l2_errors = 0;
- rxq->pkt_len_errors = 0;
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
+ memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
+ memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+ memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->rx_buf_len);
}
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
- dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
+ dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
dev->data->scattered_rx = true;
}
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L2_ETHER_VLAN,
- RTE_PTYPE_L2_ETHER_QINQ,
RTE_PTYPE_L2_ETHER_LLDP,
RTE_PTYPE_L2_ETHER_ARP,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_UNKNOWN
+ };
+ static const uint32_t adv_layout_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_IGMP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_ICMP,
RTE_PTYPE_UNKNOWN
};
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec)
- return ptypes;
+ dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
+ return adv_layout_ptypes;
+ else
+ return ptypes;
+ }
return NULL;
}
-void
-hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
+static void
+hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
{
- struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_ptype_table *tbl = &hns->ptype_tbl;
-
- memset(tbl, 0, sizeof(*tbl));
-
- tbl->l2table[0] = RTE_PTYPE_L2_ETHER;
- tbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;
- tbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;
- tbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;
-
- tbl->l3table[0] = RTE_PTYPE_L3_IPV4;
- tbl->l3table[1] = RTE_PTYPE_L3_IPV6;
+ tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
- tbl->l3table[3] = RTE_PTYPE_L2_ETHER;
- tbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;
- tbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;
+ tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
+ tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
tbl->l4table[0] = RTE_PTYPE_L4_UDP;
tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
+}
- tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
- tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
- tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
-
- tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
- tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
- tbl->inner_l3table[2] = 0;
- tbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;
- tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
- tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
+static void
+hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
+{
+ tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4;
+ tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6;
+	/* There is no ptype for inner ARP/RARP */
+ tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
+ tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
+ tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT;
+ tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT;
tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
- tbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;
+	/* There is no ptype for inner GRE */
+ tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
- tbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;
+	/* There is no ptype for inner IGMP */
+ tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
- tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
- tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
- tbl->ol3table[2] = 0;
- tbl->ol3table[3] = 0;
- tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
- tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
+ tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+ tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
+ tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
+ tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
+ tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
- tbl->ol4table[0] = 0;
- tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
+ tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
+ tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
+static void
+hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
+{
+ uint32_t *ptype = tbl->ptype;
+
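+	/*
+	 * The table index is the packet type value reported by hardware in
+	 * the Rx descriptor when the RXD advanced layout is used; entries
+	 * left unset stay 0, i.e. RTE_PTYPE_UNKNOWN, thanks to the caller's
+	 * memset.
+	 */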
+ /* Non-tunnel L2 */
+ ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
+ ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
+ ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
+
+ /* Non-tunnel IPv4 */
+ ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
+ ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* The next ptype is PTP over IPv4 + UDP */
+ ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* Non-tunnel IPv6 */
+ ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
+ ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP;
+ ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_IGMP;
+ ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP;
+ /* Special for PTP over IPv6 + UDP */
+ ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT;
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP;
+ ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP;
+ /* The next ptype's inner L4 is IGMP */
+ ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP;
+}
+
+void
+hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_ptype_table *tbl = &hns->ptype_tbl;
+
+ memset(tbl, 0, sizeof(*tbl));
+
+ hns3_init_non_tunnel_ptype_tbl(tbl);
+ hns3_init_tunnel_ptype_tbl(tbl);
+ hns3_init_adv_layout_ptype(tbl);
+}
+
static inline void
hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
uint32_t l234_info, const struct hns3_desc *rxd)
};
strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S);
- report_mode = report_type[rxq->pvid_state][strip_status];
+ report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
switch (report_mode) {
case HNS3_NO_STRP_VLAN_VLD:
mb->vlan_tci = 0;
return;
case HNS3_INNER_STRP_VLAN_VLD:
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
return;
case HNS3_OUTER_STRP_VLAN_VLD:
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
return;
+ default:
+ mb->vlan_tci = 0;
+ return;
}
}
return rte_mbuf_raw_alloc(rxq->mb_pool);
}
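+/*
+ * hns3_timestamp_dynfield_offset and hns3_timestamp_rx_dynflag are the mbuf
+ * dynamic field/flag used for Rx timestamps, presumably registered elsewhere
+ * in the driver when PTP support is set up; a non-positive dynflag means the
+ * dynamic timestamp field is not in use.
+ */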
+static inline void
+hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+ volatile struct hns3_desc *rxd)
+{
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+ uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
+
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
+ if (hns3_timestamp_rx_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = timestamp;
+ mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
+ }
+
+ pf->rx_timestamp = timestamp;
+}
+
uint16_t
-hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+hns3_recv_pkts_simple(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
volatile struct hns3_desc *rxdp; /* pointer of the current desc */
struct rte_mbuf *nmb; /* pointer of the new mbuf */
struct rte_mbuf *rxm;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t ol_info;
uint64_t dma_addr;
}
rxm = rxe->mbuf;
+ rxm->ol_flags = 0;
rxe->mbuf = nmb;
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
+
dma_addr = rte_mbuf_data_iova_default(nmb);
rxdp->addr = rte_cpu_to_le_64(dma_addr);
rxdp->rx.bd_base_info = 0;
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- rxm->ol_flags = PKT_RX_RSS_HASH;
+ rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
- rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
rxm->nb_segs = 1;
rxm->next = NULL;
/* Load remained descriptor data and extract necessary fields */
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
- ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
- l234_info, &cksum_err);
+ ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
if (unlikely(ret))
goto pkt_err;
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
- cksum_err);
+ if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += rxm->pkt_len;
+
rx_pkts[nb_rx++] = rxm;
continue;
pkt_err:
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t gro_size;
uint32_t ol_info;
continue;
}
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
+
/*
* The last buffer of the received packet. packet len from
* buffer description may contains CRC len, packet len should
first_seg->port = rxq->port_id;
first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- first_seg->ol_flags = PKT_RX_RSS_HASH;
+ first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
first_seg->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
- first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
if (gro_size != 0) {
- first_seg->ol_flags |= PKT_RX_LRO;
+ first_seg->ol_flags |= RTE_MBUF_F_RX_LRO;
first_seg->tso_segsz = gro_size;
}
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
- l234_info, &cksum_err);
+ l234_info);
if (unlikely(ret))
goto pkt_err;
first_seg->packet_type = hns3_rx_calc_ptype(rxq,
l234_info, ol_info);
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(first_seg,
- first_seg->packet_type,
- cksum_err);
+ if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+			first_seg->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
+ /* Increment bytes counter */
+ rxq->basic_stats.bytes += first_seg->pkt_len;
+
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
continue;
uint16_t __rte_weak
-hns3_recv_pkts_vec(__rte_unused void *tx_queue,
+hns3_recv_pkts_vec(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
+uint16_t __rte_weak
+hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
int
hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
eth_rx_burst_t pkt_burst;
const char *info;
} burst_infos[] = {
- { hns3_recv_pkts, "Scalar" },
+ { hns3_recv_pkts_simple, "Scalar Simple" },
{ hns3_recv_scattered_pkts, "Scalar Scattered" },
- { hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec_sve, "Vector Sve" },
};
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
return ret;
}
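+/*
+ * The vector Rx paths are gated on both the EAL's maximum SIMD bitwidth
+ * (e.g. limited via the --force-max-simd-bitwidth EAL option) and the
+ * runtime CPU flags: Neon requires at least 128-bit SIMD, SVE at least
+ * 256-bit plus RTE_HAS_SVE_ACLE support at build time.
+ */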
+static bool
+hns3_get_default_vec_support(void)
+{
+#if defined(RTE_ARCH_ARM64)
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ return true;
+#endif
+ return false;
+}
+
+static bool
+hns3_get_sve_support(void)
+{
+#if defined(RTE_HAS_SVE_ACLE)
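+ /* The SVE path requires ACLE build support and at least 256-bit SIMD */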
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
+ return true;
+#endif
+ return false;
+}
+
static eth_rx_burst_t
hns3_get_rx_function(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
+ bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support;
- if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
- return hns3_recv_pkts_vec;
+ vec_support = hns3_rx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
+ simple_allowed = !dev->data->scattered_rx &&
+ (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
- if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
- (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
- return hns3_recv_pkts;
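+ /*
+ * Honor the user's rx_func_hint first; otherwise fall back in the
+ * order vector > simple > common (scattered).
+ */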
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_recv_pkts_vec_sve;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
+ return hns3_recv_pkts_simple;
+ if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
+ return hns3_recv_scattered_pkts;
+
+ if (vec_allowed)
+ return hns3_recv_pkts_vec;
+ if (simple_allowed)
+ return hns3_recv_pkts_simple;
return hns3_recv_scattered_pkts;
}
if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
- hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
- "(%d) of tx descriptors for port=%d queue=%d check "
+ hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+ "(%u) of tx descriptors for port=%u queue=%u check "
"fail!",
rs_thresh, free_thresh, nb_desc, hw->data->port_id,
idx);
return 0;
}
+static void *
+hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define HNS3_TX_PUSH_TQP_REGION_SIZE 0x10000
+#define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET 64
+#define HNS3_TX_PUSH_PCI_BAR_INDEX 4
+
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
+
+ /*
+ * If the device supports Tx push, its PCIe bar45 must exist, and the
+ * DPDK framework mmaps bar45 by default during the PCI probe stage.
+ *
+ * In bar45, the first half is for RoCE (RDMA over Converged
+ * Ethernet) and the second half is for NIC; every TQP occupies 64KB.
+ *
+ * The quick doorbell is located at a 64B offset within the TQP region.
+ */
+ return (char *)pci_dev->mem_resource[bar_id].addr +
+ (pci_dev->mem_resource[bar_id].len >> 1) +
+ HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
+ HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
+}
+
+void
+hns3_tx_push_init(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ volatile uint32_t *reg;
+ uint32_t val;
+
+ if (!hns3_dev_get_support(hw, TX_PUSH))
+ return;
+
+ reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
+ /*
+ * Because bar45 is about 8GB in size, the page faults taken in the Tx
+ * path may be slow when working with vfio-pci, so do one read here to
+ * make the kernel set up the page table mapping for bar45 during the
+ * init stage.
+ * Note: bar45 is readable, but the value read back is all 1s.
+ */
+ val = *reg;
+ RTE_SET_USED(val);
+}
+
+static void
+hns3_tx_push_queue_init(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ struct hns3_tx_queue *txq)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hns3_dev_get_support(hw, TX_PUSH)) {
+ txq->tx_push_enable = false;
+ return;
+ }
+
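+ /*
+ * Point io_tail_reg at the quick doorbell in bar45 instead of the
+ * normal tail register.
+ */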
+ txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
+ queue_id);
+ txq->tx_push_enable = true;
+}
+
int
hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_txconf *conf)
int tx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "tx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
&tx_rs_thresh, &tx_free_thresh, idx);
if (ret)
}
txq->tx_deferred_start = conf->tx_deferred_start;
+ if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ txq->tx_deferred_start = false;
+ }
+
tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
RTE_CACHE_LINE_SIZE, socket_id);
}
txq->port_id = dev->data->port_id;
- txq->pvid_state = hw->port_base_vlan_cfg.state;
+ /*
+ * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+ * the pvid_sw_shift_en in the queue struct should not be changed,
+ * because PVID-related operations do not need to be processed by the
+ * PMD. For the hns3 VF device, whether PVID needs to be processed
+ * depends on the configuration of the PF kernel-mode netdev driver,
+ * and the related PF configuration is delivered through the mailbox
+ * and finally reflected in port_base_vlan_cfg.
+ */
+ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
+ HNS3_PORT_BASE_VLAN_ENABLE;
+ else
+ txq->pvid_sw_shift_en = false;
+ txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
txq->configured = true;
- txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
- idx * HNS3_TQP_REG_SIZE);
+ txq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
+ txq->tso_mode = hw->tso_mode;
+ txq->udp_cksum_mode = hw->udp_cksum_mode;
+ txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
+ memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
+ memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
+ /*
+ * Call hns3_tx_push_queue_init after the io_tail_reg field has been
+ * assigned, because it may overwrite that field.
+ */
+ hns3_tx_push_queue_init(dev, idx, txq);
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
return 0;
}
-static void
+static int
hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
{
uint16_t tx_next_clean = txq->next_to_clean;
- uint16_t tx_next_use = txq->next_to_use;
- uint16_t tx_bd_ready = txq->tx_bd_ready;
- uint16_t tx_bd_max = txq->nb_tx_desc;
- struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
+ uint16_t tx_next_use = txq->next_to_use;
+ struct hns3_entry *tx_entry = &txq->sw_ring[tx_next_clean];
struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
- struct rte_mbuf *mbuf;
+ int i;
- while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
- rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
- tx_next_use != tx_next_clean) {
- mbuf = tx_bak_pkt->mbuf;
- if (mbuf) {
- rte_pktmbuf_free_seg(mbuf);
- tx_bak_pkt->mbuf = NULL;
- }
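+ /*
+ * If next_to_use falls inside the tx_rs_thresh-sized batch starting
+ * at next_to_clean, that batch still contains unsubmitted BDs and
+ * cannot be cleaned yet.
+ */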
+ if (tx_next_use >= tx_next_clean &&
+ tx_next_use < tx_next_clean + txq->tx_rs_thresh)
+ return -1;
- desc++;
- tx_bak_pkt++;
- tx_next_clean++;
- tx_bd_ready++;
-
- if (tx_next_clean >= tx_bd_max) {
- tx_next_clean = 0;
- desc = txq->tx_ring;
- tx_bak_pkt = txq->sw_ring;
- }
+ /*
+ * All mbufs can be released only when the VLD bits of all
+ * descriptors in a batch are cleared.
+ */
+ for (i = 0; i < txq->tx_rs_thresh; i++) {
+ if (desc[i].tx.tp_fe_sc_vld_ra_ri &
+ rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
+ return -1;
}
- txq->next_to_clean = tx_next_clean;
- txq->tx_bd_ready = tx_bd_ready;
-}
+ for (i = 0; i < txq->tx_rs_thresh; i++) {
+ rte_pktmbuf_free_seg(tx_entry[i].mbuf);
+ tx_entry[i].mbuf = NULL;
+ }
-static int
-hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
- struct rte_mbuf *rxm, uint8_t *l2_len)
-{
- uint64_t tun_flags;
- uint8_t ol4_len;
- uint32_t otmp;
+ /* Update the number of available descriptors now that buffers are freed */
+ txq->tx_bd_ready += txq->tx_rs_thresh;
+ txq->next_to_clean += txq->tx_rs_thresh;
+ if (txq->next_to_clean >= txq->nb_tx_desc)
+ txq->next_to_clean = 0;
- tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
- if (tun_flags == 0)
- return 0;
+ return 0;
+}
- otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
- switch (tun_flags) {
- case PKT_TX_TUNNEL_GENEVE:
- case PKT_TX_TUNNEL_VXLAN:
- *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
- break;
- case PKT_TX_TUNNEL_GRE:
- /*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(GRE) length and tunneling length.
- */
- ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S);
- *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
- break;
- default:
- /* For non UDP / GRE tunneling, drop the tunnel packet */
- return -EINVAL;
+static inline int
+hns3_tx_free_required_buffer(struct hns3_tx_queue *txq, uint16_t required_bds)
+{
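+ /*
+ * Free completed BDs in tx_rs_thresh batches until enough are
+ * available or no fully completed batch remains.
+ */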
+ while (required_bds > txq->tx_bd_ready) {
+ if (hns3_tx_free_useless_buffer(txq) != 0)
+ return -1;
}
- hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
- desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
-
return 0;
}
int ret;
offloads = hw->data->dev_conf.rxmode.offloads;
- gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
static inline bool
hns3_pkt_is_tso(struct rte_mbuf *m)
{
- return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+ return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
}
static void
-hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
- uint32_t paylen, struct rte_mbuf *rxm)
+hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
{
- uint8_t l2_len = rxm->l2_len;
- uint32_t tmp;
-
if (!hns3_pkt_is_tso(rxm))
return;
- if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
- return;
-
if (paylen <= rxm->tso_segsz)
return;
- tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
- hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
- hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
- hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- l2_len >> HNS3_L2_LEN_UNIT);
- desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+ desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
}
{
desc->addr = rte_mbuf_data_iova(rxm);
desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
- desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
+ desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
}
static void
uint32_t paylen;
hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
- hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
- desc->tx.paylen = rte_cpu_to_le_32(paylen);
- hns3_set_tso(desc, ol_flags, paylen, rxm);
+ desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
+ hns3_set_tso(desc, paylen, rxm);
/*
* Currently, hardware doesn't support more than two layers of VLAN
* offload. To avoid the VLAN in the Tx descriptor being overwritten by
* the PVID, it should be added at the position close to the IP header
* when PVID is enabled.
*/
- if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
- PKT_TX_QINQ_PKT)) {
+ if (!txq->pvid_sw_shift_en &&
+ ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
- if (ol_flags & PKT_TX_QINQ_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_QINQ)
desc->tx.outer_vlan_tag =
rte_cpu_to_le_16(rxm->vlan_tci_outer);
else
rte_cpu_to_le_16(rxm->vlan_tci);
}
- if (ol_flags & PKT_TX_QINQ_PKT ||
- ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ ||
+ ((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
+
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ desc->tx.tp_fe_sc_vld_ra_ri |=
+ rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
static inline int
}
static int
-hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt)
+hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
+ uint8_t max_non_tso_bd_num)
{
struct rte_mempool *mb_pool;
struct rte_mbuf *new_mbuf;
mb_pool = tx_pkt->pool;
buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
- if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
+ if (nb_new_buf > max_non_tso_bd_num)
return -EINVAL;
last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
}
static void
-hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
+hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
{
uint32_t tmp = *ol_type_vlan_len_msec;
+ uint64_t ol_flags = m->ol_flags;
/* (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IPV4) {
- /* OL3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- hns3_set_field(tmp, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
else
- hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
- } else if (ol_flags & PKT_TX_OUTER_IPV6) {
- hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV6);
- /* OL3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
+ } else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
+ tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
}
-
+ /* OL3 header size, defined in 4 bytes */
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+ m->outer_l3_len >> HNS3_L3_LEN_UNIT);
*ol_type_vlan_len_msec = tmp;
}
static int
-hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
- struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+ uint32_t *type_cs_vlan_tso_len)
{
- uint32_t tmp = *ol_type_vlan_len_msec;
- uint8_t l4_len;
-
- /* OL2 header size, defined in 2 bytes */
- hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
-
- /* L4TUNT: L4 Tunneling Type */
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_GENEVE:
- case PKT_TX_TUNNEL_VXLAN:
- /* MAC in UDP tunnelling packet, include VxLAN */
- hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+#define HNS3_NVGRE_HLEN 8
+ uint32_t tmp_outer = *ol_type_vlan_len_msec;
+ uint32_t tmp_inner = *type_cs_vlan_tso_len;
+ uint64_t ol_flags = m->ol_flags;
+ uint16_t inner_l2_len;
+
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
/*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(UDP) length and tunneling length.
+ * For a tunnel packet, the inner l2 length of the mbuf is the
+ * sum of the outer l4 length, the tunneling header length and
+ * the inner l2 length. But in the hns3 Tx descriptor, the
+ * tunneling header length is carried in the outer L4 length
+ * field. Therefore, the driver needs to calculate the outer L4
+ * length and the inner L2 length.
*/
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- (uint8_t)RTE_ETHER_VXLAN_HLEN >>
- HNS3_L4_LEN_UNIT);
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (uint8_t)RTE_ETHER_VXLAN_HLEN >>
+ HNS3_L4_LEN_UNIT);
+
+ inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
break;
- case PKT_TX_TUNNEL_GRE:
- hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
- * OL4 header size, defined in 4 Bytes, it contains outer
- * L4(GRE) length and tunneling length.
+ * For an NVGRE tunnel packet, the outer L4 is empty, so only
+ * the NVGRE header length is filled into the outer L4 field.
*/
- l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- l4_len >> HNS3_L4_LEN_UNIT);
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
+
+ inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
break;
default:
/* For non UDP / GRE tunneling, drop the tunnel packet */
return -EINVAL;
}
- *ol_type_vlan_len_msec = tmp;
+ tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ inner_l2_len >> HNS3_L2_LEN_UNIT);
+ /* OL2 header size, defined in 2 bytes */
+ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+ m->outer_l2_len >> HNS3_L2_LEN_UNIT);
+
+ *type_cs_vlan_tso_len = tmp_inner;
+ *ol_type_vlan_len_msec = tmp_outer;
return 0;
}
static int
-hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- uint64_t ol_flags,
- struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+ uint16_t tx_desc_id)
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
- uint32_t value = 0;
+ uint64_t ol_flags = m->ol_flags;
+ uint32_t tmp_outer = 0;
+ uint32_t tmp_inner = 0;
+ uint32_t tmp_ol4cs;
int ret;
- hns3_parse_outer_params(ol_flags, &value);
- ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
- if (ret)
- return -EINVAL;
+ /*
+ * In the mbuf, the tunnel header length is counted in the inner L2
+ * header field, but in the hns3 descriptor it belongs to the outer
+ * L4 field, so a conversion between the two is needed. To avoid
+ * multiple calculations, the L2 header lengths, both outer and
+ * inner, are filled in while parsing tunnel packets.
+ */
+ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
+ /*
+ * For non-tunnel packets the tunnel type id is 0, so there is no
+ * need to assign it. Only the inner (normal) L2 header length is
+ * assigned.
+ */
+ tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
+ } else {
+ /*
+ * If the outer checksum is not offloaded, the outer lengths may be
+ * filled with 0 while the outer header length is folded into the
+ * inner l2_len, which would lead to a checksum error. So the driver
+ * has to calculate the header lengths itself.
+ */
+ if (unlikely(!(ol_flags &
+ (RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
+ m->outer_l2_len == 0)) {
+ struct rte_net_hdr_lens hdr_len;
+ (void)rte_net_get_ptype(m, &hdr_len,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+ m->outer_l3_len = hdr_len.l3_len;
+ m->outer_l2_len = hdr_len.l2_len;
+ m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
+ }
+ hns3_parse_outer_params(m, &tmp_outer);
+ ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
+ if (ret)
+ return -EINVAL;
+ }
- desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
+ desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
+ desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
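+ /* OL4CS requests the HW to calculate the outer UDP checksum */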
+ tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ?
+ BIT(HNS3_TXD_OL4CS_B) : 0;
+ desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
return 0;
}
static void
-hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
{
+ uint64_t ol_flags = m->ol_flags;
+ uint32_t l3_type;
uint32_t tmp;
+ tmp = *type_cs_vlan_tso_len;
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
+ l3_type = HNS3_L3T_IPV4;
+ else if (ol_flags & RTE_MBUF_F_TX_IPV6)
+ l3_type = HNS3_L3T_IPV6;
+ else
+ l3_type = HNS3_L3T_NONE;
+
+ /* inner(/normal) L3 header size, defined in 4 bytes */
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+ m->l3_len >> HNS3_L3_LEN_UNIT);
+
+ tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
+
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IPV4) {
- tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
- HNS3_L3T_IPV4);
- /* inner(/normal) L3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
- if (ol_flags & PKT_TX_IP_CKSUM)
- hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
- *type_cs_vlan_tso_len = tmp;
- } else if (ol_flags & PKT_TX_IPV6) {
- tmp = *type_cs_vlan_tso_len;
- /* L3T, IPv6 don't do checksum */
- hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
- HNS3_L3T_IPV6);
- /* inner(/normal) L3 header size, defined in 4 bytes */
- hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
- sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
- }
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ tmp |= BIT(HNS3_TXD_L3CS_B);
+ *type_cs_vlan_tso_len = tmp;
}
static void
-hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
{
+ uint64_t ol_flags = m->ol_flags;
uint32_t tmp;
-
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) {
+ case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
tmp = *type_cs_vlan_tso_len;
- hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
- hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
- hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
- *type_cs_vlan_tso_len = tmp;
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
- break;
+ return;
}
+ tmp |= BIT(HNS3_TXD_L4CS_B);
+ tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ m->l4_len >> HNS3_L4_LEN_UNIT);
+ *type_cs_vlan_tso_len = tmp;
}
static void
-hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- uint64_t ol_flags)
+hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+ uint16_t tx_desc_id)
{
struct hns3_desc *tx_ring = txq->tx_ring;
struct hns3_desc *desc = &tx_ring[tx_desc_id];
uint32_t value = 0;
- /* inner(/normal) L2 header size, defined in 2 bytes */
- hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
- sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
-
- hns3_parse_l3_cksum_params(ol_flags, &value);
- hns3_parse_l4_cksum_params(ol_flags, &value);
+ hns3_parse_l3_cksum_params(m, &value);
+ hns3_parse_l4_cksum_params(m, &value);
desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
}
static bool
-hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
+hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
+ uint32_t max_non_tso_bd_num)
{
struct rte_mbuf *m_first = tx_pkts;
struct rte_mbuf *m_last = tx_pkts;
* frags greater than gso header len + mss, and the remaining 7
* consecutive frags greater than MSS except the last 7 frags.
*/
- if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
+ if (bd_num <= max_non_tso_bd_num)
return false;
- for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
+ for (i = 0; m_last && i < max_non_tso_bd_num - 1;
i++, m_last = m_last->next)
tot_len += m_last->data_len;
/* ensure the first 8 frags is greater than mss + header */
hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
- hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
return true;
* ensure the sum of the data length of every 7 consecutive buffer
* is greater than mss except the last one.
*/
- for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
+ for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
tot_len -= m_first->data_len;
tot_len += m_last->data_len;
return false;
}
+static bool
+hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+ * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+ * header checksum for TSO packets
+ */
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv4_hdr->next_proto_id;
+ return false;
+}
+
+static bool
+hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
+ uint32_t *l4_proto)
+{
+ struct rte_ipv6_hdr *ipv6_hdr;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->outer_l2_len);
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+ struct rte_udp_hdr *udp_hdr;
+ /*
+ * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
+ * header checksum for TSO packets
+ */
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ return true;
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->outer_l2_len + m->outer_l3_len);
+ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+
+ return true;
+ }
+ *l4_proto = ipv6_hdr->proto;
+ return false;
+}
+
static void
hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
{
uint64_t ol_flags = m->ol_flags;
- struct rte_ipv4_hdr *ipv4_hdr;
+ uint32_t paylen, hdr_len, l4_proto;
struct rte_udp_hdr *udp_hdr;
- uint32_t paylen, hdr_len;
- if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
return;
- if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- m->outer_l2_len);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+ if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
+ } else {
+ if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
+ return;
}
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM &&
- ol_flags & PKT_TX_TCP_SEG) {
+ /* The driver must ensure the outer UDP cksum is 0 for tunnel TSO */
+ if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
hdr_len = m->l2_len + m->l3_len + m->l4_len;
- hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
- m->outer_l2_len + m->outer_l3_len : 0;
+ hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
if (paylen <= m->tso_segsz)
return;
return -EINVAL;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
- hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m->outer_l2_len + m->outer_l3_len : 0;
if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
return -EINVAL;
struct rte_ether_hdr *eh;
struct rte_vlan_hdr *vh;
- if (!txq->pvid_state)
+ if (!txq->pvid_sw_shift_en)
return 0;
/*
* implementation function named hns3_prep_pkts to inform users that
* these packets will be discarded.
*/
- if (m->ol_flags & PKT_TX_QINQ_PKT)
+ if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
return -EINVAL;
eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
- if (m->ol_flags & PKT_TX_VLAN_PKT)
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN)
return -EINVAL;
/* Ensure the incoming packet is not a QinQ packet */
}
#endif
-uint16_t
-hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+hns3_udp_cksum_help(struct rte_mbuf *m)
{
- struct rte_mbuf *m;
- uint16_t i;
- int ret;
+ uint64_t ol_flags = m->ol_flags;
+ uint16_t cksum = 0;
+ uint32_t l4_len;
- for (i = 0; i < nb_pkts; i++) {
- m = tx_pkts[i];
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+ struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
+ } else {
+ struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv6_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
+ }
- if (hns3_pkt_is_tso(m) &&
- (hns3_pkt_need_linearized(m, m->nb_segs) ||
- hns3_check_tso_pkt_valid(m))) {
- rte_errno = EINVAL;
- return i;
- }
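+ /*
+ * The UDP checksum field is expected to already hold the pseudo-header
+ * checksum (written by rte_net_intel_cksum_prepare() before this helper
+ * runs), so a raw checksum over the whole datagram gives the final value.
+ */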
+ rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
+
+ cksum = ~cksum;
+ /*
+ * RFC 768: if the computed checksum is zero for UDP, it is
+ * transmitted as all ones
+ */
+ if (cksum == 0)
+ cksum = 0xffff;
+
+ return (uint16_t)cksum;
+}
+
+static bool
+hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t dst_port;
+
+ if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
+ ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ||
+ (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM)
+ return true;
+ /*
+ * A UDP packet with the same dst_port as VXLAN/VXLAN_GPE/GENEVE will
+ * be recognized as a tunnel packet by the HW. In this case, if UDP
+ * CKSUM offload is set but no tunnel flag is set, the CKSUM would be
+ * computed with the wrong header length, so the driver completes the
+ * CKSUM itself to avoid a CKSUM error.
+ */
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->l2_len + m->l3_len);
+ dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
+ switch (dst_port) {
+ case RTE_VXLAN_DEFAULT_PORT:
+ case RTE_VXLAN_GPE_DEFAULT_PORT:
+ case RTE_GENEVE_DEFAULT_PORT:
+ udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
+ m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int
+hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
+{
+ int ret;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- ret = rte_validate_tx_offload(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
- }
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
- if (hns3_vld_vlan_chk(tx_queue, m)) {
+ ret = hns3_vld_vlan_chk(tx_queue, m);
+ if (ret != 0) {
+ rte_errno = EINVAL;
+ return ret;
+ }
+#endif
+ if (hns3_pkt_is_tso(m)) {
+ if (hns3_pkt_need_linearized(m, m->nb_segs,
+ tx_queue->max_non_tso_bd_num) ||
+ hns3_check_tso_pkt_valid(m)) {
rte_errno = EINVAL;
- return i;
+ return -EINVAL;
}
-#endif
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
+
+ if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
+ /*
+ * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means
+ * hardware support recalculate the TCP pseudo header
+ * checksum of packets that need TSO, so network driver
+ * software not need to recalculate it.
+ */
+ hns3_outer_header_cksum_prepare(m);
+ return 0;
}
+ }
- hns3_outer_header_cksum_prepare(m);
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
+
+ if (!hns3_validate_tunnel_cksum(tx_queue, m))
+ return 0;
+
+ hns3_outer_header_cksum_prepare(m);
+
+ return 0;
+}
+
+uint16_t
+hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *m;
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (hns3_prep_pkt_proc(tx_queue, m))
+ return i;
}
return i;
static int
hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
- const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
-{
- /* Fill in tunneling parameters if necessary */
- if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
- (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
- if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
- hdr_lens)) {
- txq->unsupported_tunnel_pkt_cnt++;
- return -EINVAL;
+ struct rte_mbuf *m)
+{
+ struct hns3_desc *tx_ring = txq->tx_ring;
+ struct hns3_desc *desc = &tx_ring[tx_desc_id];
+
+ /* Enable checksum offloading */
+ if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
+ /* Fill in tunneling parameters if necessary */
+ if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
+ txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
+ return -EINVAL;
}
+
+ hns3_txd_enable_checksum(txq, m, tx_desc_id);
+ } else {
+ /* clear the control bit */
+ desc->tx.type_cs_vlan_tso_len = 0;
+ desc->tx.ol_type_vlan_len_msec = 0;
}
- /* Enable checksum offloading */
- if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
- hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
return 0;
}
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
+ uint8_t max_non_tso_bd_num;
struct rte_mbuf *new_pkt;
int ret;
* driver support, the packet will be ignored.
*/
if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
- txq->over_length_pkt_cnt++;
+ txq->dfx_stats.over_length_pkt_cnt++;
return -EINVAL;
}
- if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
- txq->exceed_limit_bd_pkt_cnt++;
- ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt);
+ max_non_tso_bd_num = txq->max_non_tso_bd_num;
+ if (unlikely(nb_buf > max_non_tso_bd_num)) {
+ txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
+ ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
+ max_non_tso_bd_num);
if (ret) {
- txq->exceed_limit_bd_reassem_fail++;
+ txq->dfx_stats.exceed_limit_bd_reassem_fail++;
return ret;
}
*m_seg = new_pkt;
tx_entry = &txq->sw_ring[txq->next_to_clean];
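+ /*
+ * With MBUF_FAST_FREE, all mbufs come from the same mempool and have
+ * a reference count of one, so they can be returned with a single
+ * bulk put.
+ */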
+ if (txq->mbuf_fast_free_en) {
+ rte_mempool_put_bulk(tx_entry->mbuf->pool,
+ (void **)tx_entry, txq->tx_rs_thresh);
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ tx_entry[i].mbuf = NULL;
+ goto update_field;
+ }
+
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((tx_entry + i)->mbuf);
for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
tx_entry->mbuf = NULL;
}
+update_field:
txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
txq->tx_bd_ready += txq->tx_rs_thresh;
}
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
dma_addr = rte_mbuf_data_iova(*pkts);
txdp->addr = rte_cpu_to_le_64(dma_addr);
txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
- txdp->tx.paylen = 0;
+ txdp->tx.paylen_fd_dop_ol4cs = 0;
txdp->tx.type_cs_vlan_tso_len = 0;
txdp->tx.ol_type_vlan_len_msec = 0;
txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
hns3_tx_setup_4bd(txdp + i, pkts + i);
+
+ /* Increment bytes counter */
+ uint32_t j;
+ for (j = 0; j < PER_LOOP_NUM; j++)
+ txq->basic_stats.bytes += pkts[i + j]->pkt_len;
}
if (unlikely(leftover > 0)) {
for (i = 0; i < leftover; i++) {
pkts + mainpart + i);
hns3_tx_setup_1bd(txdp + mainpart + i,
pkts + mainpart + i);
+
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
}
}
}
nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_pkts == 0)) {
if (txq->tx_bd_ready == 0)
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
txq->next_to_use += nb_pkts - nb_tx;
- hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
+ hns3_write_txq_tail_reg(txq, nb_pkts);
return nb_pkts;
}
uint16_t
hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct rte_net_hdr_lens hdr_lens = {0};
struct hns3_tx_queue *txq = tx_queue;
struct hns3_entry *tx_bak_pkt;
struct hns3_desc *tx_ring;
uint16_t nb_tx;
uint16_t i;
- /* free useless buffer */
- hns3_tx_free_useless_buffer(txq);
+ if (txq->tx_bd_ready < txq->tx_free_thresh)
+ (void)hns3_tx_free_useless_buffer(txq);
tx_next_use = txq->next_to_use;
tx_bd_max = txq->nb_tx_desc;
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
- txq->queue_full_cnt++;
- if (nb_tx == 0)
- return 0;
-
- goto end_of_tx;
+ /* Try to release only the required number of mbufs, and
+ * avoid releasing all of them at once, which can take a
+ * long time and cause jitter.
+ */
+ if (hns3_tx_free_required_buffer(txq, nb_buf) != 0) {
+ txq->dfx_stats.queue_full_cnt++;
+ goto end_of_tx;
+ }
}
/*
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
if (appended == NULL) {
- txq->pkt_padding_fail_cnt++;
+ txq->dfx_stats.pkt_padding_fail_cnt++;
break;
}
if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
goto end_of_tx;
- if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
+ if (hns3_parse_cksum(txq, tx_next_use, m_seg))
goto end_of_tx;
i = 0;
desc->tx.tp_fe_sc_vld_ra_ri |=
rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
+ /* Increment bytes counter */
+ txq->basic_stats.bytes += tx_pkt->pkt_len;
nb_hold += i;
txq->next_to_use = tx_next_use;
txq->tx_bd_ready -= i;
end_of_tx:
if (likely(nb_tx))
- hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
+ hns3_write_txq_tail_reg(txq, nb_hold);
return nb_tx;
}
return 0;
}
+uint16_t __rte_weak
+hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
int
hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
info = "Scalar";
else if (pkt_burst == hns3_xmit_pkts_vec)
info = "Vector Neon";
+ else if (pkt_burst == hns3_xmit_pkts_vec_sve)
+ info = "Vector Sve";
if (info == NULL)
return -EINVAL;
return 0;
}
-static eth_tx_burst_t
-hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
+static bool
+hns3_tx_check_simple_support(struct rte_eth_dev *dev)
{
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hns3_dev_get_support(hw, PTP))
+ return false;
+
+ return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
+}
+
+static bool
+hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
+{
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_SET_USED(dev);
+ /* always perform tx_prepare in debug mode */
+ return true;
+#else
+#define HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK (\
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
+
+ uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
+ if (tx_offload & HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK)
+ return true;
+
+ return false;
+#endif
+}
+
+eth_tx_burst_t
+hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
+{
struct hns3_adapter *hns = dev->data->dev_private;
+ bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support, tx_prepare_needed;
+
+ vec_support = hns3_tx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
+ simple_allowed = hns3_tx_check_simple_support(dev);
+ tx_prepare_needed = hns3_get_tx_prep_needed(dev);
+
+ *prep = NULL;
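+ /*
+ * Honor the user's tx_func_hint first; otherwise fall back in the
+ * order vector > simple > common.
+ */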
- if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
- *prep = NULL;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
return hns3_xmit_pkts_vec;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
+ return hns3_xmit_pkts_vec_sve;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
+ return hns3_xmit_pkts_simple;
+ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
+ return hns3_xmit_pkts;
}
- if (hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
- *prep = NULL;
+ if (vec_allowed)
+ return hns3_xmit_pkts_vec;
+ if (simple_allowed)
return hns3_xmit_pkts_simple;
- }
- *prep = hns3_prep_pkts;
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
-static uint16_t
+uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
return 0;
}
+static void
+hns3_trace_rxtx_function(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_burst_mode rx_mode;
+ struct rte_eth_burst_mode tx_mode;
+
+ memset(&rx_mode, 0, sizeof(rx_mode));
+ memset(&tx_mode, 0, sizeof(tx_mode));
+ (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
+ (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
+
+ hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
+ rx_mode.info, tx_mode.info);
+}
+
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct hns3_adapter *hns = eth_dev->data->dev_private;
eth_tx_prep_t prep = NULL;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+ __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
- eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
+ eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
+ eth_dev->tx_pkt_burst = hw->set_link_down ?
+ hns3_dummy_rxtx_burst :
+ hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
+ eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
+ hns3_trace_rxtx_function(eth_dev);
} else {
eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
- eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
+ eth_dev->tx_pkt_prepare = NULL;
}
}
qinfo->mp = rxq->mb_pool;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->scattered_rx = dev->data->scattered_rx;
+ /* Report the HW Rx buffer length to user */
+ qinfo->rx_buf_size = rxq->rx_buf_len;
/*
* If there are no available Rx buffer descriptors, incoming packets
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ ret = hns3_init_rxq(hns, rx_queue_id);
+ if (ret) {
+ hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ hns3_enable_rxq(rxq, true);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static void
+hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
+{
+ rxq->next_to_use = 0;
+ rxq->rx_rearm_start = 0;
+ rxq->rx_free_hold = 0;
+ rxq->rx_rearm_nb = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
+ hns3_rxq_vec_setup(rxq);
+}
+
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ hns3_enable_rxq(rxq, false);
+
+ hns3_rx_queue_release_mbufs(rxq);
+
+ hns3_reset_sw_rxq(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+ int ret;
+
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+ tx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ hns3_init_txq(txq);
+ hns3_enable_txq(txq, true);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ hns3_enable_txq(txq, false);
+ hns3_tx_queue_release_mbufs(txq);
+ /*
+ * All the mbufs in sw_ring are released and all the pointers in
+ * sw_ring are set to NULL. If this queue is still called by the
+ * upper layer, residual SW state of this txq may cause the NULL
+ * pointers in sw_ring to be released again. To avoid this,
+ * reinitialize the txq.
+ */
+ hns3_init_txq(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+{
+ uint16_t round_free_cnt;
+ uint32_t idx;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ if (txq->tx_rs_thresh == 0)
+ return 0;
+
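+ /* Buffers are freed in tx_rs_thresh batches, so round the request up */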
+ round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh);
+ for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) {
+ if (hns3_tx_free_useless_buffer(txq) != 0)
+ break;
+ }
+
+ return RTE_MIN(idx, free_cnt);
+}
+
+int
+hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+
+ if (dev->tx_pkt_burst == hns3_xmit_pkts)
+ return hns3_tx_done_cleanup_full(q, free_cnt);
+ else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
+ return 0;
+ else
+ return -ENOTSUP;
+}
+
+int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *rxdp;
+ struct hns3_rx_queue *rxq;
+ struct rte_eth_dev *dev;
+ uint32_t bd_base_info;
+ uint16_t desc_id;
+
+ rxq = (struct hns3_rx_queue *)rx_queue;
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+ rxdp = &rxq->rx_ring[desc_id];
+ bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+ dev = &rte_eth_devices[rxq->port_id];
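+ /*
+ * BDs still held by the driver are reported as UNAVAIL: the scalar
+ * paths track them in rx_free_hold, the vector paths in rx_rearm_nb.
+ */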
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
+ dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return RTE_ETH_RX_DESC_AVAIL;
+ else
+ return RTE_ETH_RX_DESC_DONE;
+}
+
+int
+hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *txdp;
+ struct hns3_tx_queue *txq;
+ struct rte_eth_dev *dev;
+ uint16_t desc_id;
+
+ txq = (struct hns3_tx_queue *)tx_queue;
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[txq->port_id];
+ if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
+ dev->tx_pkt_burst != hns3_xmit_pkts &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec)
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
+ txdp = &txq->tx_ring[desc_id];
+ if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
+uint32_t
+hns3_rx_queue_count(void *rx_queue)
+{
+ /*
+ * Number of BDs that have been processed by the driver
+ * but have not been notified to the hardware.
+ */
+ uint32_t driver_hold_bd_num;
+ struct hns3_rx_queue *rxq;
+ const struct rte_eth_dev *dev;
+ uint32_t fbd_num;
+
+ rxq = rx_queue;
+ dev = &rte_eth_devices[rxq->port_id];
+
+ fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
+ if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
+ driver_hold_bd_num = rxq->rx_rearm_nb;
+ else
+ driver_hold_bd_num = rxq->rx_free_hold;
+
+ if (fbd_num <= driver_hold_bd_num)
+ return 0;
+ else
+ return fbd_num - driver_hold_bd_num;
+}
+
+void
+hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
+{
+ /*
+ * If the hardware supports the RXD advanced layout, the driver
+ * enables it by default.
+ */
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
+ hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+void
+hns3_stop_tx_datapath(struct rte_eth_dev *dev)
+{
+ dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+ dev->tx_pkt_prepare = NULL;
+ rte_wmb();
+ /* Disable tx datapath on secondary process. */
+ hns3_mp_req_stop_tx(dev);
+ /* Prevent crashes when queues are still in use. */
+ rte_delay_ms(dev->data->nb_tx_queues);
+}
+
+void
+hns3_start_tx_datapath(struct rte_eth_dev *dev)
+{
+ eth_tx_prep_t prep = NULL;
+
+ dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
+ dev->tx_pkt_prepare = prep;
+ hns3_mp_req_start_tx(dev);
+}