#include <rte_net.h>
#include <rte_malloc.h>
#include <rte_pci.h>
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#include <rte_cpuflags.h>
+#endif
#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
if (rxq->rx_rearm_nb == 0) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_ring[i].mbuf != NULL)
+ if (rxq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
}
} else {
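+ /*
+ * When a rearm is pending (rx_rearm_nb != 0), only the descriptors
+ * from next_to_use up to rx_rearm_start still hold mbufs, so free
+ * just that range.
+ */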
for (i = rxq->next_to_use;
i != rxq->rx_rearm_start;
i = (i + 1) % rxq->nb_rx_desc) {
- if (rxq->sw_ring[i].mbuf != NULL)
+ if (rxq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
}
}
{
uint16_t i;
- /* Note: Fake rx queue will not enter here */
+ /* Note: Fake tx queue will not enter here */
if (txq->sw_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
if (txq->sw_ring[i].mbuf) {
}
void
-hns3_update_all_queues_pvid_state(struct hns3_hw *hw)
+hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
{
uint16_t nb_rx_q = hw->data->nb_rx_queues;
uint16_t nb_tx_q = hw->data->nb_tx_queues;
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
- int pvid_state;
+ bool pvid_en;
int i;
- pvid_state = hw->port_base_vlan_cfg.state;
+ pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
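+ /*
+ * Propagate the PVID state to every Rx and Tx queue so that the
+ * per-queue datapaths know whether PVID-related software processing
+ * (Rx VLAN discard / Tx VLAN tag shift) is required.
+ */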
for (i = 0; i < hw->cfg_max_queues; i++) {
if (i < nb_rx_q) {
rxq = hw->data->rx_queues[i];
if (rxq != NULL)
- rxq->pvid_state = pvid_state;
+ rxq->pvid_sw_discard_en = pvid_en;
}
if (i < nb_tx_q) {
txq = hw->data->tx_queues[i];
if (txq != NULL)
- txq->pvid_state = pvid_state;
+ txq->pvid_sw_shift_en = pvid_en;
}
}
}
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
uint32_t rcb_reg;
+ void *tqp_base;
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (i < nb_rx_q)
- rxq = hw->data->rx_queues[i];
- else
- rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
- if (i < nb_tx_q)
- txq = hw->data->tx_queues[i];
- else
- txq = hw->fkq_data.tx_queues[i - nb_tx_q];
- if (rxq == NULL || txq == NULL ||
- (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
- continue;
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
+ txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+ /*
+ * After initialization, rxq and txq won't be NULL at
+ * the same time.
+ */
+ if (rxq != NULL)
+ tqp_base = rxq->io_base;
+ else if (txq != NULL)
+ tqp_base = txq->io_base;
+ else
+ return;
+ } else {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
+ hw->fkq_data.rx_queues[i - nb_rx_q];
- rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
+ tqp_base = rxq->io_base;
+ }
+ /*
+ * This is the master switch used to control the enabling of a pair
+ * of Tx and Rx queues. Both the Rx and Tx queues of a pair point to
+ * the same register.
+ */
+ rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
if (en)
rcb_reg |= BIT(HNS3_RING_EN_B);
else
rcb_reg &= ~BIT(HNS3_RING_EN_B);
- hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
+ hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
+ }
+}
+
+static void
+hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
+{
+ struct hns3_hw *hw = &txq->hns->hw;
+ uint32_t reg;
+
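+ /*
+ * The per-queue Tx enable register only exists when the device
+ * supports independent Tx/Rx queue control; otherwise only the
+ * software flag below is recorded.
+ */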
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
+ }
+ txq->enabled = en;
+}
+
+static void
+hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
+{
+ struct hns3_hw *hw = &rxq->hns->hw;
+ uint32_t reg;
+
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
+ }
+ rxq->enabled = en;
+}
+
+int
+hns3_start_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq) {
+ hns3_err(hw, "Tx queue %u not available or setup.", i);
+ goto start_txqs_fail;
+ }
+ /*
+ * A Tx queue is enabled by default, so it needs to be disabled
+ * when deferred_start is set. There is also a master switch that
+ * controls the enabling of a pair of Tx and Rx queues, and that
+ * master switch is disabled by default.
+ */
+ if (txq->tx_deferred_start)
+ hns3_enable_txq(txq, false);
+ else
+ hns3_enable_txq(txq, true);
+ }
+ return 0;
+
+start_txqs_fail:
+ for (j = 0; j < i; j++) {
+ txq = hw->data->tx_queues[j];
+ hns3_enable_txq(txq, false);
+ }
+ return -EINVAL;
+}
+
+int
+hns3_start_all_rxqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (!rxq) {
+ hns3_err(hw, "Rx queue %u not available or setup.", i);
+ goto start_rxqs_fail;
+ }
+ /*
+ * An Rx queue is enabled by default, so it needs to be disabled
+ * when deferred_start is set. There is also a master switch that
+ * controls the enabling of a pair of Tx and Rx queues, and that
+ * master switch is disabled by default.
+ */
+ if (rxq->rx_deferred_start)
+ hns3_enable_rxq(rxq, false);
+ else
+ hns3_enable_rxq(rxq, true);
+ }
+ return 0;
+
+start_rxqs_fail:
+ for (j = 0; j < i; j++) {
+ rxq = hw->data->rx_queues[j];
+ hns3_enable_rxq(rxq, false);
+ }
+ return -EINVAL;
+}
+
+void
+hns3_stop_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq)
+ continue;
+ hns3_enable_txq(txq, false);
}
}
req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
req->stream_id = 0;
hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
-
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
+ hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
+ "ret = %d", queue_id, ret);
return ret;
}
static int
-hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
+hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t *reset_status)
{
struct hns3_reset_tqp_queue_cmd *req;
struct hns3_cmd_desc desc;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Get reset status error, ret =%d", ret);
+ hns3_err(hw, "get tqp reset status error, queue_id = %u, "
+ "ret = %d.", queue_id, ret);
return ret;
}
-
- return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
}
static int
-hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
+hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS 200
+ uint8_t reset_status;
uint64_t end;
- int reset_status;
int ret;
ret = hns3_tqp_enable(hw, queue_id, false);
hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
return ret;
}
- ret = -ETIMEDOUT;
end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
do {
/* Wait for tqp hw reset */
rte_delay_ms(HNS3_POLL_RESPONE_MS);
- reset_status = hns3_get_reset_status(hw, queue_id);
- if (reset_status) {
- ret = 0;
+ ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
+ if (ret)
+ goto tqp_reset_fail;
+
+ if (reset_status)
break;
- }
} while (get_timeofday_ms() < end);
- if (ret) {
- hns3_err(hw, "Reset TQP fail, ret = %d", ret);
- return ret;
+ if (!reset_status) {
+ ret = -ETIMEDOUT;
+ hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
+ queue_id, ret);
+ goto tqp_reset_fail;
}
ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
return ret;
+
+tqp_reset_fail:
+ hns3_send_reset_tqp_cmd(hw, queue_id, false);
+ return ret;
}
static int
memcpy(msg_data, &queue_id, sizeof(uint16_t));
- return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
sizeof(msg_data), true, NULL, 0);
+ if (ret)
+ hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
+ queue_id, ret);
+ return ret;
}
static int
-hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
{
struct hns3_hw *hw = &hns->hw;
+
if (hns->is_vf)
return hns3vf_reset_tqp(hw, queue_id);
else
- return hns3_reset_tqp(hw, queue_id);
+ return hns3pf_reset_tqp(hw, queue_id);
}
int
-hns3_reset_all_queues(struct hns3_adapter *hns)
+hns3_reset_all_tqps(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
int ret, i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- ret = hns3_reset_queue(hns, i);
+ ret = hns3_reset_tqp(hns, i);
if (ret) {
hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
return ret;
return 0;
}
+static int
+hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type, bool enable)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
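+ /* Encode the ring direction: 0 selects the Tx queue, 1 the Rx queue. */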
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+ hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+}
+
+static int
+hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type,
+ uint8_t *reset_status)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get queue reset status error, queue_id = %u "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+ }
+
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
+}
+
+static int
+hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type)
+{
+#define HNS3_QUEUE_RESET_TRY_MS 200
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint32_t reset_wait_times;
+ uint32_t max_wait_times;
+ uint8_t reset_status;
+ int ret;
+
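+ /* Disable the queue before requesting the per-queue reset. */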
+ if (queue_type == HNS3_RING_TYPE_TX) {
+ txq = hw->data->tx_queues[queue_id];
+ hns3_enable_txq(txq, false);
+ } else {
+ rxq = hw->data->rx_queues[queue_id];
+ hns3_enable_rxq(rxq, false);
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
+ if (ret) {
+ hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
+ return ret;
+ }
+
+ reset_wait_times = 0;
+ max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
+ while (reset_wait_times < max_wait_times) {
+ /* Wait for queue hw reset */
+ rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ ret = hns3_get_queue_reset_status(hw, queue_id,
+ queue_type, &reset_status);
+ if (ret)
+ goto queue_reset_fail;
+
+ if (reset_status)
+ break;
+ reset_wait_times++;
+ }
+
+ if (!reset_status) {
+ hns3_err(hw, "reset queue timeout, queue_id = %u, "
+ "queue_type = %s", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
+ ret = -ETIMEDOUT;
+ goto queue_reset_fail;
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ if (ret)
+ hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
+
+ return ret;
+
+queue_reset_fail:
+ hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ return ret;
+}
+
void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
uint8_t gl_idx, uint16_t gl_value)
}
static int
-hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
if (ret) {
- hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
+ hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
idx, ret);
return ret;
}
}
static void
-hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
}
static void
-hns3_init_tx_queue(struct hns3_tx_queue *queue)
+hns3_init_txq(struct hns3_tx_queue *txq)
{
- struct hns3_tx_queue *txq = queue;
struct hns3_desc *desc;
int i;
hns3_init_tx_queue_hw(txq);
}
-static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
-
- txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
-static void
-hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
-
- txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
}
static int
-hns3_start_rx_queues(struct hns3_adapter *hns)
+hns3_init_rx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
- int i, j;
+ uint16_t i, j;
int ret;
/* Initialize RSS for queues */
ret = hns3_config_rss(hns);
if (ret) {
- hns3_err(hw, "Failed to configure rss %d", ret);
+ hns3_err(hw, "failed to configure rss, ret = %d.", ret);
return ret;
}
for (i = 0; i < hw->data->nb_rx_queues; i++) {
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ if (!rxq) {
+ hns3_err(hw, "Rx queue %u not available or setup.", i);
+ goto out;
+ }
+
+ if (rxq->rx_deferred_start)
continue;
- ret = hns3_dev_rx_queue_start(hns, i);
+
+ ret = hns3_init_rxq(hns, i);
if (ret) {
- hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+ hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
ret);
goto out;
}
}
- for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
- rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
- continue;
- hns3_fake_rx_queue_start(hns, i);
- }
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
+ hns3_init_fake_rxq(hns, i);
+
return 0;
out:
return ret;
}
-static void
-hns3_start_tx_queues(struct hns3_adapter *hns)
+static int
+hns3_init_tx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_tx_queue *txq;
- int i;
+ uint16_t i;
for (i = 0; i < hw->data->nb_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (!txq) {
+ hns3_err(hw, "Tx queue %u not available or setup.", i);
+ return -EINVAL;
+ }
+
+ if (txq->tx_deferred_start)
continue;
- hns3_dev_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
- continue;
- hns3_fake_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
-
hns3_init_tx_ring_tc(hns);
+
+ return 0;
}
/*
- * Start all queues.
- * Note: just init and setup queues, and don't enable queue rx&tx.
+ * Init all queues.
+ * Note: just init and setup queues, and don't enable tqps.
*/
int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
{
struct hns3_hw *hw = &hns->hw;
int ret;
if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
+ hns3_err(hw, "failed to reset all queues, ret = %d.",
+ ret);
return ret;
}
}
- ret = hns3_start_rx_queues(hns);
+ ret = hns3_init_rx_queues(hns);
if (ret) {
- hns3_err(hw, "Failed to start rx queues: %d", ret);
+ hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
return ret;
}
- hns3_start_tx_queues(hns);
+ ret = hns3_init_tx_queues(hns);
+ if (ret) {
+ hns3_dev_release_mbufs(hns);
+ hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
+ }
- return 0;
+ return ret;
}
-int
-hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
+void
+hns3_start_tqps(struct hns3_hw *hw)
{
- struct hns3_hw *hw = &hns->hw;
- int ret;
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint16_t i;
- hns3_enable_all_queues(hw, false);
- if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
- if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
- return ret;
- }
+ hns3_enable_all_queues(hw, true);
+
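+ /*
+ * Only queues whose per-queue enable flag is set (i.e. not deferred
+ * start queues) are reported to the ethdev layer as started.
+ */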
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq->enabled)
+ hw->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq->enabled)
+ hw->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
}
- return 0;
+}
+
+void
+hns3_stop_tqps(struct hns3_hw *hw)
+{
+ uint16_t i;
+
+ hns3_enable_all_queues(hw, false);
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++)
+ hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++)
+ hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
/*
int ret;
/* Setup new number of fake RX/TX queues and reconfigure device. */
- hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
if (ret) {
hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
- goto cfg_fake_rx_q_fail;
+ return ret;
}
ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
(void)hns3_fake_tx_queue_config(hw, 0);
cfg_fake_tx_q_fail:
(void)hns3_fake_rx_queue_config(hw, 0);
-cfg_fake_rx_q_fail:
- hw->cfg_max_queues = 0;
return ret;
}
if (dev_data->rx_queues)
for (i = 0; i < dev_data->nb_rx_queues; i++) {
rxq = dev_data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ if (rxq == NULL)
continue;
hns3_rx_queue_release_mbufs(rxq);
}
if (dev_data->tx_queues)
for (i = 0; i < dev_data->nb_tx_queues; i++) {
txq = dev_data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (txq == NULL)
continue;
hns3_tx_queue_release_mbufs(txq);
}
return 0;
}
+static int
+hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
+ uint16_t nb_desc)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ uint16_t min_vec_bds;
+
+ /*
+ * The HNS3 hardware network engine enables scattered reception by
+ * default. If the driver does not work in scattered mode, packets
+ * larger than buf_size but smaller than max_rx_pkt_len are still
+ * spread across multiple BDs, which the driver cannot handle.
+ */
+ if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
+ hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
+ "than rx_buf_len if scattered is off.");
+ return -EINVAL;
+ }
+
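+ /*
+ * The vector Rx path rearms descriptors in batches of
+ * HNS3_DEFAULT_RXQ_REARM_THRESH, so the ring must hold at least one
+ * rearm batch plus one burst and be a multiple of the rearm threshold.
+ */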
+ if (pkt_burst == hns3_recv_pkts_vec) {
+ min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
+ HNS3_DEFAULT_RX_BURST;
+ if (nb_desc < min_vec_bds ||
+ nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
+ hns3_err(hw, "if Rx burst mode is vector, the number of "
+ "descriptors must be greater than the minimum "
+ "vector bds:%u and divisible by the rxq rearm "
+ "thresh:%u.",
+ min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int
hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp, uint16_t nb_desc,
uint16_t *buf_size)
{
+ int ret;
+
if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
nb_desc % HNS3_ALIGN_RING_DESC) {
hns3_err(hw, "Number (%u) of rx descriptors is invalid",
return -EINVAL;
}
+ if (hw->data->dev_started) {
+ ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
+ if (ret) {
+ hns3_err(hw, "Rx queue runtime setup fail.");
+ return ret;
+ }
+ }
+
return 0;
}
+uint32_t
+hns3_get_tqp_reg_offset(uint16_t queue_id)
+{
+ uint32_t reg_offset;
+
+ /* Queues with an id >= HNS3_MIN_EXTEND_QUEUE_ID need an extended register offset. */
+ if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
+ reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
+ else
+ reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
+ (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
+ HNS3_TQP_REG_SIZE;
+
+ return reg_offset;
+}
+
int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_rxconf *conf,
int rx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "rx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
if (ret)
return ret;
rxq->mb_pool = mp;
rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
+
rxq->rx_deferred_start = conf->rx_deferred_start;
+ if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ rxq->rx_deferred_start = false;
+ }
rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
sizeof(struct hns3_entry);
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
rxq->port_id = dev->data->port_id;
- rxq->pvid_state = hw->port_base_vlan_cfg.state;
+ /*
+ * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+ * pvid_sw_discard_en in the queue struct should not be changed, because
+ * PVID-related operations do not need to be processed by the PMD.
+ * For the hns3 VF device, whether PVID needs to be processed depends on
+ * the configuration of the PF kernel mode netdevice driver, and the
+ * related PF configuration is delivered through the mailbox and finally
+ * reflected in port_base_vlan_cfg.
+ */
+ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
+ HNS3_PORT_BASE_VLAN_ENABLE;
+ else
+ rxq->pvid_sw_discard_en = false;
rxq->configured = true;
- rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
- idx * HNS3_TQP_REG_SIZE);
+ rxq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
if (dev->rx_pkt_burst == hns3_recv_pkts ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec)
+ dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
return ptypes;
return NULL;
};
strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S);
- report_mode = report_type[rxq->pvid_state][strip_status];
+ report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
switch (report_mode) {
case HNS3_NO_STRP_VLAN_VLD:
mb->vlan_tci = 0;
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
return;
+ default:
+ mb->vlan_tci = 0;
+ return;
}
}
uint16_t __rte_weak
hns3_recv_pkts_vec(__rte_unused void *tx_queue,
- __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
+uint16_t __rte_weak
+hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
int
hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
{ hns3_recv_pkts, "Scalar" },
{ hns3_recv_scattered_pkts, "Scalar Scattered" },
{ hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec_sve, "Vector Sve" },
};
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
return ret;
}
+static bool
+hns3_check_sve_support(void)
+{
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
+ return true;
+#endif
+ return false;
+}
+
static eth_rx_burst_t
hns3_get_rx_function(struct rte_eth_dev *dev)
{
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
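+ /* Prefer the SVE vector datapath when the CPU and toolchain support it. */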
if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
- return hns3_recv_pkts_vec;
+ return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
+ hns3_recv_pkts_vec;
if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
(offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
int tx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "tx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
&tx_rs_thresh, &tx_free_thresh, idx);
if (ret)
}
txq->tx_deferred_start = conf->tx_deferred_start;
+ if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ txq->tx_deferred_start = false;
+ }
+
tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
RTE_CACHE_LINE_SIZE, socket_id);
}
txq->port_id = dev->data->port_id;
- txq->pvid_state = hw->port_base_vlan_cfg.state;
+ /*
+ * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
+ * pvid_sw_shift_en in the queue struct should not be changed, because
+ * PVID-related operations do not need to be processed by the PMD.
+ * For the hns3 VF device, whether PVID needs to be processed depends on
+ * the configuration of the PF kernel mode netdev driver, and the
+ * related PF configuration is delivered through the mailbox and finally
+ * reflected in port_base_vlan_cfg.
+ */
+ */
+ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
+ txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
+ HNS3_PORT_BASE_VLAN_ENABLE;
+ else
+ txq->pvid_sw_shift_en = false;
+ txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
txq->configured = true;
- txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
- idx * HNS3_TQP_REG_SIZE);
+ txq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
+ txq->tso_mode = hw->tso_mode;
txq->over_length_pkt_cnt = 0;
txq->exceed_limit_bd_pkt_cnt = 0;
txq->exceed_limit_bd_reassem_fail = 0;
* To avoid the VLAN in the Tx descriptor being overwritten by the PVID, the
* VLAN should be inserted at a position close to the IP header when PVID is
* enabled.
*/
- if (!txq->pvid_state && ol_flags & (PKT_TX_VLAN_PKT |
+ if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
PKT_TX_QINQ_PKT)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
}
if (ol_flags & PKT_TX_QINQ_PKT ||
- ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_state)) {
+ ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
}
-static int
-hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool,
- uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf)
+static inline int
+hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
+ struct rte_mbuf **alloc_mbuf)
{
- struct rte_mbuf *new_mbuf = NULL;
- struct rte_eth_dev *dev;
- struct rte_mbuf *temp;
- struct hns3_hw *hw;
+#define MAX_NON_TSO_BD_PER_PKT 18
+ struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
uint16_t i;
/* Allocate enough mbufs */
- for (i = 0; i < nb_new_buf; i++) {
- temp = rte_pktmbuf_alloc(mb_pool);
- if (unlikely(temp == NULL)) {
- dev = &rte_eth_devices[txq->port_id];
- hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hns3_err(hw, "Failed to alloc TX mbuf port_id=%d,"
- "queue_id=%d in reassemble tx pkts.",
- txq->port_id, txq->queue_id);
- rte_pktmbuf_free(new_mbuf);
- return -ENOMEM;
- }
- temp->next = new_mbuf;
- new_mbuf = temp;
- }
-
- if (new_mbuf == NULL)
+ if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
return -ENOMEM;
- new_mbuf->nb_segs = nb_new_buf;
- *alloc_mbuf = new_mbuf;
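+ /* Chain the bulk-allocated mbufs into one multi-segment packet. */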
+ for (i = 0; i < nb_new_buf - 1; i++)
+ pkt_segs[i]->next = pkt_segs[i + 1];
+
+ pkt_segs[nb_new_buf - 1]->next = NULL;
+ pkt_segs[0]->nb_segs = nb_new_buf;
+ *alloc_mbuf = pkt_segs[0];
return 0;
}
}
static int
-hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
- struct rte_mbuf **new_pkt)
+hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
+ uint8_t max_non_tso_bd_num)
{
- struct hns3_tx_queue *txq = tx_queue;
struct rte_mempool *mb_pool;
struct rte_mbuf *new_mbuf;
struct rte_mbuf *temp_new;
uint16_t len_s;
uint16_t len_d;
uint16_t len;
- uint16_t i;
int ret;
char *s;
char *d;
mb_pool = tx_pkt->pool;
buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
- if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
+ if (nb_new_buf > max_non_tso_bd_num)
return -EINVAL;
last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
last_buf_len = buf_size;
/* Allocate enough mbufs */
- ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf);
+ ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
if (ret)
return ret;
s = rte_pktmbuf_mtod(temp, char *);
len_s = rte_pktmbuf_data_len(temp);
temp_new = new_mbuf;
- for (i = 0; i < nb_new_buf; i++) {
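+ /* Copy the original segment chain into the freshly allocated chain. */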
+ while (temp != NULL && temp_new != NULL) {
d = rte_pktmbuf_mtod(temp_new, char *);
- if (i < nb_new_buf - 1)
- buf_len = buf_size;
- else
- buf_len = last_buf_len;
+ buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
len_d = buf_len;
while (len_d) {
}
static bool
-hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num)
+hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
+ uint32_t max_non_tso_bd_num)
{
struct rte_mbuf *m_first = tx_pkts;
struct rte_mbuf *m_last = tx_pkts;
* frags greater than gso header len + mss, and the remaining 7
* consecutive frags greater than MSS except the last 7 frags.
*/
- if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT)
+ if (bd_num <= max_non_tso_bd_num)
return false;
- for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1;
+ for (i = 0; m_last && i < max_non_tso_bd_num - 1;
i++, m_last = m_last->next)
tot_len += m_last->data_len;
* ensure the sum of the data length of every 7 consecutive buffer
* is greater than mss except the last one.
*/
- for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) {
+ for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
tot_len -= m_first->data_len;
tot_len += m_last->data_len;
struct rte_ether_hdr *eh;
struct rte_vlan_hdr *vh;
- if (!txq->pvid_state)
+ if (!txq->pvid_sw_shift_en)
return 0;
/*
}
#endif
-uint16_t
-hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+static int
+hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
{
- struct rte_mbuf *m;
- uint16_t i;
int ret;
- for (i = 0; i < nb_pkts; i++) {
- m = tx_pkts[i];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
- if (hns3_pkt_is_tso(m) &&
- (hns3_pkt_need_linearized(m, m->nb_segs) ||
- hns3_check_tso_pkt_valid(m))) {
+ ret = hns3_vld_vlan_chk(tx_queue, m);
+ if (ret != 0) {
+ rte_errno = EINVAL;
+ return ret;
+ }
+#endif
+ if (hns3_pkt_is_tso(m)) {
+ if (hns3_pkt_need_linearized(m, m->nb_segs,
+ tx_queue->max_non_tso_bd_num) ||
+ hns3_check_tso_pkt_valid(m)) {
rte_errno = EINVAL;
- return i;
+ return -EINVAL;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- ret = rte_validate_tx_offload(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
+ if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
+ /*
+ * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
+ * hardware recalculates the TCP pseudo-header checksum of
+ * packets that need TSO, so the driver software does not
+ * need to recalculate it.
+ */
+ hns3_outer_header_cksum_prepare(m);
+ return 0;
}
+ }
- if (hns3_vld_vlan_chk(tx_queue, m)) {
- rte_errno = EINVAL;
- return i;
- }
-#endif
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = -ret;
- return i;
- }
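+ /*
+ * Non-TSO packets, and TSO packets whose pseudo-header checksum must
+ * be calculated in software, go through the generic checksum
+ * preparation below.
+ */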
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return ret;
+ }
+
+ hns3_outer_header_cksum_prepare(m);
- hns3_outer_header_cksum_prepare(m);
+ return 0;
+}
+
+uint16_t
+hns3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *m;
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (hns3_prep_pkt_proc(tx_queue, m))
+ return i;
}
return i;
hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
{
+ uint8_t max_non_tso_bd_num;
struct rte_mbuf *new_pkt;
int ret;
return -EINVAL;
}
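+ /*
+ * Packets that span more BDs than the hardware limit for non-TSO
+ * packets have to be reassembled into fewer, larger segments.
+ */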
- if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+ max_non_tso_bd_num = txq->max_non_tso_bd_num;
+ if (unlikely(nb_buf > max_non_tso_bd_num)) {
txq->exceed_limit_bd_pkt_cnt++;
- ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
+ ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
+ max_non_tso_bd_num);
if (ret) {
txq->exceed_limit_bd_reassem_fail++;
return ret;
return 0;
}
+uint16_t __rte_weak
+hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int
hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
info = "Scalar";
else if (pkt_burst == hns3_xmit_pkts_vec)
info = "Vector Neon";
+ else if (pkt_burst == hns3_xmit_pkts_vec_sve)
+ info = "Vector Sve";
if (info == NULL)
return -EINVAL;
if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
*prep = NULL;
- return hns3_xmit_pkts_vec;
+ return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
+ hns3_xmit_pkts_vec;
}
if (hns->tx_simple_allowed &&
qinfo->mp = rxq->mb_pool;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->scattered_rx = dev->data->scattered_rx;
+ /* Report the HW Rx buffer length to user */
+ qinfo->rx_buf_size = rxq->rx_buf_len;
/*
* If there are no available Rx buffer descriptors, incoming packets
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ ret = hns3_init_rxq(hns, rx_queue_id);
+ if (ret) {
+ hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_enable_rxq(rxq, true);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ hns3_enable_rxq(rxq, false);
+ hns3_rx_queue_release_mbufs(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+ tx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_init_txq(txq);
+ hns3_enable_txq(txq, true);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ hns3_enable_txq(txq, false);
+ hns3_tx_queue_release_mbufs(txq);
+ /*
+ * All the mbufs in sw_ring are released and all the pointers in sw_ring
+ * are set to NULL. If this queue is still accessed by the upper layer,
+ * residual SW state of this txq could cause these already released
+ * entries in sw_ring to be freed again. To avoid this, re-initialize
+ * the txq.
+ */
+ hns3_init_txq(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}