#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
+#include "hns3_mp.h"
#define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
#define HNS3_RX_RING_PREFETCTH_MASK 3
}
}
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
{
struct hns3_rx_queue *rxq = queue;
struct hns3_adapter *hns;
}
+/*
+ * ethdev .rx_queue_release callback using the new (dev, queue_id)
+ * signature. Thin wrapper that delegates to hns3_rx_queue_release_lock(),
+ * which presumably frees the queue under the adapter hw lock (the tx
+ * counterpart unlocks hns->hw.lock) — TODO confirm against full source.
+ */
void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
{
struct hns3_tx_queue *txq = queue;
struct hns3_adapter *hns;
rte_spinlock_unlock(&hns->hw.lock);
}
+/*
+ * ethdev .tx_queue_release callback using the new (dev, queue_id)
+ * signature. Thin wrapper that delegates to hns3_tx_queue_release_lock(),
+ * which releases the queue while holding the adapter hw spinlock.
+ */
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
struct hns3_hw *hw = &txq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
struct hns3_hw *hw = &rxq->hns->hw;
uint32_t reg;
- if (hns3_dev_indep_txrx_supported(hw)) {
+ if (hns3_dev_get_support(hw, INDEP_TXRX)) {
reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
if (en)
reg |= BIT(HNS3_RING_EN_B);
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
req = (struct hns3_reset_cmd *)desc.data;
- hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+ hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);
/*
* The start qid should be the global qid of the first tqp of the
/* re-configure */
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rte_free(hw->fkq_data.rx_queues);
hw->fkq_data.rx_queues = NULL;
/* re-configure */
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
rte_free(hw->fkq_data.tx_queues);
hw->fkq_data.tx_queues = NULL;
uint16_t q;
int ret;
- if (hns3_dev_indep_txrx_supported(hw))
+ if (hns3_dev_get_support(hw, INDEP_TXRX))
return 0;
/* Setup new number of fake RX/TX queues and reconfigure device. */
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
rxq->rx_deferred_start = conf->rx_deferred_start;
- if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
rxq->rx_deferred_start = false;
}
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
- rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+ rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
return adv_layout_ptypes;
else
return ptypes;
static bool
hns3_get_sve_support(void)
{
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_HAS_SVE_ACLE)
if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
return false;
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
volatile uint32_t *reg;
uint32_t val;
- if (!hns3_dev_tx_push_supported(hw))
+ if (!hns3_dev_get_support(hw, TX_PUSH))
return;
reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
struct hns3_tx_queue *txq)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!hns3_dev_tx_push_supported(hw)) {
+ if (!hns3_dev_get_support(hw, TX_PUSH)) {
txq->tx_push_enable = false;
return;
}
}
txq->tx_deferred_start = conf->tx_deferred_start;
- if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
hns3_warn(hw, "deferred start is not supported.");
txq->tx_deferred_start = false;
}
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (hns3_dev_ptp_supported(hw))
+ if (hns3_dev_get_support(hw, PTP))
return false;
return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
#endif
}
-static eth_tx_burst_t
+eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
struct hns3_adapter *hns = dev->data->dev_private;
return hns3_xmit_pkts;
}
-static uint16_t
+uint16_t
hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct hns3_adapter *hns = eth_dev->data->dev_private;
eth_tx_prep_t prep = NULL;
__atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
- eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
+ eth_dev->tx_pkt_burst = hw->set_link_down ?
+ hns3_dummy_rxtx_burst :
+ hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
hns3_trace_rxtx_function(eth_dev);
} else {
eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
- eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
+ eth_dev->tx_pkt_prepare = NULL;
}
}
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
int ret;
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
- if (!hns3_dev_indep_txrx_supported(hw))
+ if (!hns3_dev_get_support(hw, INDEP_TXRX))
return -ENOTSUP;
rte_spinlock_lock(&hw->lock);
* If the hardware support rxd advanced layout, then driver enable it
* default.
*/
- if (hns3_dev_rxd_adv_layout_supported(hw))
+ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
}
+
+/*
+ * Quiesce the Tx datapath: swap in the dummy (no-op) burst function on
+ * the primary process, ask secondary processes to do the same, then wait
+ * a grace period so no lcore is still executing the old burst function.
+ */
+void
+hns3_stop_tx_datapath(struct rte_eth_dev *dev)
+{
+ dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
+ dev->tx_pkt_prepare = NULL;
+ /* Ensure the burst-pointer updates are visible before notifying peers. */
+ rte_wmb();
+ /* Disable tx datapath on secondary process. */
+ hns3_mp_req_stop_tx(dev);
+ /* Prevent crashes when queues are still in use. */
+ /* NOTE(review): 1 ms per Tx queue looks like a heuristic grace period,
+  * not a hard synchronization — confirm this matches upstream intent. */
+ rte_delay_ms(dev->data->nb_tx_queues);
+}
+
+/*
+ * Re-arm the Tx datapath: install the real burst/prepare functions on
+ * the primary process (undoing hns3_stop_tx_datapath()) and request
+ * secondary processes to re-enable their Tx path as well.
+ */
+void
+hns3_start_tx_datapath(struct rte_eth_dev *dev)
+{
+ eth_tx_prep_t prep = NULL;
+
+ /* hns3_get_tx_function() also fills in the matching prepare callback. */
+ dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep);
+ dev->tx_pkt_prepare = prep;
+ hns3_mp_req_start_tx(dev);
+}