#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_ARCH_ARM64)
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#endif
#include "hns3_ethdev.h"
hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS 200
+ uint16_t wait_time = 0;
uint8_t reset_status;
- uint64_t end;
int ret;
/*
hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
return ret;
}
- end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
+
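+ /*
+ * Poll with an accumulated wait rather than a wall-clock deadline:
+ * gettimeofday-based time can jump (e.g. under NTP adjustment),
+ * while the accumulated wait is bounded by HNS3_TQP_RESET_TRY_MS
+ * of sleeping.
+ */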
do {
/* Wait for tqp hw reset */
rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ wait_time += HNS3_POLL_RESPONE_MS;
ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
if (ret)
goto tqp_reset_fail;
if (reset_status)
break;
- } while (get_timeofday_ms() < end);
+ } while (wait_time < HNS3_TQP_RESET_TRY_MS);
if (!reset_status) {
ret = -ETIMEDOUT;
uint16_t q;
int ret;
+ if (hns3_dev_indep_txrx_supported(hw))
+ return 0;
+
/* Setup new number of fake RX/TX queues and reconfigure device. */
rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
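+ /*
+ * For example, with cfg_max_queues == 16 and nb_rx_q == 12, four
+ * fake Rx queues are created to fill out the configured maximum.
+ */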
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L2_ETHER_VLAN,
- RTE_PTYPE_L2_ETHER_QINQ,
RTE_PTYPE_L2_ETHER_LLDP,
RTE_PTYPE_L2_ETHER_ARP,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_GRE,
RTE_PTYPE_INNER_L2_ETHER,
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
- RTE_PTYPE_INNER_L2_ETHER_QINQ,
RTE_PTYPE_INNER_L3_IPV4,
RTE_PTYPE_INNER_L3_IPV6,
RTE_PTYPE_INNER_L3_IPV4_EXT,
RTE_PTYPE_TUNNEL_NVGRE,
RTE_PTYPE_UNKNOWN
};
+ static const uint32_t adv_layout_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_IGMP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
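+
+ /*
+ * Hardware with the RXD advanced layout reports a richer packet-type
+ * set (e.g. GRENAT tunnels and distinct inner L4 types), so a
+ * different ptype list is advertised for it.
+ */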
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
- return ptypes;
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ return adv_layout_ptypes;
+ else
+ return ptypes;
+ }
return NULL;
}
tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
- tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
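+ /* VXLAN is UDP-encapsulated, so the outer L4 type includes UDP. */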
+ tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
RTE_PTYPE_L4_UDP;
ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv4 */
- ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
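+ /* GRE over IPv4: report the tunnel type together with the outer headers. */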
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv6 */
- ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
}
uint16_t
-hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+hns3_recv_pkts_simple(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
volatile struct hns3_desc *rxdp; /* pointer to the current desc */
struct rte_mbuf *nmb; /* pointer to the new mbuf */
struct rte_mbuf *rxm;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t ol_info;
uint64_t dma_addr;
/* Load remaining descriptor data and extract necessary fields */
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
- ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
- l234_info, &cksum_err);
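+ /*
+ * hns3_handle_bdinfo() validates the BD and also fills the Rx
+ * checksum ol_flags from l234_info, so no separate checksum pass
+ * is needed here.
+ */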
+ ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
if (unlikely(ret))
goto pkt_err;
if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
- cksum_err);
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
/* Increment bytes counter */
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t gro_size;
uint32_t ol_info;
continue;
}
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
+
/*
* The last buffer of the received packet. The packet length from the
* buffer descriptor may include the CRC length, so the packet length should
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
- l234_info, &cksum_err);
+ l234_info);
if (unlikely(ret))
goto pkt_err;
first_seg->packet_type = hns3_rx_calc_ptype(rxq,
l234_info, ol_info);
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(first_seg,
- first_seg->packet_type,
- cksum_err);
+ if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ first_seg->ol_flags |= PKT_RX_IEEE1588_PTP;
+
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
/* Increment bytes counter */
eth_rx_burst_t pkt_burst;
const char *info;
} burst_infos[] = {
- { hns3_recv_pkts, "Scalar" },
+ { hns3_recv_pkts_simple, "Scalar Simple" },
{ hns3_recv_scattered_pkts, "Scalar Scattered" },
- { hns3_recv_pkts_vec, "Vector Neon" },
- { hns3_recv_pkts_vec_sve, "Vector Sve" },
+ { hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec_sve, "Vector Sve" },
};
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
}
static bool
-hns3_check_sve_support(void)
+hns3_get_default_vec_support(void)
{
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_ARCH_ARM64)
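+ /*
+ * Honour the EAL SIMD width limit (--force-max-simd-bitwidth): the
+ * Neon path requires at least 128-bit SIMD to be allowed.
+ */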
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ return true;
+#endif
+ return false;
+}
+
+static bool
+hns3_get_sve_support(void)
+{
+#if defined(RTE_HAS_SVE_ACLE)
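+ /*
+ * Take the SVE path only when the EAL SIMD limit allows at least
+ * 256-bit vectors, presumably the minimum width the SVE code assumes.
+ */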
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
+ return false;
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
return true;
#endif
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support;
- vec_allowed = hns3_rx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
+ vec_support = hns3_rx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = !dev->data->scattered_rx &&
(offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
return hns3_recv_pkts_vec_sve;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
return hns3_recv_scattered_pkts;
if (vec_allowed)
return hns3_recv_pkts_vec;
if (simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
return hns3_recv_scattered_pkts;
}
return 0;
}
+static void *
+hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define HNS3_TX_PUSH_TQP_REGION_SIZE 0x10000
+#define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET 64
+#define HNS3_TX_PUSH_PCI_BAR_INDEX 4
+
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
+
+ /*
+ * If the device supports Tx push, its PCIe bar45 must exist, and the
+ * DPDK framework mmaps bar45 by default during the PCI probe stage.
+ *
+ * Within bar45, the first half is for RoCE (RDMA over Converged
+ * Ethernet) and the second half is for the NIC; every TQP occupies
+ * 64KB.
+ *
+ * The quick doorbell is located at a 64B offset within each TQP
+ * region.
+ */
+ return (char *)pci_dev->mem_resource[bar_id].addr +
+ (pci_dev->mem_resource[bar_id].len >> 1) +
+ HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
+ HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
+}
+
+void
+hns3_tx_push_init(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ volatile uint32_t *reg;
+ uint32_t val;
+
+ if (!hns3_dev_tx_push_supported(hw))
+ return;
+
+ reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
+ /*
+ * Because bar45 is about 8GB in size, the first access in the Tx
+ * path may stall on a page fault when working with vfio-pci, so
+ * perform one read here to make the kernel set up the page-table
+ * mapping for bar45 during the init stage.
+ * Note: bar45 is readable, but reads return all 1s.
+ */
+ val = *reg;
+ RTE_SET_USED(val);
+}
+
+static void
+hns3_tx_push_queue_init(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ struct hns3_tx_queue *txq)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!hns3_dev_tx_push_supported(hw)) {
+ txq->tx_push_enable = false;
+ return;
+ }
+
+ txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
+ queue_id);
+ txq->tx_push_enable = true;
+}
+
int
hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_txconf *conf)
memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+ /*
+ * Call hns3_tx_push_queue_init() after the io_tail_reg field has
+ * been assigned, because it may overwrite that field.
+ */
+ hns3_tx_push_queue_init(dev, idx, txq);
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
txq->next_to_use += nb_pkts - nb_tx;
- hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
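+ /*
+ * The tail-register helper is expected to ring the quick doorbell
+ * when Tx push is enabled on this queue and to write the normal
+ * tail register otherwise.
+ */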
+ hns3_write_txq_tail_reg(txq, nb_pkts);
return nb_pkts;
}
end_of_tx:
if (likely(nb_tx))
- hns3_write_reg_opt(txq->io_tail_reg, nb_hold);
+ hns3_write_txq_tail_reg(txq, nb_hold);
return nb_tx;
}
return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
}
+static bool
+hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
+{
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_SET_USED(dev);
+ /* always perform tx_prepare in debug builds */
+ return true;
+#else
+#define HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK (\
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+
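+ /*
+ * tx_prepare is only useful when checksum/TSO offloads are enabled;
+ * without them hns3_prep_pkts() has nothing to do, so the per-burst
+ * tx_prepare call can be skipped (prep left as NULL).
+ */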
+ uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
+
+ if (tx_offload & HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK)
+ return true;
+
+ return false;
+#endif
+}
+
static eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
struct hns3_adapter *hns = dev->data->dev_private;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support, tx_prepare_needed;
- vec_allowed = hns3_tx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
+ vec_support = hns3_tx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = hns3_tx_check_simple_support(dev);
+ tx_prepare_needed = hns3_get_tx_prep_needed(dev);
*prep = NULL;
if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
return hns3_xmit_pkts_simple;
if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
- *prep = hns3_prep_pkts;
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
if (simple_allowed)
return hns3_xmit_pkts_simple;
- *prep = hns3_prep_pkts;
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
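+ /*
+ * Hold hw->lock so queue start/stop cannot race with other
+ * control-path operations (e.g. reset handling) that touch the
+ * same queue state.
+ */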
+ rte_spinlock_lock(&hw->lock);
ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
if (ret) {
hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (ret) {
hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
hns3_enable_rxq(rxq, true);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
hns3_enable_rxq(rxq, false);
hns3_rx_queue_release_mbufs(rxq);
hns3_reset_sw_rxq(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
return 0;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
if (ret) {
hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
tx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
hns3_init_txq(txq);
hns3_enable_txq(txq, true);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
hns3_enable_txq(txq, false);
hns3_tx_queue_release_mbufs(txq);
/*
*/
hns3_init_txq(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
return 0;
}
rxdp = &rxq->rx_ring[desc_id];
bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
dev = &rte_eth_devices[rxq->port_id];
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
return RTE_ETH_RX_DESC_UNAVAIL;