#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_ARCH_ARM64)
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#endif
#include "hns3_ethdev.h"
const uint32_t *
hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L2_ETHER_VLAN,
- RTE_PTYPE_L2_ETHER_QINQ,
RTE_PTYPE_L2_ETHER_LLDP,
RTE_PTYPE_L2_ETHER_ARP,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_GRE,
RTE_PTYPE_INNER_L2_ETHER,
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
- RTE_PTYPE_INNER_L2_ETHER_QINQ,
RTE_PTYPE_INNER_L3_IPV4,
RTE_PTYPE_INNER_L3_IPV6,
RTE_PTYPE_INNER_L3_IPV4_EXT,
RTE_PTYPE_TUNNEL_NVGRE,
RTE_PTYPE_UNKNOWN
};
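+ /*
+ * When the hardware supports the advanced RXD layout, it can report a
+ * richer set of packet types, including fragment/non-fragment and
+ * inner L4 classification.
+ */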
+ static const uint32_t adv_layout_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_IGMP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
- return ptypes;
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ return adv_layout_ptypes;
+ else
+ return ptypes;
+ }
return NULL;
}
tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
- tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
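+ /* VXLAN is carried over UDP, so report the outer L4 type as well. */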
+ tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
RTE_PTYPE_L4_UDP;
ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv4 */
- ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv6 */
- ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
}
uint16_t
-hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+hns3_recv_pkts_simple(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
volatile struct hns3_desc *rxdp; /* pointer of the current desc */
struct rte_mbuf *nmb; /* pointer of the new mbuf */
struct rte_mbuf *rxm;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t ol_info;
uint64_t dma_addr;
/* Load remained descriptor data and extract necessary fields */
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
- ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
- l234_info, &cksum_err);
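+ /*
+ * hns3_handle_bdinfo() now parses the checksum status and sets the
+ * checksum ol_flags itself, so no separate cksum_err pass is needed.
+ */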
+ ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
if (unlikely(ret))
goto pkt_err;
if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
- cksum_err);
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
/* Increment bytes counter */
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t gro_size;
uint32_t ol_info;
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
- l234_info, &cksum_err);
+ l234_info);
if (unlikely(ret))
goto pkt_err;
first_seg->packet_type = hns3_rx_calc_ptype(rxq,
l234_info, ol_info);
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(first_seg,
- first_seg->packet_type,
- cksum_err);
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
/* Increment bytes counter */
eth_rx_burst_t pkt_burst;
const char *info;
} burst_infos[] = {
- { hns3_recv_pkts, "Scalar" },
+ { hns3_recv_pkts_simple, "Scalar Simple" },
{ hns3_recv_scattered_pkts, "Scalar Scattered" },
- { hns3_recv_pkts_vec, "Vector Neon" },
- { hns3_recv_pkts_vec_sve, "Vector Sve" },
+ { hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec_sve, "Vector Sve" },
};
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
}
static bool
-hns3_check_sve_support(void)
+hns3_get_default_vec_support(void)
+{
+#if defined(RTE_ARCH_ARM64)
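+ /*
+ * The default vector path uses NEON (128-bit SIMD); honour the max
+ * SIMD bitwidth configured in EAL.
+ */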
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ return true;
+#endif
+ return false;
+}
+
+static bool
+hns3_get_sve_support(void)
{
#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
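+ /* The SVE path requires at least 256-bit wide SIMD to be allowed. */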
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
+ return false;
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
return true;
#endif
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support;
- vec_allowed = hns3_rx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
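+ /*
+ * vec_support tells whether the device and offload configuration
+ * allow a vector path at all; NEON and SVE are then gated separately.
+ */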
+ vec_support = hns3_rx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = !dev->data->scattered_rx &&
(offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
return hns3_recv_pkts_vec_sve;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
return hns3_recv_scattered_pkts;
if (vec_allowed)
return hns3_recv_pkts_vec;
if (simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
return hns3_recv_scattered_pkts;
}
{
struct hns3_adapter *hns = dev->data->dev_private;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support;
- vec_allowed = hns3_tx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
+ vec_support = hns3_tx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = hns3_tx_check_simple_support(dev);
*prep = NULL;
rxdp = &rxq->rx_ring[desc_id];
bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
dev = &rte_eth_devices[rxq->port_id];
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
return RTE_ETH_RX_DESC_UNAVAIL;