raw/ioat: expand descriptor struct to full 64 bytes
[dpdk.git] / drivers / net / hns3 / hns3_rxtx.c
index bc3640d..3881a72 100644 (file)
@@ -13,6 +13,7 @@
 #include <rte_malloc.h>
 #if defined(RTE_ARCH_ARM64)
 #include <rte_cpuflags.h>
+#include <rte_vect.h>
 #endif
 
 #include "hns3_ethdev.h"
@@ -1962,8 +1963,6 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
-               RTE_PTYPE_L2_ETHER_VLAN,
-               RTE_PTYPE_L2_ETHER_QINQ,
                RTE_PTYPE_L2_ETHER_LLDP,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L3_IPV4,
@@ -1977,8 +1976,6 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_TUNNEL_GRE,
                RTE_PTYPE_INNER_L2_ETHER,
-               RTE_PTYPE_INNER_L2_ETHER_VLAN,
-               RTE_PTYPE_INNER_L2_ETHER_QINQ,
                RTE_PTYPE_INNER_L3_IPV4,
                RTE_PTYPE_INNER_L3_IPV6,
                RTE_PTYPE_INNER_L3_IPV4_EXT,
@@ -2021,7 +2018,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        };
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (dev->rx_pkt_burst == hns3_recv_pkts ||
+       if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
            dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
            dev->rx_pkt_burst == hns3_recv_pkts_vec ||
            dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
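
The trimmed list above is what the port advertises through rte_eth_dev_get_supported_ptypes() when the simple, scattered, NEON or SVE Rx path is active, so applications should no longer expect RTE_PTYPE_L2_ETHER_VLAN/QINQ (or the inner variants) from those paths. A minimal caller-side sketch that dumps the advertised list; dump_supported_ptypes() is an illustrative helper and port_id is assumed to refer to an already configured hns3 port:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

static void
dump_supported_ptypes(uint16_t port_id)
{
	uint32_t ptypes[64];
	char name[64];
	int num, i;

	/* ask the PMD which ptypes its current rx_pkt_burst can report */
	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					       ptypes, (int)RTE_DIM(ptypes));
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++) {
		rte_get_ptype_name(ptypes[i], name, sizeof(name));
		printf("port %u ptype[%d]: %s\n", port_id, i, name);
	}
}
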
@@ -2393,7 +2390,9 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
 }
 
 uint16_t
-hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+hns3_recv_pkts_simple(void *rx_queue,
+                     struct rte_mbuf **rx_pkts,
+                     uint16_t nb_pkts)
 {
        volatile struct hns3_desc *rx_ring;  /* RX ring (desc) */
        volatile struct hns3_desc *rxdp;     /* pointer of the current desc */
@@ -2404,7 +2403,6 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct rte_mbuf *nmb;           /* pointer of the new mbuf */
        struct rte_mbuf *rxm;
        uint32_t bd_base_info;
-       uint32_t cksum_err;
        uint32_t l234_info;
        uint32_t ol_info;
        uint64_t dma_addr;
@@ -2479,8 +2477,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                /* Load remained descriptor data and extract necessary fields */
                l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
                ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
-               ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
-                                        l234_info, &cksum_err);
+               ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
                if (unlikely(ret))
                        goto pkt_err;
 
@@ -2489,9 +2486,6 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
                        rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
 
-               if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
-                       hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
-                                              cksum_err);
                hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
 
                /* Increment bytes counter  */
@@ -2530,7 +2524,6 @@ hns3_recv_scattered_pkts(void *rx_queue,
        struct rte_mbuf *rxm;
        struct rte_eth_dev *dev;
        uint32_t bd_base_info;
-       uint32_t cksum_err;
        uint32_t l234_info;
        uint32_t gro_size;
        uint32_t ol_info;
@@ -2704,17 +2697,13 @@ hns3_recv_scattered_pkts(void *rx_queue,
                l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
                ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
                ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
-                                        l234_info, &cksum_err);
+                                        l234_info);
                if (unlikely(ret))
                        goto pkt_err;
 
                first_seg->packet_type = hns3_rx_calc_ptype(rxq,
                                                l234_info, ol_info);
 
-               if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
-                       hns3_rx_set_cksum_flag(first_seg,
-                                              first_seg->packet_type,
-                                              cksum_err);
                hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
 
                /* Increment bytes counter */
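
Both the simple and the scattered path now leave checksum reporting entirely to hns3_handle_bdinfo(): the hardware result is translated straight into mbuf ol_flags there, instead of being carried back to the burst function via a cksum_err out-parameter and a separate hns3_rx_set_cksum_flag() call. A rough sketch of that consolidated idea, written as if inside hns3_rxtx.c (so the HNS3_RXD_* bit names, BIT() and the PKT_RX_* flags come from the driver and mbuf headers); rx_cksum_to_ol_flags() is illustrative only, not the driver's actual code:

/* hypothetical helper: fold the descriptor checksum bits into ol_flags */
static inline void
rx_cksum_to_ol_flags(struct rte_mbuf *rxm, uint32_t bd_base_info,
		     uint32_t l234_info)
{
	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
		return; /* hardware did not parse L3/L4, leave flags unset */

	rxm->ol_flags |= (l234_info & BIT(HNS3_RXD_L3E_B)) ?
			 PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD;
	rxm->ol_flags |= (l234_info & BIT(HNS3_RXD_L4E_B)) ?
			 PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
}
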
@@ -2776,10 +2765,10 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                eth_rx_burst_t pkt_burst;
                const char *info;
        } burst_infos[] = {
-               { hns3_recv_pkts,               "Scalar" },
+               { hns3_recv_pkts_simple,        "Scalar Simple" },
                { hns3_recv_scattered_pkts,     "Scalar Scattered" },
-               { hns3_recv_pkts_vec,           "Vector Neon" },
-               { hns3_recv_pkts_vec_sve,       "Vector Sve" },
+               { hns3_recv_pkts_vec,           "Vector Neon"   },
+               { hns3_recv_pkts_vec_sve,       "Vector Sve"    },
        };
 
        eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
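
The strings in this table surface through rte_eth_rx_burst_mode_get(), so a port on the renamed scalar path now reports "Scalar Simple". A small application-side sketch; print_rx_burst_mode() is an illustrative helper and the port/queue are assumed to be set up already:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	/* info is filled from the burst_infos[] table above */
	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u rxq %u Rx burst mode: %s\n",
		       port_id, queue_id, mode.info);
}
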
@@ -2802,6 +2791,8 @@ static bool
 hns3_get_default_vec_support(void)
 {
 #if defined(RTE_ARCH_ARM64)
+       if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+               return false;
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
                return true;
 #endif
@@ -2812,6 +2803,8 @@ static bool
 hns3_get_sve_support(void)
 {
 #if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+       if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
+               return false;
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
                return true;
 #endif
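
Both helpers now honour the EAL SIMD bitwidth limit (hence the rte_vect.h include at the top of this diff) in addition to the CPU flag: NEON needs at least a 128-bit allowance, SVE at least 256-bit, so e.g. --force-max-simd-bitwidth=64 disables both vector Rx paths. The gating logic, pulled out into a standalone helper purely for illustration (the driver keeps the checks inline, and RTE_CPUFLAG_NEON/RTE_CPUFLAG_SVE only exist on ARM64 builds):

#include <stdbool.h>
#include <stdint.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

static bool
rx_vec_path_allowed(enum rte_cpu_flag_t flag, uint16_t min_simd_bitwidth)
{
	/* EAL-level limit, settable via --force-max-simd-bitwidth */
	if (rte_vect_get_max_simd_bitwidth() < min_simd_bitwidth)
		return false;
	/* capability reported by the running CPU */
	return rte_cpu_get_flag_enabled(flag) > 0;
}

/*
 * Mirrors the driver checks:
 *   neon_ok = rx_vec_path_allowed(RTE_CPUFLAG_NEON, RTE_VECT_SIMD_128);
 *   sve_ok  = rx_vec_path_allowed(RTE_CPUFLAG_SVE, RTE_VECT_SIMD_256);
 */
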
@@ -2837,14 +2830,14 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
        if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
                return hns3_recv_pkts_vec_sve;
        if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
-               return hns3_recv_pkts;
+               return hns3_recv_pkts_simple;
        if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
                return hns3_recv_scattered_pkts;
 
        if (vec_allowed)
                return hns3_recv_pkts_vec;
        if (simple_allowed)
-               return hns3_recv_pkts;
+               return hns3_recv_pkts_simple;
 
        return hns3_recv_scattered_pkts;
 }
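
The resulting priority is: honour an explicit user hint first (SVE, then simple, then the common scattered path), otherwise fall back by capability from NEON to simple to scattered. The hint itself comes from the driver's rx_func_hint devargs key (values along the lines of vec, sve, simple and common), so a run such as the following would pin the renamed scalar simple path, assuming that key and a suitable device address:

    dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=simple -- -i
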
@@ -4521,7 +4514,7 @@ hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
        rxdp = &rxq->rx_ring[desc_id];
        bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
        dev = &rte_eth_devices[rxq->port_id];
-       if (dev->rx_pkt_burst == hns3_recv_pkts ||
+       if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
            dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
                if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
                        return RTE_ETH_RX_DESC_UNAVAIL;
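
This per-queue callback backs rte_eth_rx_descriptor_status() at the ethdev level; the check above simply switches on the renamed scalar function. A caller-side sketch, with check_rx_desc() as an illustrative helper and a started port assumed:

#include <stdio.h>
#include <rte_ethdev.h>

static void
check_rx_desc(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	if (status == RTE_ETH_RX_DESC_AVAIL)
		printf("descriptor %u: free, waiting for a packet\n", offset);
	else if (status == RTE_ETH_RX_DESC_DONE)
		printf("descriptor %u: holds a received packet\n", offset);
	else if (status == RTE_ETH_RX_DESC_UNAVAIL)
		printf("descriptor %u: outside the usable range\n", offset);
}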