ethdev: remove legacy FDIR filter type support
[dpdk.git] / drivers / net / sfc / sfc_ef100_rx.c
index c0e70c9..5e76160 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "efx_types.h"
 #include "efx_regs_ef100.h"
+#include "efx.h"
 
 #include "sfc_debug.h"
 #include "sfc_tweak.h"
@@ -55,17 +56,24 @@ struct sfc_ef100_rxq {
 #define SFC_EF100_RXQ_STARTED          0x1
 #define SFC_EF100_RXQ_NOT_RUNNING      0x2
 #define SFC_EF100_RXQ_EXCEPTION                0x4
+#define SFC_EF100_RXQ_RSS_HASH         0x10
+#define SFC_EF100_RXQ_USER_MARK                0x20
+#define SFC_EF100_RXQ_FLAG_INTR_EN     0x40
        unsigned int                    ptr_mask;
        unsigned int                    evq_phase_bit_shift;
        unsigned int                    ready_pkts;
        unsigned int                    completed;
        unsigned int                    evq_read_ptr;
+       unsigned int                    evq_read_ptr_primed;
        volatile efx_qword_t            *evq_hw_ring;
        struct sfc_ef100_rx_sw_desc     *sw_ring;
        uint64_t                        rearm_data;
        uint16_t                        buf_size;
        uint16_t                        prefix_size;
 
+       unsigned int                    evq_hw_index;
+       volatile void                   *evq_prime;
+
        /* Used on refill */
        unsigned int                    added;
        unsigned int                    max_fill_level;
@@ -84,6 +92,14 @@ sfc_ef100_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
        return container_of(dp_rxq, struct sfc_ef100_rxq, dp);
 }
 
+static void
+sfc_ef100_rx_qprime(struct sfc_ef100_rxq *rxq) /* arm EvQ interrupt at current read pointer */
+{
+       sfc_ef100_evq_prime(rxq->evq_prime, rxq->evq_hw_index,
+                           rxq->evq_read_ptr & rxq->ptr_mask);
+       rxq->evq_read_ptr_primed = rxq->evq_read_ptr; /* remember position to skip redundant primes */
+}
+
 static inline void
 sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
 {
@@ -177,8 +193,190 @@ sfc_ef100_rx_qrefill(struct sfc_ef100_rxq *rxq)
        sfc_ef100_rx_qpush(rxq, added);
 }
 
+static inline uint64_t
+sfc_ef100_rx_nt_or_inner_l4_csum(const efx_word_t class) /* non-tunnel or inner L4 csum class -> mbuf flag */
+{
+       return EFX_WORD_FIELD(class,
+                             ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM) ==
+               ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
+               PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
+}
+
+static inline uint64_t
+sfc_ef100_rx_tun_outer_l4_csum(const efx_word_t class) /* tunnel outer L4 csum class -> mbuf flag */
+{
+       return EFX_WORD_FIELD(class,
+                             ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM) ==
+               ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
+               PKT_RX_OUTER_L4_CKSUM_GOOD : PKT_RX_OUTER_L4_CKSUM_BAD;
+}
+
+static uint32_t
+sfc_ef100_rx_class_decode(const efx_word_t class, uint64_t *ol_flags) /* Rx prefix CLASS -> ptype; csum bits into *ol_flags */
+{
+       uint32_t ptype;
+       bool no_tunnel = false; /* set when there is no encapsulation */
+
+       if (unlikely(EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS) !=
+                    ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN))
+               return 0; /* unrecognised L2 class: report unknown packet type */
+
+       switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN)) {
+       case 0:
+               ptype = RTE_PTYPE_L2_ETHER;
+               break;
+       case 1:
+               ptype = RTE_PTYPE_L2_ETHER_VLAN;
+               break;
+       default:
+               ptype = RTE_PTYPE_L2_ETHER_QINQ;
+               break;
+       }
+
+       switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS)) {
+       case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE:
+               no_tunnel = true;
+               break;
+       case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN:
+               ptype |= RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+               *ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
+               break;
+       case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE:
+               ptype |= RTE_PTYPE_TUNNEL_NVGRE;
+               break;
+       case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE:
+               ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
+               *ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
+               break;
+       default:
+               /*
+                * Driver does not know the tunnel, but it is
+                * still a tunnel and NT_OR_INNER refers to inner
+                * frame.
+                */
+               no_tunnel = false;
+       }
+
+       if (no_tunnel) { /* single frame: NT_OR_INNER fields describe it */
+               bool l4_valid = true;
+
+               switch (EFX_WORD_FIELD(class,
+                       ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
+                       ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                       *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
+                       ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                       *ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
+                       ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                       break;
+               default:
+                       l4_valid = false; /* unknown L3: skip L4 classification */
+               }
+
+               if (l4_valid) {
+                       switch (EFX_WORD_FIELD(class,
+                               ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
+                               ptype |= RTE_PTYPE_L4_TCP;
+                               *ol_flags |=
+                                       sfc_ef100_rx_nt_or_inner_l4_csum(class);
+                               break;
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
+                               ptype |= RTE_PTYPE_L4_UDP;
+                               *ol_flags |=
+                                       sfc_ef100_rx_nt_or_inner_l4_csum(class);
+                               break;
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
+                               ptype |= RTE_PTYPE_L4_FRAG;
+                               break;
+                       }
+               }
+       } else { /* tunnel: decode outer, then inner via NT_OR_INNER fields */
+               bool l4_valid = true;
+
+               switch (EFX_WORD_FIELD(class,
+                       ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS)) {
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
+                       ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
+                       ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                       *ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
+                       ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                       break;
+               }
+
+               switch (EFX_WORD_FIELD(class,
+                       ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
+                       ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+                       *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
+                       ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+                       *ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                       break;
+               case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
+                       ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+                       break;
+               default:
+                       l4_valid = false; /* unknown inner L3: skip inner L4 */
+                       break;
+               }
+
+               if (l4_valid) {
+                       switch (EFX_WORD_FIELD(class,
+                               ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
+                               ptype |= RTE_PTYPE_INNER_L4_TCP;
+                               *ol_flags |=
+                                       sfc_ef100_rx_nt_or_inner_l4_csum(class);
+                               break;
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
+                               ptype |= RTE_PTYPE_INNER_L4_UDP;
+                               *ol_flags |=
+                                       sfc_ef100_rx_nt_or_inner_l4_csum(class);
+                               break;
+                       case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
+                               ptype |= RTE_PTYPE_INNER_L4_FRAG;
+                               break;
+                       }
+               }
+       }
+
+       return ptype;
+}
+
+/*
+ * Below function relies on the following fields in Rx prefix.
+ * Some fields are mandatory, some fields are optional.
+ * See sfc_ef100_rx_qstart() below.
+ */
+static const efx_rx_prefix_layout_t sfc_ef100_rx_prefix_layout = {
+       .erpl_fields    = {
+#define        SFC_EF100_RX_PREFIX_FIELD(_name, _big_endian) \
+       EFX_RX_PREFIX_FIELD(_name, ESF_GZ_RX_PREFIX_ ## _name, _big_endian)
+
+               SFC_EF100_RX_PREFIX_FIELD(LENGTH, B_FALSE), /* mandatory */
+               SFC_EF100_RX_PREFIX_FIELD(RSS_HASH_VALID, B_FALSE), /* optional */
+               SFC_EF100_RX_PREFIX_FIELD(USER_FLAG, B_FALSE), /* optional */
+               SFC_EF100_RX_PREFIX_FIELD(CLASS, B_FALSE), /* mandatory */
+               SFC_EF100_RX_PREFIX_FIELD(RSS_HASH, B_FALSE), /* optional */
+               SFC_EF100_RX_PREFIX_FIELD(USER_MARK, B_FALSE), /* optional */
+
+#undef SFC_EF100_RX_PREFIX_FIELD
+       }
+};
+
 static bool
-sfc_ef100_rx_prefix_to_offloads(const efx_oword_t *rx_prefix,
+sfc_ef100_rx_prefix_to_offloads(const struct sfc_ef100_rxq *rxq,
+                               const efx_oword_t *rx_prefix,
                                struct rte_mbuf *m)
 {
        const efx_word_t *class;
@@ -195,6 +393,25 @@ sfc_ef100_rx_prefix_to_offloads(const efx_oword_t *rx_prefix,
                     ESE_GZ_RH_HCLASS_L2_STATUS_OK))
                return false;
 
+       m->packet_type = sfc_ef100_rx_class_decode(*class, &ol_flags);
+
+       if ((rxq->flags & SFC_EF100_RXQ_RSS_HASH) &&
+           EFX_TEST_OWORD_BIT(rx_prefix[0],
+                              ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN)) {
+               ol_flags |= PKT_RX_RSS_HASH;
+               /* EFX_OWORD_FIELD converts little-endian to CPU */
+               m->hash.rss = EFX_OWORD_FIELD(rx_prefix[0],
+                                             ESF_GZ_RX_PREFIX_RSS_HASH);
+       }
+
+       if ((rxq->flags & SFC_EF100_RXQ_USER_MARK) &&
+           EFX_TEST_OWORD_BIT(rx_prefix[0], ESF_GZ_RX_PREFIX_USER_FLAG_LBN)) {
+               ol_flags |= PKT_RX_FDIR_ID;
+               /* EFX_OWORD_FIELD converts little-endian to CPU */
+               m->hash.fdir.hi = EFX_OWORD_FIELD(rx_prefix[0],
+                                                 ESF_GZ_RX_PREFIX_USER_MARK);
+       }
+
        m->ol_flags = ol_flags;
        return true;
 }
@@ -281,7 +498,7 @@ sfc_ef100_rx_process_ready_pkts(struct sfc_ef100_rxq *rxq,
                seg_len = RTE_MIN(pkt_len, rxq->buf_size - rxq->prefix_size);
                rte_pktmbuf_data_len(pkt) = seg_len;
 
-               deliver = sfc_ef100_rx_prefix_to_offloads(rx_prefix, pkt);
+               deliver = sfc_ef100_rx_prefix_to_offloads(rxq, rx_prefix, pkt);
 
                lastseg = pkt;
                while ((pkt_len -= seg_len) > 0) {
@@ -366,6 +583,10 @@ sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        /* It is not a problem if we refill in the case of exception */
        sfc_ef100_rx_qrefill(rxq);
 
+       if ((rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN) &&
+           rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
+               sfc_ef100_rx_qprime(rxq);
+
 done:
        return nb_pkts - (rx_pkts_end - rx_pkts);
 }
@@ -374,6 +595,22 @@ static const uint32_t *
 sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
 {
        static const uint32_t ef100_native_ptypes[] = {
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L2_ETHER_VLAN,
+               RTE_PTYPE_L2_ETHER_QINQ,
+               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_TUNNEL_VXLAN,
+               RTE_PTYPE_TUNNEL_NVGRE,
+               RTE_PTYPE_TUNNEL_GENEVE,
+               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_UNKNOWN
        };
 
@@ -489,8 +726,6 @@ sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,
        rxq->evq_hw_ring = info->evq_hw_ring;
        rxq->max_fill_level = info->max_fill_level;
        rxq->refill_threshold = info->refill_threshold;
-       rxq->rearm_data =
-               sfc_ef100_mk_mbuf_rearm_data(port_id, info->prefix_size);
        rxq->prefix_size = info->prefix_size;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;
@@ -499,6 +734,11 @@ sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                        ER_GZ_RX_RING_DOORBELL_OFST +
                        (info->hw_index << info->vi_window_shift);
 
+       rxq->evq_hw_index = info->evq_hw_index;
+       rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
+                        info->fcw_offset +
+                        ER_GZ_EVQ_INT_PRIME_OFST;
+
        sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell);
 
        *dp_rxqp = &rxq->dp;
@@ -524,13 +764,46 @@ sfc_ef100_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
 
 static sfc_dp_rx_qstart_t sfc_ef100_rx_qstart;
 static int
-sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
+                   const efx_rx_prefix_layout_t *pinfo)
 {
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+       uint32_t unsup_rx_prefix_fields;
 
        SFC_ASSERT(rxq->completed == 0);
        SFC_ASSERT(rxq->added == 0);
 
+       /* Prefix must fit into reserved Rx buffer space */
+       if (pinfo->erpl_length > rxq->prefix_size)
+               return ENOTSUP;
+
+       unsup_rx_prefix_fields =
+               efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);
+
+       /* LENGTH and CLASS fields must always be present */
+       if ((unsup_rx_prefix_fields &
+            ((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
+             (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
+               return ENOTSUP;
+
+       if ((unsup_rx_prefix_fields &
+            ((1U << EFX_RX_PREFIX_FIELD_RSS_HASH_VALID) |
+             (1U << EFX_RX_PREFIX_FIELD_RSS_HASH))) == 0)
+               rxq->flags |= SFC_EF100_RXQ_RSS_HASH;
+       else
+               rxq->flags &= ~SFC_EF100_RXQ_RSS_HASH;
+
+       if ((unsup_rx_prefix_fields &
+            ((1U << EFX_RX_PREFIX_FIELD_USER_FLAG) |
+             (1U << EFX_RX_PREFIX_FIELD_USER_MARK))) == 0)
+               rxq->flags |= SFC_EF100_RXQ_USER_MARK;
+       else
+               rxq->flags &= ~SFC_EF100_RXQ_USER_MARK;
+
+       rxq->prefix_size = pinfo->erpl_length;
+       rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id,
+                                                      rxq->prefix_size);
+
        sfc_ef100_rx_qrefill(rxq);
 
        rxq->evq_read_ptr = evq_read_ptr;
@@ -538,6 +811,9 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
        rxq->flags |= SFC_EF100_RXQ_STARTED;
        rxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);
 
+       if (rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN)
+               sfc_ef100_rx_qprime(rxq);
+
        return 0;
 }
 
@@ -588,15 +864,43 @@ sfc_ef100_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
        rxq->flags &= ~SFC_EF100_RXQ_STARTED;
 }
 
+static sfc_dp_rx_intr_enable_t sfc_ef100_rx_intr_enable;
+static int
+sfc_ef100_rx_intr_enable(struct sfc_dp_rxq *dp_rxq) /* enable Rx interrupts; prime EvQ if queue is running */
+{
+       struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+
+       rxq->flags |= SFC_EF100_RXQ_FLAG_INTR_EN;
+       if (rxq->flags & SFC_EF100_RXQ_STARTED)
+               sfc_ef100_rx_qprime(rxq); /* otherwise qstart primes on start */
+       return 0;
+}
+
+static sfc_dp_rx_intr_disable_t sfc_ef100_rx_intr_disable;
+static int
+sfc_ef100_rx_intr_disable(struct sfc_dp_rxq *dp_rxq) /* stop future re-priming; cannot cancel an armed prime */
+{
+       struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
+
+       /* Cannot disarm, just disable rearm */
+       rxq->flags &= ~SFC_EF100_RXQ_FLAG_INTR_EN; /* Rx burst stops calling qprime */
+       return 0;
+}
+
 struct sfc_dp_rx sfc_ef100_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EF100,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF100,
        },
-       .features               = SFC_DP_RX_FEAT_MULTI_PROCESS,
+       .features               = SFC_DP_RX_FEAT_MULTI_PROCESS |
+                                 SFC_DP_RX_FEAT_INTR,
        .dev_offload_capa       = 0,
-       .queue_offload_capa     = DEV_RX_OFFLOAD_SCATTER,
+       .queue_offload_capa     = DEV_RX_OFFLOAD_CHECKSUM |
+                                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+                                 DEV_RX_OFFLOAD_SCATTER |
+                                 DEV_RX_OFFLOAD_RSS_HASH,
        .get_dev_info           = sfc_ef100_rx_get_dev_info,
        .qsize_up_rings         = sfc_ef100_rx_qsize_up_rings,
        .qcreate                = sfc_ef100_rx_qcreate,
@@ -608,5 +912,7 @@ struct sfc_dp_rx sfc_ef100_rx = {
        .supported_ptypes_get   = sfc_ef100_supported_ptypes_get,
        .qdesc_npending         = sfc_ef100_rx_qdesc_npending,
        .qdesc_status           = sfc_ef100_rx_qdesc_status,
+       .intr_enable            = sfc_ef100_rx_intr_enable,
+       .intr_disable           = sfc_ef100_rx_intr_disable,
        .pkt_burst              = sfc_ef100_recv_pkts,
 };