net/ngbe: support MAC filters
[dpdk.git] / drivers / net / sfc / sfc_rx.c
index 597785a..e2be84b 100644 (file)
 
 #include "sfc.h"
 #include "sfc_debug.h"
+#include "sfc_flow_tunnel.h"
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_mae_counter.h"
 #include "sfc_kvargs.h"
 #include "sfc_tweak.h"
 
@@ -52,6 +54,15 @@ sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
        rxq_info->state &= ~SFC_RXQ_FLUSHING;
 }
 
+/* This returns the running counter, which is not bounded by ring size */
+unsigned int
+sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
+{
+       SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);
+
+       return sa->priv.dp_rx->get_pushed(dp_rxq);
+}
+
 static int
 sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
 {
@@ -128,6 +139,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
        SFC_ASSERT(added != rxq->added);
        rxq->added = added;
        efx_rx_qpush(rxq->common, added, &rxq->pushed);
+       rxq->dp.dpq.rx_dbells++;
 }
 
 static uint64_t
@@ -137,15 +149,15 @@ sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
 
        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
-               mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+               mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
-               mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+               mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                break;
        default:
-               RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
-               SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
-                          PKT_RX_IP_CKSUM_UNKNOWN);
+               RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
+               SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
+                          RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
                break;
        }
 
@@ -153,16 +165,16 @@ sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
-               mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+               mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
-               mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+               mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                break;
        default:
-               RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
-               SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
-                          PKT_RX_L4_CKSUM_UNKNOWN);
+               RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
+               SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
+                          RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
                break;
        }
 
@@ -213,7 +225,7 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                                                      mbuf_data);
 
-               m->ol_flags |= PKT_RX_RSS_HASH;
+               m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
        }
 }
 
@@ -636,9 +648,9 @@ struct sfc_dp_rx sfc_efx_rx = {
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_RX_EFX,
        },
        .features               = SFC_DP_RX_FEAT_INTR,
-       .dev_offload_capa       = DEV_RX_OFFLOAD_CHECKSUM |
-                                 DEV_RX_OFFLOAD_RSS_HASH,
-       .queue_offload_capa     = DEV_RX_OFFLOAD_SCATTER,
+       .dev_offload_capa       = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+                                 RTE_ETH_RX_OFFLOAD_RSS_HASH,
+       .queue_offload_capa     = RTE_ETH_RX_OFFLOAD_SCATTER,
        .qsize_up_rings         = sfc_efx_rx_qsize_up_rings,
        .qcreate                = sfc_efx_rx_qcreate,
        .qdestroy               = sfc_efx_rx_qdestroy,
@@ -919,7 +931,7 @@ sfc_rx_get_offload_mask(struct sfc_adapter *sa)
        uint64_t no_caps = 0;
 
        if (encp->enc_tunnel_encapsulations_supported == 0)
-               no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+               no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
        return ~no_caps;
 }
@@ -929,8 +941,6 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
 {
        uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
 
-       caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
        return caps & sfc_rx_get_offload_mask(sa);
 }
 
@@ -1130,7 +1140,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 
        if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
                                  encp->enc_rx_prefix_size,
-                                 (offloads & DEV_RX_OFFLOAD_SCATTER),
+                                 (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
                                  encp->enc_rx_scatter_max,
                                  &error)) {
                sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
@@ -1155,18 +1165,25 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
        else
                rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
-       rxq_info->type_flags =
-               (offloads & DEV_RX_OFFLOAD_SCATTER) ?
+       rxq_info->type_flags |=
+               (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
                EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
        if ((encp->enc_tunnel_encapsulations_supported != 0) &&
            (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
-            DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+            RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
                rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
 
-       if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+       if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
                rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
 
+       if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
+               rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;
+
+       if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
+           sfc_flow_tunnel_is_active(sa))
+               rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;
+
        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
                          evq_entries, socket_id, &evq);
        if (rc != 0)
@@ -1194,7 +1211,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
        rxq_info->refill_mb_pool = mb_pool;
 
        if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
-           (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+           (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
                rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
        else
                rxq_info->rxq_flags = 0;
@@ -1214,6 +1231,12 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
        info.buf_size = buf_size;
        info.batch_max = encp->enc_rx_batch_max;
        info.prefix_size = encp->enc_rx_prefix_size;
+
+       if (sfc_flow_tunnel_is_active(sa))
+               info.user_mark_mask = SFC_FT_USER_MARK_MASK;
+       else
+               info.user_mark_mask = UINT32_MAX;
+
        info.flags = rxq_info->rxq_flags;
        info.rxq_entries = rxq_info->entries;
        info.rxq_hw_ring = rxq->mem.esm_base;
@@ -1290,19 +1313,19 @@ sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
  * Mapping between RTE RSS hash functions and their EFX counterparts.
  */
 static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
-       { ETH_RSS_NONFRAG_IPV4_TCP,
+       { RTE_ETH_RSS_NONFRAG_IPV4_TCP,
          EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
-       { ETH_RSS_NONFRAG_IPV4_UDP,
+       { RTE_ETH_RSS_NONFRAG_IPV4_UDP,
          EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
-       { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+       { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
          EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
-       { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+       { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
          EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
-       { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+       { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
          EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
          EFX_RX_HASH(IPV4, 2TUPLE) },
-       { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
-         ETH_RSS_IPV6_EX,
+       { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+         RTE_ETH_RSS_IPV6_EX,
          EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
          EFX_RX_HASH(IPV6, 2TUPLE) }
 };
@@ -1594,8 +1617,9 @@ sfc_rx_stop(struct sfc_adapter *sa)
        efx_rx_fini(sa->nic);
 }
 
-static int
-sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
+int
+sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
+                 unsigned int extra_efx_type_flags)
 {
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
@@ -1606,6 +1630,7 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
        SFC_ASSERT(rte_is_power_of_2(max_entries));
 
        rxq_info->max_entries = max_entries;
+       rxq_info->type_flags = extra_efx_type_flags;
 
        return 0;
 }
@@ -1620,10 +1645,10 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
        int rc = 0;
 
        switch (rxmode->mq_mode) {
-       case ETH_MQ_RX_NONE:
+       case RTE_ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
-       case ETH_MQ_RX_RSS:
+       case RTE_ETH_MQ_RX_RSS:
                if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                        rc = EINVAL;
@@ -1640,16 +1665,16 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
         * so unsupported offloads cannot be added as the result of
         * below check.
         */
-       if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
-           (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+       if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+           (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
                sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-               rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+               rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
        }
 
-       if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-           (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+       if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+           (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
                sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-               rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+               rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
        }
 
        return rc;
@@ -1703,6 +1728,9 @@ sfc_rx_configure(struct sfc_adapter *sa)
        struct sfc_rss *rss = &sas->rss;
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+       const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
+       const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
+       bool reconfigure;
        int rc;
 
        sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
@@ -1712,12 +1740,15 @@ sfc_rx_configure(struct sfc_adapter *sa)
        if (rc != 0)
                goto fail_check_mode;
 
-       if (nb_rx_queues == sas->rxq_count)
+       if (nb_rxq_total == sas->rxq_count) {
+               reconfigure = true;
                goto configure_rss;
+       }
 
        if (sas->rxq_info == NULL) {
+               reconfigure = false;
                rc = ENOMEM;
-               sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+               sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
                                                  sizeof(sas->rxq_info[0]), 0,
                                                  sa->socket_id);
                if (sas->rxq_info == NULL)
@@ -1728,39 +1759,42 @@ sfc_rx_configure(struct sfc_adapter *sa)
                 * since it should not be shared.
                 */
                rc = ENOMEM;
-               sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
+               sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
                if (sa->rxq_ctrl == NULL)
                        goto fail_rxqs_ctrl_alloc;
        } else {
                struct sfc_rxq_info *new_rxq_info;
                struct sfc_rxq *new_rxq_ctrl;
 
+               reconfigure = true;
+
+               /* Do not uninitialize reserved queues */
                if (nb_rx_queues < sas->ethdev_rxq_count)
                        sfc_rx_fini_queues(sa, nb_rx_queues);
 
                rc = ENOMEM;
                new_rxq_info =
                        rte_realloc(sas->rxq_info,
-                                   nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
-               if (new_rxq_info == NULL && nb_rx_queues > 0)
+                                   nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
+               if (new_rxq_info == NULL && nb_rxq_total > 0)
                        goto fail_rxqs_realloc;
 
                rc = ENOMEM;
                new_rxq_ctrl = realloc(sa->rxq_ctrl,
-                                      nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
-               if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
+                                      nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
+               if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
                        goto fail_rxqs_ctrl_realloc;
 
                sas->rxq_info = new_rxq_info;
                sa->rxq_ctrl = new_rxq_ctrl;
-               if (nb_rx_queues > sas->rxq_count) {
+               if (nb_rxq_total > sas->rxq_count) {
                        unsigned int rxq_count = sas->rxq_count;
 
                        memset(&sas->rxq_info[rxq_count], 0,
-                              (nb_rx_queues - rxq_count) *
+                              (nb_rxq_total - rxq_count) *
                               sizeof(sas->rxq_info[0]));
                        memset(&sa->rxq_ctrl[rxq_count], 0,
-                              (nb_rx_queues - rxq_count) *
+                              (nb_rxq_total - rxq_count) *
                               sizeof(sa->rxq_ctrl[0]));
                }
        }
@@ -1770,17 +1804,23 @@ sfc_rx_configure(struct sfc_adapter *sa)
 
                sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
                                                        sas->ethdev_rxq_count);
-               rc = sfc_rx_qinit_info(sa, sw_index);
+               rc = sfc_rx_qinit_info(sa, sw_index, 0);
                if (rc != 0)
                        goto fail_rx_qinit_info;
 
                sas->ethdev_rxq_count++;
        }
 
-       sas->rxq_count = sas->ethdev_rxq_count;
+       sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;
+
+       if (!reconfigure) {
+               rc = sfc_mae_counter_rxq_init(sa);
+               if (rc != 0)
+                       goto fail_count_rxq_init;
+       }
 
 configure_rss:
-       rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+       rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
                         MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
        if (rss->channels > 0) {
@@ -1799,6 +1839,10 @@ configure_rss:
        return 0;
 
 fail_rx_process_adv_conf_rss:
+       if (!reconfigure)
+               sfc_mae_counter_rxq_fini(sa);
+
+fail_count_rxq_init:
 fail_rx_qinit_info:
 fail_rxqs_ctrl_realloc:
 fail_rxqs_realloc:
@@ -1822,6 +1866,7 @@ sfc_rx_close(struct sfc_adapter *sa)
        struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 
        sfc_rx_fini_queues(sa, 0);
+       sfc_mae_counter_rxq_fini(sa);
 
        rss->channels = 0;