net/sfc: support equal stride super-buffer Rx mode
author		Andrew Rybchenko <arybchenko@solarflare.com>
		Thu, 19 Apr 2018 11:36:55 +0000 (12:36 +0100)
committer	Ferruh Yigit <ferruh.yigit@intel.com>
		Fri, 27 Apr 2018 17:00:58 +0000 (18:00 +0100)
One HW Rx descriptor represents many contiguous packet buffers which
follow each other. The number of buffers, the stride and the maximum DMA
length are configurable at setup time per Rx queue, based on the provided
mempool. The mempool must support contiguous block allocation and the
get info API to retrieve the number of objects in a block.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
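The "get info" requirement above can be checked by a consumer at runtime; a
minimal sketch, assuming only rte_mempool_ops_get_info() and the
contig_block_size field that the driver code below relies on (the helper name
is illustrative):

    #include <rte_mempool.h>

    /* Return non-zero if the mempool can back an equal stride super-buffer RxQ. */
    static int
    essb_pool_is_usable(const struct rte_mempool *mp)
    {
            struct rte_mempool_info info;

            /* The mempool ops must implement get_info... */
            if (rte_mempool_ops_get_info(mp, &info) != 0)
                    return 0;

            /* ...and report support for contiguous block dequeue. */
            return info.contig_block_size != 0;
    }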
15 files changed:
doc/guides/nics/sfc_efx.rst
drivers/net/sfc/Makefile
drivers/net/sfc/efsys.h
drivers/net/sfc/meson.build
drivers/net/sfc/sfc_dp.h
drivers/net/sfc/sfc_dp_rx.h
drivers/net/sfc/sfc_ef10.h
drivers/net/sfc/sfc_ef10_essb_rx.c [new file with mode: 0644]
drivers/net/sfc/sfc_ef10_rx.c
drivers/net/sfc/sfc_ef10_rx_ev.h
drivers/net/sfc/sfc_ethdev.c
drivers/net/sfc/sfc_ev.c
drivers/net/sfc/sfc_kvargs.h
drivers/net/sfc/sfc_rx.c
drivers/net/sfc/sfc_rx.h

index 07ccafa..66daa1d 100644 (file)
@@ -121,6 +121,21 @@ required in the receive buffer.
 It should be taken into account when mbuf pool for receive is created.
 
 
+Equal stride super-buffer mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the receive queue uses equal stride super-buffer DMA mode, one HW Rx
+descriptor carries many Rx buffers which contiguously follow each other
+with some stride (equal to the total size of rte_mbuf as a mempool object).
+Each Rx buffer is an independent rte_mbuf.
+However, a dedicated mempool manager must be used when the mempool for the
+Rx queue is created. The manager must support dequeue of a contiguous
+block of objects and provide the mempool info API to get the block size.
+
+Another limitation of the equal stride super-buffer mode, imposed by the
+firmware, is that it allows for a single RSS context.
+
+
 Tunnels support
 ---------------
 
@@ -291,7 +306,7 @@ whitelist option like "-w 02:00.0,arg1=value1,...".
 Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
 boolean parameters value.
 
-- ``rx_datapath`` [auto|efx|ef10] (default **auto**)
+- ``rx_datapath`` [auto|efx|ef10|ef10_essb] (default **auto**)
 
   Choose receive datapath implementation.
   **auto** allows the driver itself to make a choice based on firmware
@@ -300,6 +315,9 @@ boolean parameters value.
   **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
   more efficient than libefx-based and provides richer packet type
   classification, but lacks Rx scatter support.
+  **ef10_essb** chooses the X2xxx equal stride super-buffer datapath
+  which may be used with the DPDK firmware variant only
+  (see notes about its limitations above).
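For example (the PCI address is illustrative), this datapath can be requested
explicitly with a whitelist option such as ``-w 02:00.0,rx_datapath=ef10_essb``;
with **auto**, the driver itself prefers it when the firmware reports equal
stride super-buffer support.

The dedicated mempool manager required by this mode (see the equal stride
super-buffer section above) can be provided, for instance, by the in-tree
``bucket`` mempool handler, which is assumed here; a minimal sketch with an
illustrative pool name and sizes:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Create an mbuf pool backed by ops supporting contiguous block dequeue. */
    static struct rte_mempool *
    essb_rx_pool_create(int socket_id)
    {
            return rte_pktmbuf_pool_create_by_ops("rx_essb_pool",
                                                  8192, /* number of mbufs */
                                                  256,  /* per-lcore cache size */
                                                  0,    /* private area size */
                                                  RTE_MBUF_DEFAULT_BUF_SIZE,
                                                  socket_id,
                                                  "bucket");
    }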
 
 - ``tx_datapath`` [auto|efx|ef10|ef10_simple] (default **auto**)

index f3e0b4b..3bb41a0 100644 (file)
@@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
 
 VPATH += $(SRCDIR)/base
index f71581c..b9d2df5 100644 (file)
@@ -198,7 +198,7 @@ prefetch_read_once(const volatile void *addr)
 
 #define EFSYS_OPT_RX_PACKED_STREAM 0

-#define EFSYS_OPT_RX_ES_SUPER_BUFFER 0
+#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
 
 #define EFSYS_OPT_TUNNEL 1

index 0de2e17..3aa14c7 100644 (file)
@@ -54,6 +54,7 @@ sources = files(
        'sfc_flow.c',
        'sfc_dp.c',
        'sfc_ef10_rx.c',
+       'sfc_ef10_essb_rx.c',
        'sfc_ef10_tx.c'
 )
 
index 26e7195..3da65ab 100644 (file)
@@ -79,7 +79,8 @@ struct sfc_dp {
        enum sfc_dp_type                type;
        /* Mask of required hardware/firmware capabilities */
        unsigned int                    hw_fw_caps;
-#define SFC_DP_HW_FW_CAP_EF10          0x1
+#define SFC_DP_HW_FW_CAP_EF10                          0x1
+#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER            0x2
 };
 
 /** List of datapath variants */
index ecb486f..db075dd 100644 (file)
@@ -149,6 +149,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
  */
 typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
 
+/**
+ * Packed stream receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+                                    unsigned int id);
+
 /**
  * Receive queue purge function called after queue flush.
  *
@@ -182,6 +188,7 @@ struct sfc_dp_rx {
        sfc_dp_rx_qstart_t                      *qstart;
        sfc_dp_rx_qstop_t                       *qstop;
        sfc_dp_rx_qrx_ev_t                      *qrx_ev;
+       sfc_dp_rx_qrx_ps_ev_t                   *qrx_ps_ev;
        sfc_dp_rx_qpurge_t                      *qpurge;
        sfc_dp_rx_supported_ptypes_get_t        *supported_ptypes_get;
        sfc_dp_rx_qdesc_npending_t              *qdesc_npending;
@@ -207,6 +214,7 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
 
 extern struct sfc_dp_rx sfc_efx_rx;
 extern struct sfc_dp_rx sfc_ef10_rx;
+extern struct sfc_dp_rx sfc_ef10_essb_rx;
 
 #ifdef __cplusplus
 }
index 865359f..a73e0bd 100644 (file)
@@ -110,6 +110,9 @@ sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
 }
 
 
+const uint32_t *sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps);
+
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
new file mode 100644 (file)
index 0000000..1df61ff
--- /dev/null
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* EF10 equal stride packed stream receive native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+/* Tunnels are not supported */
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT   0
+#include "sfc_ef10_rx_ev.h"
+
+#define sfc_ef10_essb_rx_err(dpq, ...) \
+       SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)
+
+#define sfc_ef10_essb_rx_info(dpq, ...) \
+       SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)
+
+/*
+ * Fake length for RXQ descriptors in equal stride super-buffer mode
+ * to make hardware happy.
+ */
+#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ */
+#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
+       ((_nevs) - 1 /* head must not step on tail */ - \
+        (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+        1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_essb_rx_sw_desc {
+       struct rte_mbuf                 *first_mbuf;
+};
+
+struct sfc_ef10_essb_rxq {
+       /* Used on data path */
+       unsigned int                    flags;
+#define SFC_EF10_ESSB_RXQ_STARTED      0x1
+#define SFC_EF10_ESSB_RXQ_NOT_RUNNING  0x2
+#define SFC_EF10_ESSB_RXQ_EXCEPTION    0x4
+       unsigned int                    rxq_ptr_mask;
+       unsigned int                    block_size;
+       unsigned int                    buf_stride;
+       unsigned int                    bufs_ptr;
+       unsigned int                    completed;
+       unsigned int                    pending_id;
+       unsigned int                    bufs_pending;
+       unsigned int                    left_in_completed;
+       unsigned int                    left_in_pending;
+       unsigned int                    evq_read_ptr;
+       unsigned int                    evq_ptr_mask;
+       efx_qword_t                     *evq_hw_ring;
+       struct sfc_ef10_essb_rx_sw_desc *sw_ring;
+       uint16_t                        port_id;
+
+       /* Used on refill */
+       unsigned int                    added;
+       unsigned int                    max_fill_level;
+       unsigned int                    refill_threshold;
+       struct rte_mempool              *refill_mb_pool;
+       efx_qword_t                     *rxq_hw_ring;
+       volatile void                   *doorbell;
+
+       /* Datapath receive queue anchor */
+       struct sfc_dp_rxq               dp;
+};
+
+static inline struct sfc_ef10_essb_rxq *
+sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+       return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
+                       struct rte_mbuf *mbuf)
+{
+       return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
+                           struct rte_mbuf *mbuf, unsigned int idx)
+{
+       return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
+{
+       const struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+       if (rxq->left_in_completed != 0) {
+               rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+               return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+                               rxq->block_size - rxq->left_in_completed);
+       } else {
+               rxq->completed++;
+               rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+               rxq->left_in_completed = rxq->block_size;
+               return rxd->first_mbuf;
+       }
+}
+
+static void
+sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
+{
+       const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
+       unsigned int free_space;
+       unsigned int bulks;
+       void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
+       unsigned int added = rxq->added;
+
+       free_space = rxq->max_fill_level - (added - rxq->completed);
+
+       if (free_space < rxq->refill_threshold)
+               return;
+
+       bulks = free_space / RTE_DIM(mbuf_blocks);
+       /* refill_threshold guarantees that bulks is positive */
+       SFC_ASSERT(bulks > 0);
+
+       do {
+               unsigned int id;
+               unsigned int i;
+
+               if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
+                               mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
+                       struct rte_eth_dev_data *dev_data =
+                               rte_eth_devices[rxq->port_id].data;
+
+                       /*
+                        * It is hardly a safe way to increment counter
+                        * from different contexts, but all PMDs do it.
+                        */
+                       dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
+                       /* Return if we have posted nothing yet */
+                       if (added == rxq->added)
+                               return;
+                       /* Push posted */
+                       break;
+               }
+
+               for (i = 0, id = added & rxq_ptr_mask;
+                    i < RTE_DIM(mbuf_blocks);
+                    ++i, ++id) {
+                       struct rte_mbuf *m = mbuf_blocks[i];
+                       struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+                       SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
+                       rxd = &rxq->sw_ring[id];
+                       rxd->first_mbuf = m;
+
+                       /* RX_KER_BYTE_CNT is ignored by firmware */
+                       EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+                                            ESF_DZ_RX_KER_BYTE_CNT,
+                                            SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
+                                            ESF_DZ_RX_KER_BUF_ADDR,
+                                            rte_mbuf_data_iova_default(m));
+               }
+
+               added += RTE_DIM(mbuf_blocks);
+
+       } while (--bulks > 0);
+
+       SFC_ASSERT(rxq->added != added);
+       rxq->added = added;
+       sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
+}
+
+static bool
+sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
+{
+       *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];
+
+       if (!sfc_ef10_ev_present(*rx_ev))
+               return false;
+
+       if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+                    FSE_AZ_EV_CODE_RX_EV)) {
+               /*
+                * Do not move read_ptr to keep the event for exception
+                * handling
+                */
+               rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
+               sfc_ef10_essb_rx_err(&rxq->dp.dpq,
+                                    "RxQ exception at EvQ read ptr %#x",
+                                    rxq->evq_read_ptr);
+               return false;
+       }
+
+       rxq->evq_read_ptr++;
+       return true;
+}
+
+static void
+sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
+{
+       unsigned int ready;
+
+       ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+                rxq->bufs_ptr) &
+               EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+
+       rxq->bufs_ptr += ready;
+       rxq->bufs_pending += ready;
+
+       SFC_ASSERT(ready > 0);
+       do {
+               const struct sfc_ef10_essb_rx_sw_desc *rxd;
+               struct rte_mbuf *m;
+               unsigned int todo_bufs;
+               struct rte_mbuf *m0;
+
+               rxd = &rxq->sw_ring[rxq->pending_id];
+               m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+                       rxq->block_size - rxq->left_in_pending);
+
+               if (ready < rxq->left_in_pending) {
+                       todo_bufs = ready;
+                       ready = 0;
+                       rxq->left_in_pending -= todo_bufs;
+               } else {
+                       todo_bufs = rxq->left_in_pending;
+                       ready -= todo_bufs;
+                       rxq->left_in_pending = rxq->block_size;
+                       if (rxq->pending_id != rxq->rxq_ptr_mask)
+                               rxq->pending_id++;
+                       else
+                               rxq->pending_id = 0;
+               }
+
+               SFC_ASSERT(todo_bufs > 0);
+               --todo_bufs;
+
+               sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);
+
+               /* Prefetch pseudo-header */
+               rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+
+               m0 = m;
+               while (todo_bufs-- > 0) {
+                       m = sfc_ef10_essb_next_mbuf(rxq, m);
+                       m->ol_flags = m0->ol_flags;
+                       m->packet_type = m0->packet_type;
+                       /* Prefetch pseudo-header */
+                       rte_prefetch0((uint8_t *)m->buf_addr +
+                                     RTE_PKTMBUF_HEADROOM);
+               }
+       } while (ready > 0);
+}
+
+static unsigned int
+sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       unsigned int n_rx_pkts = 0;
+       unsigned int todo_bufs;
+       struct rte_mbuf *m;
+
+       while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
+                                   rxq->bufs_pending)) > 0) {
+               m = sfc_ef10_essb_maybe_next_completed(rxq);
+
+               todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);
+
+               rxq->bufs_pending -= todo_bufs;
+               rxq->left_in_completed -= todo_bufs;
+
+               SFC_ASSERT(todo_bufs > 0);
+               todo_bufs--;
+
+               do {
+                       const efx_qword_t *qwordp;
+                       uint16_t pkt_len;
+
+                       rx_pkts[n_rx_pkts++] = m;
+
+                       /* Parse pseudo-header */
+                       qwordp = (const efx_qword_t *)
+                               ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+                       pkt_len =
+                               EFX_QWORD_FIELD(*qwordp,
+                                               ES_EZ_ESSB_RX_PREFIX_DATA_LEN);
+
+                       m->data_off = RTE_PKTMBUF_HEADROOM +
+                               ES_EZ_ESSB_RX_PREFIX_LEN;
+                       m->port = rxq->port_id;
+
+                       rte_pktmbuf_pkt_len(m) = pkt_len;
+                       rte_pktmbuf_data_len(m) = pkt_len;
+
+                       m->ol_flags |=
+                               (PKT_RX_RSS_HASH *
+                                !!EFX_TEST_QWORD_BIT(*qwordp,
+                                       ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN));
+
+                       /* EFX_QWORD_FIELD converts little-endian to CPU */
+                       m->hash.rss =
+                               EFX_QWORD_FIELD(*qwordp,
+                                               ES_EZ_ESSB_RX_PREFIX_HASH);
+
+                       m = sfc_ef10_essb_next_mbuf(rxq, m);
+               } while (todo_bufs-- > 0);
+       }
+
+       return n_rx_pkts;
+}
+
+
+static uint16_t
+sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                       uint16_t nb_pkts)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
+       const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+       uint16_t n_rx_pkts;
+       efx_qword_t rx_ev;
+
+       if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+                                  SFC_EF10_ESSB_RXQ_EXCEPTION)))
+               return 0;
+
+       n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);
+
+       while (n_rx_pkts != nb_pkts &&
+              sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+               /*
+                * DROP_EVENT is internal to the NIC, software should
+                * never see it and, therefore, may ignore it.
+                */
+
+               sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+               n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
+                                                         rx_pkts + n_rx_pkts,
+                                                         nb_pkts - n_rx_pkts);
+       }
+
+       sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+                          evq_old_read_ptr, rxq->evq_read_ptr);
+
+       /* It is not a problem if we refill in the case of exception */
+       sfc_ef10_essb_rx_qrefill(rxq);
+
+       return n_rx_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+       /*
+        * Correct implementation requires EvQ polling and events
+        * processing.
+        */
+       return -ENOTSUP;
+}
+
+static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
+static void
+sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+       /*
+        * Number of descriptors just defines maximum number of pushed
+        * descriptors (fill level).
+        */
+       dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
+       dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+}
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
+static int
+sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+                               struct rte_mempool *mb_pool,
+                               unsigned int *rxq_entries,
+                               unsigned int *evq_entries,
+                               unsigned int *rxq_max_fill_level)
+{
+       int rc;
+       struct rte_mempool_info mp_info;
+       unsigned int nb_hw_rx_desc;
+       unsigned int max_events;
+
+       rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
+       if (rc != 0)
+               return -rc;
+       if (mp_info.contig_block_size == 0)
+               return EINVAL;
+
+       /*
+        * Calculate required number of hardware Rx descriptors each
+        * carrying contig block size Rx buffers.
+        * It cannot be less than Rx write pointer alignment plus 1
+        * in order to avoid cases when the ring is guaranteed to be
+        * empty.
+        */
+       nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
+                                                mp_info.contig_block_size),
+                               SFC_EF10_RX_WPTR_ALIGN + 1);
+       if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
+               *rxq_entries = EFX_RXQ_MINNDESCS;
+       } else {
+               *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
+               if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+                       return EINVAL;
+       }
+
+       max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
+               mp_info.contig_block_size +
+               (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
+               1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
+
+       *evq_entries = rte_align32pow2(max_events);
+       *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
+       *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+
+       /*
+        * Even the maximum event queue size may be insufficient to handle
+        * so many Rx descriptors. If so, we should limit Rx queue fill level.
+        */
+       *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+                                     SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
+       return 0;
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
+static int
+sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+                        const struct rte_pci_addr *pci_addr, int socket_id,
+                        const struct sfc_dp_rx_qcreate_info *info,
+                        struct sfc_dp_rxq **dp_rxqp)
+{
+       struct rte_mempool * const mp = info->refill_mb_pool;
+       struct rte_mempool_info mp_info;
+       struct sfc_ef10_essb_rxq *rxq;
+       int rc;
+
+       rc = rte_mempool_ops_get_info(mp, &mp_info);
+       if (rc != 0) {
+               /* Positive errno is used in the driver */
+               rc = -rc;
+               goto fail_get_contig_block_size;
+       }
+
+       /* Check if the mempool provides block dequeue */
+       rc = EINVAL;
+       if (mp_info.contig_block_size == 0)
+               goto fail_no_block_dequeue;
+
+       rc = ENOMEM;
+       rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxq == NULL)
+               goto fail_rxq_alloc;
+
+       sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+       rc = ENOMEM;
+       rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+                                        info->rxq_entries,
+                                        sizeof(*rxq->sw_ring),
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxq->sw_ring == NULL)
+               goto fail_desc_alloc;
+
+       rxq->block_size = mp_info.contig_block_size;
+       rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
+       rxq->rxq_ptr_mask = info->rxq_entries - 1;
+       rxq->evq_ptr_mask = info->evq_entries - 1;
+       rxq->evq_hw_ring = info->evq_hw_ring;
+       rxq->port_id = port_id;
+
+       rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
+       rxq->refill_threshold =
+               RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
+                       SFC_EF10_RX_WPTR_ALIGN);
+       rxq->refill_mb_pool = mp;
+       rxq->rxq_hw_ring = info->rxq_hw_ring;
+
+       rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+                       ER_DZ_RX_DESC_UPD_REG_OFST +
+                       (info->hw_index << info->vi_window_shift);
+
+       sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+                             "block size is %u, buf stride is %u",
+                             rxq->block_size, rxq->buf_stride);
+       sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+                             "max fill level is %u descs (%u bufs), "
+                             "refill threashold %u descs (%u bufs)",
+                             rxq->max_fill_level,
+                             rxq->max_fill_level * rxq->block_size,
+                             rxq->refill_threshold,
+                             rxq->refill_threshold * rxq->block_size);
+
+       *dp_rxqp = &rxq->dp;
+       return 0;
+
+fail_desc_alloc:
+       rte_free(rxq);
+
+fail_rxq_alloc:
+fail_no_block_dequeue:
+fail_get_contig_block_size:
+       return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
+static void
+sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+       rte_free(rxq->sw_ring);
+       rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
+static int
+sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+       rxq->evq_read_ptr = evq_read_ptr;
+
+       /* Initialize before refill */
+       rxq->completed = rxq->pending_id = rxq->added = 0;
+       rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
+       rxq->bufs_ptr = UINT_MAX;
+       rxq->bufs_pending = 0;
+
+       sfc_ef10_essb_rx_qrefill(rxq);
+
+       rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
+       rxq->flags &=
+               ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);
+
+       return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
+static void
+sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+       rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;
+
+       *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
+static bool
+sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+       __rte_unused struct sfc_ef10_essb_rxq *rxq;
+
+       rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+       SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);
+
+       /*
+        * It is safe to ignore Rx event since we free all mbufs on
+        * queue purge anyway.
+        */
+
+       return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
+static void
+sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+       unsigned int i, j;
+       const struct sfc_ef10_essb_rx_sw_desc *rxd;
+       struct rte_mbuf *m;
+
+       if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
+               rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+               m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+                               rxq->block_size - rxq->left_in_completed);
+               do {
+                       rxq->left_in_completed--;
+                       rte_mempool_put(rxq->refill_mb_pool, m);
+                       m = sfc_ef10_essb_next_mbuf(rxq, m);
+               } while (rxq->left_in_completed > 0);
+               rxq->completed++;
+       }
+
+       for (i = rxq->completed; i != rxq->added; ++i) {
+               rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
+               m = rxd->first_mbuf;
+               for (j = 0; j < rxq->block_size; ++j) {
+                       rte_mempool_put(rxq->refill_mb_pool, m);
+                       m = sfc_ef10_essb_next_mbuf(rxq, m);
+               }
+       }
+
+       rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_essb_rx = {
+       .dp = {
+               .name           = SFC_KVARG_DATAPATH_EF10_ESSB,
+               .type           = SFC_DP_RX,
+               .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10 |
+                                 SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
+       },
+       .features               = 0,
+       .get_dev_info           = sfc_ef10_essb_rx_get_dev_info,
+       .qsize_up_rings         = sfc_ef10_essb_rx_qsize_up_rings,
+       .qcreate                = sfc_ef10_essb_rx_qcreate,
+       .qdestroy               = sfc_ef10_essb_rx_qdestroy,
+       .qstart                 = sfc_ef10_essb_rx_qstart,
+       .qstop                  = sfc_ef10_essb_rx_qstop,
+       .qrx_ev                 = sfc_ef10_essb_rx_qrx_ev,
+       .qpurge                 = sfc_ef10_essb_rx_qpurge,
+       .supported_ptypes_get   = sfc_ef10_supported_ptypes_get,
+       .qdesc_npending         = sfc_ef10_essb_rx_qdesc_npending,
+       .pkt_burst              = sfc_ef10_essb_recv_pkts,
+};
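As a worked example of the ring sizing in sfc_ef10_essb_rx_qsize_up_rings()
above (the constants are assumptions taken from the surrounding headers:
SFC_EF10_RX_WPTR_ALIGN = 8, SFC_EF10_EV_PER_CACHE_LINE = 8 and a 512-entry
minimum RxQ size): for a mempool with contig_block_size = 32 and a request
for 2048 Rx buffers, the queue needs max(2048 / 32, 8 + 1) = 64 hardware Rx
descriptors, which is rounded up to the 512-entry minimum; the event queue
must absorb up to 64 * 32 + 7 + 3 = 2058 events and is therefore sized to
4096 entries, and SFC_EF10_ESSB_RXQ_LIMIT(4096) = 4086 leaves the requested
fill level of 2048 buffers unchanged.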
index 1f0d6a0..42b35b9 100644 (file)
@@ -386,7 +386,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        return n_rx_pkts;
 }
 
-static const uint32_t *
+const uint32_t *
 sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
 {
        static const uint32_t ef10_native_ptypes[] = {
index 9054fb9..615bd29 100644 (file)
@@ -34,7 +34,10 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
        uint32_t l4_ptype = 0;
        uint64_t ol_flags = 0;
 
-       if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
+       if (unlikely(rx_ev.eq_u64[0] &
+               rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+                                (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
+                                (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN))))
                goto done;
 
 #if SFC_EF10_RX_EV_ENCAP_SUPPORT
index 35a8301..700e154 100644 (file)
@@ -1707,6 +1707,7 @@ static int
 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 {
        struct sfc_adapter *sa = dev->data->dev_private;
+       const efx_nic_cfg_t *encp;
        unsigned int avail_caps = 0;
        const char *rx_name = NULL;
        const char *tx_name = NULL;
@@ -1722,6 +1723,10 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
                break;
        }
 
+       encp = efx_nic_cfg_get(sa->nic);
+       if (encp->enc_rx_es_super_buffer_supported)
+               avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
        rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
                                sfc_kvarg_string_handler, &rx_name);
        if (rc != 0)
@@ -1911,6 +1916,7 @@ sfc_register_dp(void)
        /* Register once */
        if (TAILQ_EMPTY(&sfc_dp_head)) {
                /* Prefer EF10 datapath */
+               sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
                sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
                sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
 
index 8a5030b..f93d30e 100644 (file)
@@ -162,6 +162,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
        return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
 }
 
+static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+                uint32_t pkt_count, uint16_t flags)
+{
+       struct sfc_evq *evq = arg;
+
+       sfc_err(evq->sa,
+               "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+               evq->evq_index, label, id, pkt_count, flags);
+       return B_TRUE;
+}
+
+/* It is not actually used on datapath, but required on RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+               __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+       struct sfc_evq *evq = arg;
+       struct sfc_dp_rxq *dp_rxq;
+
+       dp_rxq = evq->dp_rxq;
+       SFC_ASSERT(dp_rxq != NULL);
+
+       if (evq->sa->dp_rx->qrx_ps_ev != NULL)
+               return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+       else
+               return B_FALSE;
+}
+
 static boolean_t
 sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
 {
@@ -394,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 static const efx_ev_callbacks_t sfc_ev_callbacks = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
@@ -409,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_efx_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_rxq_flush_done,
@@ -424,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_dp_rx,
+       .eec_rx_ps              = sfc_ev_dp_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_rxq_flush_done,
@@ -439,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
@@ -454,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_dp_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
index 1e578e7..057002e 100644 (file)
@@ -33,11 +33,13 @@ extern "C" {
 #define SFC_KVARG_DATAPATH_EFX         "efx"
 #define SFC_KVARG_DATAPATH_EF10                "ef10"
 #define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+#define SFC_KVARG_DATAPATH_EF10_ESSB   "ef10_essb"
 
 #define SFC_KVARG_RX_DATAPATH          "rx_datapath"
 #define SFC_KVARG_VALUES_RX_DATAPATH \
        "[" SFC_KVARG_DATAPATH_EFX "|" \
 
 #define SFC_KVARG_RX_DATAPATH          "rx_datapath"
 #define SFC_KVARG_VALUES_RX_DATAPATH \
        "[" SFC_KVARG_DATAPATH_EFX "|" \
-           SFC_KVARG_DATAPATH_EF10 "]"
+           SFC_KVARG_DATAPATH_EF10 "|" \
+           SFC_KVARG_DATAPATH_EF10_ESSB "]"
 
 #define SFC_KVARG_TX_DATAPATH          "tx_datapath"
 #define SFC_KVARG_VALUES_TX_DATAPATH \
index 7345074..653724f 100644 (file)
@@ -680,10 +680,37 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
        if (rc != 0)
                goto fail_ev_qstart;
 
-       rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
-                           &rxq->mem, rxq_info->entries,
-                           0 /* not used on EF10 */, rxq_info->type_flags,
-                           evq->common, &rxq->common);
+       switch (rxq_info->type) {
+       case EFX_RXQ_TYPE_DEFAULT:
+               rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+                       &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
+                       rxq_info->type_flags, evq->common, &rxq->common);
+               break;
+       case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
+               struct rte_mempool *mp = rxq->refill_mb_pool;
+               struct rte_mempool_info mp_info;
+
+               rc = rte_mempool_ops_get_info(mp, &mp_info);
+               if (rc != 0) {
+                       /* Positive errno is used in the driver */
+                       rc = -rc;
+                       goto fail_mp_get_info;
+               }
+               if (mp_info.contig_block_size <= 0) {
+                       rc = EINVAL;
+                       goto fail_bad_contig_block_size;
+               }
+               rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
+                       mp_info.contig_block_size, rxq->buf_size,
+                       mp->header_size + mp->elt_size + mp->trailer_size,
+                       0 /* hol_block_timeout */,
+                       &rxq->mem, rxq_info->entries, rxq_info->type_flags,
+                       evq->common, &rxq->common);
+               break;
+       }
+       default:
+               rc = ENOTSUP;
+       }
        if (rc != 0)
                goto fail_rx_qcreate;
 
@@ -714,6 +741,8 @@ fail_dp_qstart:
        sfc_rx_qflush(sa, sw_index);
 
 fail_rx_qcreate:
+fail_bad_contig_block_size:
+fail_mp_get_info:
        sfc_ev_qstop(evq);
 
 fail_ev_qstart:
@@ -1020,7 +1049,12 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 
        SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
        rxq_info->entries = rxq_entries;
-       rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
+       if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+               rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
+       else
+               rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
        rxq_info->type_flags =
                (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
                EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
@@ -1047,6 +1081,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
        rxq->refill_threshold =
                RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
        rxq->refill_mb_pool = mb_pool;
+       rxq->buf_size = buf_size;
 
        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
index d9e7b0b..3fba7d8 100644 (file)
@@ -60,6 +60,7 @@ struct sfc_rxq {
        unsigned int            hw_index;
        unsigned int            refill_threshold;
        struct rte_mempool      *refill_mb_pool;
+       uint16_t                buf_size;
        struct sfc_dp_rxq       *dp;
        unsigned int            state;
 };
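For reference, the buffer stride used above (both by the ESSB datapath and by
efx_rx_qcreate_es_super_buffer() in sfc_rx_qstart()) is simply the total size
of one mempool object, i.e. the distance between consecutive mbufs inside a
contiguous block; an equivalent computation on the application side, with an
illustrative helper name, would be:

    #include <rte_mempool.h>

    /* Stride between consecutive mbufs of one contiguous block. */
    static unsigned int
    essb_buf_stride(const struct rte_mempool *mp)
    {
            return mp->header_size + mp->elt_size + mp->trailer_size;
    }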