net/sfc: use NIC EVQ descs limits instead of defines
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 8dd4396..ccb6aea 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
  */
 #define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
 
+/**
+ * Minimum number of Rx buffers the datapath allows to be used.
+ *
+ * Each HW Rx descriptor describes many Rx buffers. The number of
+ * buffers per HW Rx descriptor is equal to the size of a contiguous
+ * block provided by the Rx buffers memory pool. The contiguous block
+ * size depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the
+ * rte_mbuf data size specified at memory pool creation. A typical
+ * rte_mbuf data size is about 2k, which gives a bit fewer than 32
+ * buffers per contiguous block with the default bucket size of 64k.
+ * Since HW Rx descriptors are pushed in groups of 8
+ * (see SFC_EF10_RX_WPTR_ALIGN), the required minimum is about 256
+ * buffers. Double it in the advertised minimum to allow for at least
+ * 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN     512
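+/* For example (approximate figures): a 64k bucket with ~2k mbuf data
+ * gives a bit fewer than 32 buffers per block; 8 pushed HW Rx
+ * descriptors * 32 buffers = 256, doubled to 512.
+ */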
+
+/**
+ * Alignment requirement for the number of Rx buffers.
+ *
+ * There are no extra requirements on alignment since the actual number
+ * of pushed Rx buffers will be a multiple of the contiguous block size,
+ * which is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN   1
+
 /**
  * Maximum number of descriptors/buffers in the Rx ring.
  * It should guarantee that the corresponding event queue never overfills.
@@ -98,14 +123,22 @@ static struct rte_mbuf *
 sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
                        struct rte_mbuf *mbuf)
 {
-       return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+       struct rte_mbuf *m;
+
+       m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
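+       /* Sanity-check that the mbuf still looks like a raw (just allocated) one */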
+       MBUF_RAW_ALLOC_CHECK(m);
+       return m;
 }
 
 static struct rte_mbuf *
 sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
                            struct rte_mbuf *mbuf, unsigned int idx)
 {
-       return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+       struct rte_mbuf *m;
+
+       m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+       MBUF_RAW_ALLOC_CHECK(m);
+       return m;
 }
 
 static struct rte_mbuf *
@@ -297,6 +330,12 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
                        const efx_qword_t *qwordp;
                        uint16_t pkt_len;
 
+                       /* Buffers to be discarded have 0 in packet type */
+                       if (unlikely(m->packet_type == 0)) {
+                               rte_mbuf_raw_free(m);
+                               goto next_buf;
+                       }
+
                        rx_pkts[n_rx_pkts++] = m;
 
                        /* Parse pseudo-header */
@@ -316,13 +355,23 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
                        m->ol_flags |=
                                (PKT_RX_RSS_HASH *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
-                                       ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN));
+                                       ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
+                               (PKT_RX_FDIR_ID *
+                                !!EFX_TEST_QWORD_BIT(*qwordp,
+                                       ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
+                               (PKT_RX_FDIR *
+                                !!EFX_TEST_QWORD_BIT(*qwordp,
+                                       ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
 
                        /* EFX_QWORD_FIELD converts little-endian to CPU */
                        m->hash.rss =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_HASH);
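+                       /* The mark from the Rx prefix is delivered via the FDIR ID */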
+                       m->hash.fdir.hi =
+                               EFX_QWORD_FIELD(*qwordp,
+                                               ES_EZ_ESSB_RX_PREFIX_MARK);
 
+next_buf:
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                } while (todo_bufs-- > 0);
        }
@@ -370,13 +419,45 @@ sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
 static unsigned int
-sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
 {
-       /*
-        * Correct implementation requires EvQ polling and events
-        * processing.
-        */
-       return -ENOTSUP;
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+       const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+       efx_qword_t rx_ev;
+
+       if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+                                  SFC_EF10_ESSB_RXQ_EXCEPTION)))
+               return rxq->bufs_pending;
+
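+       /*
+        * Poll the event queue and process outstanding Rx events so that
+        * bufs_pending is up to date before it is reported.
+        */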
+       while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+               /*
+                * DROP_EVENT is internal to the NIC; software should
+                * never see it and, therefore, may ignore it.
+                */
+               sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+       }
+
+       sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+                          evq_old_read_ptr, rxq->evq_read_ptr);
+
+       return rxq->bufs_pending;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+       struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+       unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
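+       /*
+        * Offsets below the pending count are buffers already completed
+        * by the HW. The rest of the pushed buffers (all blocks from
+        * completed to added, not counting the buffers of the oldest
+        * block already handed to the application) are still available
+        * for the HW to fill.
+        */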
+       if (offset < pending)
+               return RTE_ETH_RX_DESC_DONE;
+
+       if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+                    rxq->left_in_completed - rxq->block_size)
+               return RTE_ETH_RX_DESC_AVAIL;
+
+       return RTE_ETH_RX_DESC_UNAVAIL;
 }
 
 static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
@@ -387,8 +468,8 @@ sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
         * The number of descriptors just defines the maximum number of
         * pushed descriptors (the fill level).
         */
-       dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
-       dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+       dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+       dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
 }
 
 static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
@@ -406,6 +487,7 @@ sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
 static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
 static int
 sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+                               struct sfc_dp_rx_hw_limits *limits,
                                struct rte_mempool *mb_pool,
                                unsigned int *rxq_entries,
                                unsigned int *evq_entries,
@@ -432,11 +514,11 @@ sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
        nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
                                                 mp_info.contig_block_size),
                                SFC_EF10_RX_WPTR_ALIGN + 1);
-       if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
-               *rxq_entries = EFX_RXQ_MINNDESCS;
+       if (nb_hw_rx_desc <= limits->rxq_min_entries) {
+               *rxq_entries = limits->rxq_min_entries;
        } else {
                *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
-               if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+               if (*rxq_entries > limits->rxq_max_entries)
                        return EINVAL;
        }
 
@@ -446,8 +528,8 @@ sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
                1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
 
        *evq_entries = rte_align32pow2(max_events);
-       *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
-       *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+       *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
+       *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);
 
        /*
         * Maybe even the maximum event queue size is insufficient to handle
@@ -605,29 +687,20 @@ static void
 sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
 {
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
-       unsigned int i, j;
+       unsigned int i;
        const struct sfc_ef10_essb_rx_sw_desc *rxd;
        struct rte_mbuf *m;
 
-       if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
-               rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
-               m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
-                               rxq->block_size - rxq->left_in_completed);
-               do {
-                       rxq->left_in_completed--;
-                       rte_mempool_put(rxq->refill_mb_pool, m);
-                       m = sfc_ef10_essb_next_mbuf(rxq, m);
-               } while (rxq->left_in_completed > 0);
-               rxq->completed++;
-       }
-
        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
-               m = rxd->first_mbuf;
-               for (j = 0; j < rxq->block_size; ++j) {
-                       rte_mempool_put(rxq->refill_mb_pool, m);
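+               /*
+                * The oldest block may be partially consumed: skip the
+                * buffers already handed to the application and free only
+                * the left_in_completed remaining ones. For the following
+                * blocks left_in_completed is reset to the full block size
+                * below.
+                */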
+               m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+                               rxq->block_size - rxq->left_in_completed);
+               while (rxq->left_in_completed > 0) {
+                       rte_mbuf_raw_free(m);
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
+                       rxq->left_in_completed--;
                }
+               rxq->left_in_completed = rxq->block_size;
        }
 
        rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
@@ -640,7 +713,9 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10 |
                                  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
        },
-       .features               = 0,
+       .features               = SFC_DP_RX_FEAT_FLOW_FLAG |
+                                 SFC_DP_RX_FEAT_FLOW_MARK |
+                                 SFC_DP_RX_FEAT_CHECKSUM,
        .get_dev_info           = sfc_ef10_essb_rx_get_dev_info,
        .pool_ops_supported     = sfc_ef10_essb_rx_pool_ops_supported,
        .qsize_up_rings         = sfc_ef10_essb_rx_qsize_up_rings,
@@ -652,5 +727,6 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
        .qpurge                 = sfc_ef10_essb_rx_qpurge,
        .supported_ptypes_get   = sfc_ef10_supported_ptypes_get,
        .qdesc_npending         = sfc_ef10_essb_rx_qdesc_npending,
+       .qdesc_status           = sfc_ef10_essb_rx_qdesc_status,
        .pkt_burst              = sfc_ef10_essb_recv_pkts,
 };