/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
#include <stdbool.h>
#include <rte_byteorder.h>
-#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>
-#include "efx.h"
#include "efx_types.h"
-#include "efx_regs.h"
#include "efx_regs_ef10.h"
+#include "efx.h"
+#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
*/
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
+/**
+ * Minimum number of Rx buffers the datapath allows to use.
+ *
+ * Each HW Rx descriptor has many Rx buffers. The number of buffers
+ * in one HW Rx descriptor is equal to the size of the contiguous
+ * block provided by the Rx buffers memory pool. The contiguous block
+ * size depends on RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
+ * data size specified on memory pool creation. A typical rte_mbuf
+ * data size is about 2k, which makes a bit less than 32 buffers in
+ * a contiguous block with the default bucket size equal to 64k.
+ * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
+ * it makes about 256 as the required minimum. Double it in the
+ * advertised minimum to allow for at least 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN 512
+
+/**
+ * Alignment requirement for the number of Rx buffers.
+ *
+ * There are no extra requirements on alignment since the actual number
+ * of pushed Rx buffers will be a multiple of the contiguous block size,
+ * which is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1
+
/**
* Maximum number of descriptors/buffers in the Rx ring.
* It should guarantee that corresponding event queue never overfill.
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
struct rte_mbuf *mbuf)
{
- return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+ struct rte_mbuf *m;
+
+ m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+ __rte_mbuf_raw_sanity_check(m);
+ return m;
}
static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
struct rte_mbuf *mbuf, unsigned int idx)
{
- return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+ struct rte_mbuf *m;
+
+ m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+ __rte_mbuf_raw_sanity_check(m);
+ return m;
}
static struct rte_mbuf *
SFC_ASSERT(rxq->added != added);
rxq->added = added;
- sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
+ sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask,
+ &rxq->dp.dpq.dbells);
}
static bool
} while (ready > 0);
}
+/*
+ * The function below relies on the following length and layout of
+ * the Rx prefix.
+ */
+static const efx_rx_prefix_layout_t sfc_ef10_essb_rx_prefix_layout = {
+ .erpl_length = ES_EZ_ESSB_RX_PREFIX_LEN,
+ .erpl_fields = {
+#define SFC_EF10_ESSB_RX_PREFIX_FIELD(_efx, _ef10) \
+ EFX_RX_PREFIX_FIELD(_efx, ES_EZ_ESSB_RX_PREFIX_ ## _ef10, B_FALSE)
+
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(LENGTH, DATA_LEN),
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_MARK, MARK),
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(RSS_HASH_VALID, HASH_VALID),
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_MARK_VALID, MARK_VALID),
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_FLAG, MATCH_FLAG),
+ SFC_EF10_ESSB_RX_PREFIX_FIELD(RSS_HASH, HASH),
+
+#undef SFC_EF10_ESSB_RX_PREFIX_FIELD
+ }
+};
+
static unsigned int
sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
const efx_qword_t *qwordp;
uint16_t pkt_len;
+ /* Buffers to be discarded have 0 in packet type */
+ if (unlikely(m->packet_type == 0)) {
+ rte_mbuf_raw_free(m);
+ goto next_buf;
+ }
+
rx_pkts[n_rx_pkts++] = m;
/* Parse pseudo-header */
rte_pktmbuf_data_len(m) = pkt_len;
m->ol_flags |=
- (PKT_RX_RSS_HASH *
+ (RTE_MBUF_F_RX_RSS_HASH *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
- (PKT_RX_FDIR_ID *
+ (RTE_MBUF_F_RX_FDIR_ID *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
- (PKT_RX_FDIR *
+ (RTE_MBUF_F_RX_FDIR *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
EFX_QWORD_FIELD(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MARK);
+next_buf:
m = sfc_ef10_essb_next_mbuf(rxq, m);
} while (todo_bufs-- > 0);
}
static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
-sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
- /*
- * Correct implementation requires EvQ polling and events
- * processing.
- */
- return -ENOTSUP;
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return rxq->bufs_pending;
+
+ while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ return rxq->bufs_pending;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+ if (offset < pending)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+ rxq->left_in_completed - rxq->block_size)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
}
static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
* Number of descriptors just defines maximum number of pushed
* descriptors (fill level).
*/
- dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
- dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+ dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+ dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
}
static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct sfc_dp_rx_hw_limits *limits,
struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
mp_info.contig_block_size),
SFC_EF10_RX_WPTR_ALIGN + 1);
- if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
- *rxq_entries = EFX_RXQ_MINNDESCS;
+ if (nb_hw_rx_desc <= limits->rxq_min_entries) {
+ *rxq_entries = limits->rxq_min_entries;
} else {
*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
- if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+ if (*rxq_entries > limits->rxq_max_entries)
return EINVAL;
}
1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
*evq_entries = rte_align32pow2(max_events);
- *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
- *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+ *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
+ *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);
/*
* May be even maximum event queue size is insufficient to handle
struct sfc_ef10_essb_rxq *rxq;
int rc;
+ rc = ENOTSUP;
+ if (info->nic_dma_info->nb_regions > 0)
+ goto fail_nic_dma;
+
rc = rte_mempool_ops_get_info(mp, &mp_info);
if (rc != 0) {
/* Positive errno is used in the driver */
ER_DZ_RX_DESC_UPD_REG_OFST +
(info->hw_index << info->vi_window_shift);
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq, "RxQ doorbell is %p",
+ rxq->doorbell);
sfc_ef10_essb_rx_info(&rxq->dp.dpq,
"block size is %u, buf stride is %u",
rxq->block_size, rxq->buf_stride);
sfc_ef10_essb_rx_info(&rxq->dp.dpq,
"max fill level is %u descs (%u bufs), "
- "refill threashold %u descs (%u bufs)",
+ "refill threshold %u descs (%u bufs)",
rxq->max_fill_level,
rxq->max_fill_level * rxq->block_size,
rxq->refill_threshold,
fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
+fail_nic_dma:
return rc;
}
static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
-sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
+ const efx_rx_prefix_layout_t *pinfo)
{
struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ if (pinfo->erpl_length != sfc_ef10_essb_rx_prefix_layout.erpl_length)
+ return ENOTSUP;
+
+ if (efx_rx_prefix_layout_check(pinfo,
+ &sfc_ef10_essb_rx_prefix_layout) != 0)
+ return ENOTSUP;
+
rxq->evq_read_ptr = evq_read_ptr;
/* Initialize before refill */
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
- unsigned int i, j;
+ unsigned int i;
const struct sfc_ef10_essb_rx_sw_desc *rxd;
struct rte_mbuf *m;
- if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
- rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
- m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
- rxq->block_size - rxq->left_in_completed);
- do {
- rxq->left_in_completed--;
- rte_mempool_put(rxq->refill_mb_pool, m);
- m = sfc_ef10_essb_next_mbuf(rxq, m);
- } while (rxq->left_in_completed > 0);
- rxq->completed++;
- }
-
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
- m = rxd->first_mbuf;
- for (j = 0; j < rxq->block_size; ++j) {
- rte_mempool_put(rxq->refill_mb_pool, m);
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ while (rxq->left_in_completed > 0) {
+ rte_mbuf_raw_free(m);
m = sfc_ef10_essb_next_mbuf(rxq, m);
+ rxq->left_in_completed--;
}
+ rxq->left_in_completed = rxq->block_size;
}
rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
SFC_DP_RX_FEAT_FLOW_MARK,
+ .dev_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = 0,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
.qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
.qpurge = sfc_ef10_essb_rx_qpurge,
.supported_ptypes_get = sfc_ef10_supported_ptypes_get,
.qdesc_npending = sfc_ef10_essb_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_essb_rx_qdesc_status,
.pkt_burst = sfc_ef10_essb_recv_pkts,
};