Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
boolean parameter values.
+- ``rx_datapath`` [auto|efx] (default **auto**)
+
+ Choose the receive datapath implementation.
+ **auto** lets the driver itself choose, based on the firmware features
+ that are available and those required by each datapath implementation.
+ **efx** chooses the libefx-based datapath, which supports Rx scatter.
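+
+ For example, the libefx-based datapath may be requested explicitly
+ through the corresponding device argument (the PCI address below is
+ illustrative only)::
+
+    -w 02:00.0,rx_datapath=efx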
+
- ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
 Choose hardware tuning to be optimized for either throughput or
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
VPATH += $(SRCDIR)/base
struct sfc_evq_info;
struct sfc_rxq_info;
struct sfc_txq_info;
+struct sfc_dp_rx;
struct sfc_port {
unsigned int lsc_seq;
unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
uint8_t rss_key[SFC_RSS_KEY_SIZE];
#endif
+
+ const struct sfc_dp_rx *dp_rx;
};
/*
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_log.h>
+
+#include "sfc_dp.h"
+
+void
+sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr)
+{
+ dpq->port_id = port_id;
+ dpq->queue_id = queue_id;
+ dpq->pci_addr = *pci_addr;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type,
+ const char *name)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ if (strcmp(entry->name, name) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type,
+ unsigned int avail_caps)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ /* Take the first match */
+ if (sfc_dp_match_hw_fw_caps(entry, avail_caps))
+ return entry;
+ }
+
+ return NULL;
+}
+
+int
+sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+{
+ if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
+ "sfc %s dapapath '%s' already registered\n",
+ entry->type == SFC_DP_RX ? "Rx" : "unknown",
+ entry->name);
+ return EEXIST;
+ }
+
+ TAILQ_INSERT_TAIL(head, entry, links);
+
+ return 0;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_H
+#define _SFC_DP_H
+
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
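+/*
+ * Integer division rounded up; for example, SFC_DIV_ROUND_UP(10, 4)
+ * evaluates to 3. The statement expression below evaluates each
+ * argument exactly once, so arguments with side effects are safe.
+ */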
+#define SFC_DIV_ROUND_UP(a, b) \
+ __extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ \
+ (_a + (_b - 1)) / _b; \
+ })
+
+/**
+ * Datapath exception handler to be provided by the control path.
+ */
+typedef void (sfc_dp_exception_t)(void *ctrl);
+
+enum sfc_dp_type {
+ SFC_DP_RX = 0, /**< Receive datapath */
+};
+
+/** Datapath queue run-time information */
+struct sfc_dp_queue {
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_pci_addr pci_addr;
+};
+
+void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
+ uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr);
+
+/*
+ * Helper macro used to define datapath logging macros and keep
+ * logging uniform.
+ */
+#define SFC_DP_LOG(dp_name, level, dpq, ...) \
+ do { \
+ const struct sfc_dp_queue *_dpq = (dpq); \
+ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s " PCI_PRI_FMT \
+ " #%" PRIu16 ".%" PRIu16 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dp_name, \
+ _addr->domain, _addr->bus, \
+ _addr->devid, _addr->function, \
+ _dpq->port_id, _dpq->queue_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
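+
+/*
+ * For illustration only (the "sfc-efx" prefix and the sfc_efx_rx_err
+ * name are hypothetical): a datapath would typically wrap SFC_DP_LOG
+ * in its own shorthand, e.g.
+ *
+ * #define sfc_efx_rx_err(rxq, ...) \
+ *	SFC_DP_LOG("sfc-efx", ERR, &(rxq)->dp.dpq, __VA_ARGS__)
+ */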
+
+/** Datapath definition */
+struct sfc_dp {
+ TAILQ_ENTRY(sfc_dp) links;
+ const char *name;
+ enum sfc_dp_type type;
+ /* Mask of required hardware/firmware capabilities */
+ unsigned int hw_fw_caps;
+};
+
+/** List of datapath variants */
+TAILQ_HEAD(sfc_dp_list, sfc_dp);
+
+/* Check if available HW/FW capabilities are sufficient for the datapath */
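+/* For example, a datapath with hw_fw_caps of 0x3 matches avail_caps
+ * 0x7 (all required bits present) but not 0x5 (a required bit missing).
+ */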
+static inline bool
+sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
+{
+ return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps;
+}
+
+struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head,
+ enum sfc_dp_type type, const char *name);
+struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
+ enum sfc_dp_type type,
+ unsigned int avail_caps);
+int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_H */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_RX_H
+#define _SFC_DP_RX_H
+
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic receive queue information used on the data path.
+ * It must be kept as small as possible since it is built into
+ * the structure used on the data path.
+ */
+struct sfc_dp_rxq {
+ struct sfc_dp_queue dpq;
+};
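+
+/*
+ * For illustration only (struct my_dp_rxq and my_dp_rxq_by_dp_rxq are
+ * hypothetical): a datapath embeds struct sfc_dp_rxq into its own queue
+ * structure and recovers the containing structure from the generic
+ * pointer via container_of() (see rte_common.h), as the libefx datapath
+ * does in sfc_efx_rxq_by_dp_rxq():
+ *
+ * struct my_dp_rxq {
+ *	unsigned int completed;		// datapath-specific fields
+ *	struct sfc_dp_rxq dp;		// generic part
+ * };
+ *
+ * static inline struct my_dp_rxq *
+ * my_dp_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+ * {
+ *	return container_of(dp_rxq, struct my_dp_rxq, dp);
+ * }
+ */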
+
+/**
+ * Datapath receive queue creation information.
+ *
+ * The structure is used only to pass information from the control path
+ * to the datapath. It could be plain function arguments instead, but
+ * that would be hardly readable.
+ */
+struct sfc_dp_rx_qcreate_info {
+ /** Memory pool to allocate Rx buffer from */
+ struct rte_mempool *refill_mb_pool;
+ /** Minimum number of unused Rx descriptors to do refill */
+ unsigned int refill_threshold;
+ /**
+ * Usable mbuf data space in accordance with alignment and
+ * padding requirements imposed by HW.
+ */
+ unsigned int buf_size;
+
+ /**
+ * Maximum number of Rx descriptors completed in one Rx event.
+ * Used for sanity checks only, should the datapath choose to do them.
+ */
+ unsigned int batch_max;
+
+ /** Pseudo-header size */
+ unsigned int prefix_size;
+
+ /** Receive queue flags initializer */
+ unsigned int flags;
+#define SFC_RXQ_FLAG_RSS_HASH 0x1
+
+ /** Rx queue size */
+ unsigned int rxq_entries;
+};
+
+/**
+ * Allocate and initialize datapath receive queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Receive queue information
+ * @param dp_rxqp Location for generic datapath receive queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp);
+
+/**
+ * Free resources allocated for the datapath receive queue.
+ */
+typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue start callback.
+ *
+ * It hands over the EvQ to the datapath.
+ */
+typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int evq_read_ptr);
+
+/**
+ * Receive queue stop function called before flush.
+ */
+typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Receive queue purge function called after queue flush.
+ *
+ * Should be used to free unused receive buffers.
+ */
+typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get packet types recognized/classified */
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+
+/** Get number of pending Rx descriptors */
+typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Receive datapath definition */
+struct sfc_dp_rx {
+ struct sfc_dp dp;
+
+ sfc_dp_rx_qcreate_t *qcreate;
+ sfc_dp_rx_qdestroy_t *qdestroy;
+ sfc_dp_rx_qstart_t *qstart;
+ sfc_dp_rx_qstop_t *qstop;
+ sfc_dp_rx_qpurge_t *qpurge;
+ sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
+ sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ eth_rx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+extern struct sfc_dp_rx sfc_efx_rx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_RX_H */
#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
+#include <rte_errno.h>
#include "efx.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
+#include "sfc_dp.h"
+#include "sfc_dp_rx.h"
+
+static struct sfc_dp_list sfc_dp_head =
+ TAILQ_HEAD_INITIALIZER(sfc_dp_head);
static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
- static const uint32_t ptypes[] = {
- RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
- RTE_PTYPE_L4_TCP,
- RTE_PTYPE_L4_UDP,
- RTE_PTYPE_UNKNOWN
- };
-
- if (dev->rx_pkt_burst == sfc_recv_pkts)
- return ptypes;
-
- return NULL;
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ return sa->dp_rx->supported_ptypes_get();
}
static int
if (rc != 0)
goto fail_rx_qinit;
- dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;
+ dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
sfc_adapter_unlock(sa);
static void
sfc_rx_queue_release(void *queue)
{
- struct sfc_rxq *rxq = queue;
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq;
struct sfc_adapter *sa;
unsigned int sw_index;
- if (rxq == NULL)
+ if (dp_rxq == NULL)
return;
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
sa = rxq->evq->sa;
sfc_adapter_lock(sa);
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
- struct sfc_rxq *rxq = queue;
+ struct sfc_dp_rxq *dp_rxq = queue;
- return sfc_rx_qdesc_done(rxq, offset);
+ return sfc_rx_qdesc_done(dp_rxq, offset);
}
static int
.fw_version_get = sfc_fw_version_get,
};
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int avail_caps = 0;
+ const char *rx_name = NULL;
+ int rc;
+
+ if (sa == NULL || sa->state == SFC_ADAPTER_UNINITIALIZED)
+ return -E_RTE_SECONDARY;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+ sfc_kvarg_string_handler, &rx_name);
+ if (rc != 0)
+ goto fail_kvarg_rx_datapath;
+
+ if (rx_name != NULL) {
+ sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath %s not found", rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Rx datapath %s",
+ rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx;
+ }
+ } else {
+ sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+
+ dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+
+ dev->tx_pkt_burst = sfc_xmit_pkts;
+
+ dev->dev_ops = &sfc_eth_dev_ops;
+
+ return 0;
+
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+ return rc;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head))
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+}
+
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
const efx_nic_cfg_t *encp;
const struct ether_addr *from;
+ sfc_register_dp();
+
/* Required for logging */
sa->eth_dev = dev;
from = (const struct ether_addr *)(encp->enc_mac_addr);
ether_addr_copy(from, &dev->data->mac_addrs[0]);
- dev->dev_ops = &sfc_eth_dev_ops;
- dev->rx_pkt_burst = &sfc_recv_pkts;
- dev->tx_pkt_burst = &sfc_xmit_pkts;
-
sfc_adapter_unlock(sa);
+ sfc_eth_dev_set_ops(dev);
+
sfc_log_init(sa, "done");
return 0;
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
}
static boolean_t
-sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
- uint32_t size, uint16_t flags)
+sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
{
struct sfc_evq *evq = arg;
- struct sfc_rxq *rxq;
+ struct sfc_efx_rxq *rxq;
unsigned int stop;
unsigned int pending_id;
unsigned int delta;
unsigned int i;
- struct sfc_rx_sw_desc *rxd;
+ struct sfc_efx_rx_sw_desc *rxd;
if (unlikely(evq->exception))
goto done;
- rxq = evq->rxq;
+ rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->evq == evq);
- SFC_ASSERT(rxq->flags & SFC_RXQ_FLAG_STARTED);
+ SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
stop = (id + 1) & rxq->ptr_mask;
pending_id = rxq->pending & rxq->ptr_mask;
sfc_err(evq->sa,
"EVQ %u RxQ %u invalid RX abort "
"(id=%#x size=%u flags=%#x); needs restart",
- evq->evq_index, sfc_rxq_sw_index(rxq),
+ evq->evq_index, rxq->dp.dpq.queue_id,
id, size, flags);
goto done;
}
sfc_err(evq->sa,
"EVQ %u RxQ %u completion out of order "
"(id=%#x delta=%u flags=%#x); needs restart",
- evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
- flags);
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, delta, flags);
goto done;
}
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
struct sfc_rxq *rxq;
- rxq = evq->rxq;
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->hw_index == rxq_hw_index);
SFC_ASSERT(rxq->evq == evq);
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
struct sfc_rxq *rxq;
- rxq = evq->rxq;
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->hw_index == rxq_hw_index);
SFC_ASSERT(rxq->evq == evq);
.eec_link_change = sfc_ev_link_change,
};
-static const efx_ev_callbacks_t sfc_ev_callbacks_rx = {
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
.eec_initialized = sfc_ev_initialized,
- .eec_rx = sfc_ev_rx,
+ .eec_rx = sfc_ev_efx_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
struct sfc_adapter *sa = evq->sa;
int rc;
- if ((evq->rxq != NULL) &&
- (evq->rxq->flags & SFC_RXQ_FLAG_RUNNING)) {
- unsigned int rxq_sw_index = sfc_rxq_sw_index(evq->rxq);
+ if (evq->dp_rxq != NULL) {
+ unsigned int rxq_sw_index;
+
+ rxq_sw_index = evq->dp_rxq->dpq.queue_id;
sfc_warn(sa,
"restart RxQ %u because of exception on its EvQ %u",
if (rc != 0)
goto fail_ev_qcreate;
- SFC_ASSERT(evq->rxq == NULL || evq->txq == NULL);
- if (evq->rxq != 0)
- evq->callbacks = &sfc_ev_callbacks_rx;
- else if (evq->txq != 0)
+ SFC_ASSERT(evq->dp_rxq == NULL || evq->txq == NULL);
+ if (evq->dp_rxq != 0) {
+ if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_rx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_rx;
+ } else if (evq->txq != 0) {
evq->callbacks = &sfc_ev_callbacks_tx;
- else
+ } else {
evq->callbacks = &sfc_ev_callbacks;
+ }
evq->init_state = SFC_EVQ_STARTING;
#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
struct sfc_adapter;
-struct sfc_rxq;
+struct sfc_dp_rxq;
struct sfc_txq;
enum sfc_evq_state {
unsigned int read_ptr;
boolean_t exception;
efsys_mem_t mem;
- struct sfc_rxq *rxq;
+ struct sfc_dp_rxq *dp_rxq;
struct sfc_txq *txq;
/* Not used on datapath */
SFC_KVARG_DEBUG_INIT,
SFC_KVARG_MCDI_LOGGING,
SFC_KVARG_PERF_PROFILE,
+ SFC_KVARG_RX_DATAPATH,
NULL,
};
return 0;
}
+
+int
+sfc_kvarg_string_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ *(const char **)opaque = value_str;
+
+ return 0;
+}
#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms"
+#define SFC_KVARG_DATAPATH_EFX "efx"
+
+#define SFC_KVARG_RX_DATAPATH "rx_datapath"
+#define SFC_KVARG_VALUES_RX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "]"
+
struct sfc_adapter;
int sfc_kvargs_parse(struct sfc_adapter *sa);
int sfc_kvarg_bool_handler(const char *key, const char *value_str,
void *opaque);
-
int sfc_kvarg_long_handler(const char *key, const char *value_str,
void *opaque);
+int sfc_kvarg_string_handler(const char *key, const char *value_str,
+ void *opaque);
#ifdef __cplusplus
}
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
+#include "sfc_kvargs.h"
#include "sfc_tweak.h"
/*
}
static void
-sfc_rx_qrefill(struct sfc_rxq *rxq)
+sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
unsigned int free_space;
unsigned int bulks;
unsigned int added = rxq->added;
unsigned int id;
unsigned int i;
- struct sfc_rx_sw_desc *rxd;
+ struct sfc_efx_rx_sw_desc *rxd;
struct rte_mbuf *m;
- uint8_t port_id = rxq->port_id;
+ uint16_t port_id = rxq->dp.dpq.port_id;
free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
(added - rxq->completed);
}
static uint64_t
-sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
+sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
uint64_t mbuf_flags = 0;
}
static uint32_t
-sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
+sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
return RTE_PTYPE_L2_ETHER |
((desc_flags & EFX_PKT_IPV4) ?
((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
+static const uint32_t *
+sfc_efx_supported_ptypes_get(void)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
static void
-sfc_rx_set_rss_hash(struct sfc_rxq *rxq, unsigned int flags, struct rte_mbuf *m)
+sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
+ struct rte_mbuf *m)
{
#if EFSYS_OPT_RX_SCALE
uint8_t *mbuf_data;
- if ((rxq->flags & SFC_RXQ_FLAG_RSS_HASH) == 0)
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
return;
mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
#endif
}
-uint16_t
-sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static uint16_t
+sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
- struct sfc_rxq *rxq = rx_queue;
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
unsigned int completed;
unsigned int prefix_size = rxq->prefix_size;
unsigned int done_pkts = 0;
boolean_t discard_next = B_FALSE;
struct rte_mbuf *scatter_pkt = NULL;
- if (unlikely((rxq->flags & SFC_RXQ_FLAG_RUNNING) == 0))
+ if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
return 0;
sfc_ev_qpoll(rxq->evq);
completed = rxq->completed;
while (completed != rxq->pending && done_pkts < nb_pkts) {
unsigned int id;
- struct sfc_rx_sw_desc *rxd;
+ struct sfc_efx_rx_sw_desc *rxd;
struct rte_mbuf *m;
unsigned int seg_len;
unsigned int desc_flags;
/* The first fragment of the packet has prefix */
prefix_size = rxq->prefix_size;
- m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
- m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);
+ m->ol_flags =
+ sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
+ m->packet_type =
+ sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
/*
* Extract RSS hash from the packet prefix and
* set the corresponding field (if needed and possible)
*/
- sfc_rx_set_rss_hash(rxq, desc_flags, m);
+ sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
m->data_off += prefix_size;
rxq->completed = completed;
- sfc_rx_qrefill(rxq);
+ sfc_efx_rx_qrefill(rxq);
return done_pkts;
}
-unsigned int
-sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
+static unsigned int
+sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
- struct sfc_rxq *rxq;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
- SFC_ASSERT(sw_index < sa->rxq_count);
- rxq = sa->rxq_info[sw_index].rxq;
-
- if (rxq == NULL || (rxq->flags & SFC_RXQ_FLAG_RUNNING) == 0)
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
return 0;
sfc_ev_qpoll(rxq->evq);
return rxq->pending - rxq->completed;
}
-int
-sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset)
+struct sfc_rxq *
+sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
- if ((rxq->flags & SFC_RXQ_FLAG_RUNNING) == 0)
- return 0;
+ const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_rxq *rxq;
- sfc_ev_qpoll(rxq->evq);
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
- return offset < (rxq->pending - rxq->completed);
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ rxq = sa->rxq_info[dpq->queue_id].rxq;
+
+ SFC_ASSERT(rxq != NULL);
+ return rxq;
+}
+
+static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
+static int
+sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_efx_rxq *rxq;
+ int rc;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
+ info->rxq_entries,
+ sizeof(*rxq->sw_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_desc == NULL)
+ goto fail_desc_alloc;
+
+ /* efx datapath is bound to efx control path */
+ rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->batch_max = info->batch_max;
+ rxq->prefix_size = info->prefix_size;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
+static void
+sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_desc);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
+static int
+sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int evq_read_ptr)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->common = crxq->common;
+
+ rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+
+ return 0;
}
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
-sfc_rx_qpurge(struct sfc_rxq *rxq)
+sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int *evq_read_ptr)
{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
+
+ /* The libefx-based datapath is bound to the libefx-based PMD and
+ * uses the event queue structure directly, so there is no need to
+ * return the EvQ read pointer.
+ */
+}
+
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+static void
+sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
unsigned int i;
- struct sfc_rx_sw_desc *rxd;
+ struct sfc_efx_rx_sw_desc *rxd;
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_desc[i & rxq->ptr_mask];
rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
rxd->mbuf = NULL;
+ /* Packed stream relies on 0 in inactive SW descriptors.
+ * Rx queue stop is not performance critical, so
+ * there is no harm in doing it always.
+ */
+ rxd->flags = 0;
+ rxd->size = 0;
}
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_rx sfc_efx_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = 0,
+ },
+ .qcreate = sfc_efx_rx_qcreate,
+ .qdestroy = sfc_efx_rx_qdestroy,
+ .qstart = sfc_efx_rx_qstart,
+ .qstop = sfc_efx_rx_qstop,
+ .qpurge = sfc_efx_rx_qpurge,
+ .supported_ptypes_get = sfc_efx_supported_ptypes_get,
+ .qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .pkt_burst = sfc_efx_recv_pkts,
+};
+
+unsigned int
+sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq = sa->rxq_info[sw_index].rxq;
+
+ if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
+ return 0;
+
+ return sa->dp_rx->qdesc_npending(rxq->dp);
+}
+
+int
+sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
+{
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}
static void
sfc_info(sa, "RxQ %u flushed", sw_index);
}
- sfc_rx_qpurge(rxq);
+ sa->dp_rx->qpurge(rxq->dp);
}
static int
efx_rx_qenable(rxq->common);
- rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+ rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ if (rc != 0)
+ goto fail_dp_qstart;
rxq->state |= SFC_RXQ_STARTED;
- rxq->flags |= SFC_RXQ_FLAG_STARTED | SFC_RXQ_FLAG_RUNNING;
-
- sfc_rx_qrefill(rxq);
if (sw_index == 0) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
return 0;
fail_mac_filter_default_rxq_set:
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+fail_dp_qstart:
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
sa->eth_dev->data->rx_queue_state[sw_index] =
RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->flags &= ~SFC_RXQ_FLAG_RUNNING;
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
if (sw_index == 0)
efx_mac_filter_default_rxq_clear(sa->nic);
sfc_rx_qflush(sa, sw_index);
- rxq->flags &= ~SFC_RXQ_FLAG_STARTED;
rxq->state = SFC_RXQ_INITIALIZED;
efx_rx_qdestroy(rxq->common);
unsigned int evq_index;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
+ struct sfc_dp_rx_qcreate_info info;
rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
if (rc != 0)
if (rxq == NULL)
goto fail_rxq_alloc;
- rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
- socket_id, &rxq->mem);
- if (rc != 0)
- goto fail_dma_alloc;
-
- rc = ENOMEM;
- rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
- sizeof(*rxq->sw_desc),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (rxq->sw_desc == NULL)
- goto fail_desc_alloc;
+ rxq_info->rxq = rxq;
- evq->rxq = rxq;
rxq->evq = evq;
- rxq->ptr_mask = rxq_info->entries - 1;
+ rxq->hw_index = sw_index;
rxq->refill_threshold = rx_conf->rx_free_thresh;
rxq->refill_mb_pool = mb_pool;
- rxq->buf_size = buf_size;
- rxq->hw_index = sw_index;
- rxq->port_id = sa->eth_dev->data->port_id;
- /* Cache limits required on datapath in RxQ structure */
- rxq->batch_max = encp->enc_rx_batch_max;
- rxq->prefix_size = encp->enc_rx_prefix_size;
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ socket_id, &rxq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.refill_mb_pool = rxq->refill_mb_pool;
+ info.refill_threshold = rxq->refill_threshold;
+ info.buf_size = buf_size;
+ info.batch_max = encp->enc_rx_batch_max;
+ info.prefix_size = encp->enc_rx_prefix_size;
#if EFSYS_OPT_RX_SCALE
if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
- rxq->flags |= SFC_RXQ_FLAG_RSS_HASH;
+ info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif
+ info.rxq_entries = rxq_info->entries;
+
+ rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
+ if (rc != 0)
+ goto fail_dp_rx_qcreate;
+
+ evq->dp_rxq = rxq->dp;
+
rxq->state = SFC_RXQ_INITIALIZED;
- rxq_info->rxq = rxq;
rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
return 0;
-fail_desc_alloc:
+fail_dp_rx_qcreate:
sfc_dma_free(sa, &rxq->mem);
fail_dma_alloc:
+ rxq_info->rxq = NULL;
rte_free(rxq);
fail_rxq_alloc:
rxq = rxq_info->rxq;
SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+ sa->dp_rx->qdestroy(rxq->dp);
+ rxq->dp = NULL;
+
rxq_info->rxq = NULL;
rxq_info->entries = 0;
- rte_free(rxq->sw_desc);
sfc_dma_free(sa, &rxq->mem);
rte_free(rxq);
}
#include "efx.h"
+#include "sfc_dp_rx.h"
+
#ifdef __cplusplus
extern "C" {
#endif
* Software Rx descriptor information associated with hardware Rx
* descriptor.
*/
-struct sfc_rx_sw_desc {
+struct sfc_efx_rx_sw_desc {
struct rte_mbuf *mbuf;
unsigned int flags;
unsigned int size;
};
/**
- * Receive queue information used on data path.
+ * Receive queue control information.
* Allocated on the socket specified on the queue setup.
*/
struct sfc_rxq {
- /* Used on data path */
struct sfc_evq *evq;
- struct sfc_rx_sw_desc *sw_desc;
- unsigned int flags;
-#define SFC_RXQ_FLAG_STARTED 0x1
-#define SFC_RXQ_FLAG_RUNNING 0x2
-#define SFC_RXQ_FLAG_RSS_HASH 0x4
- unsigned int ptr_mask;
- unsigned int pending;
- unsigned int completed;
- uint16_t batch_max;
- uint16_t prefix_size;
-
- /* Used on refill */
- unsigned int added;
- unsigned int pushed;
- unsigned int refill_threshold;
- uint8_t port_id;
- uint16_t buf_size;
- struct rte_mempool *refill_mb_pool;
efx_rxq_t *common;
efsys_mem_t mem;
-
- /* Not used on data path */
unsigned int hw_index;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ struct sfc_dp_rxq *dp;
unsigned int state;
};
return sfc_rxq_sw_index_by_hw_index(rxq->hw_index);
}
+struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_rxq {
+ /* Used on data path */
+ struct sfc_evq *evq;
+ unsigned int flags;
+#define SFC_EFX_RXQ_FLAG_STARTED 0x1
+#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
+#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+ unsigned int ptr_mask;
+ unsigned int pending;
+ unsigned int completed;
+ uint16_t batch_max;
+ uint16_t prefix_size;
+ struct sfc_efx_rx_sw_desc *sw_desc;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int pushed;
+ unsigned int refill_threshold;
+ uint16_t buf_size;
+ struct rte_mempool *refill_mb_pool;
+ efx_rxq_t *common;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_efx_rxq *
+sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_efx_rxq, dp);
+}
+
/**
* Receive queue information used during setup/release only.
* Allocated on the same socket as adapter data.
void sfc_rx_qflush_done(struct sfc_rxq *rxq);
void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
-uint16_t sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
-
unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
unsigned int sw_index);
-int sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset);
+int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);