/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2016-2019 Solarflare Communications Inc.
*
* This software was jointly developed between OKTET Labs (under contract
*/
#include <rte_dev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"
+#include "sfc_sw_stats.h"
+
+#define SFC_XSTAT_ID_INVALID_VAL UINT64_MAX
+#define SFC_XSTAT_ID_INVALID_NAME '\0'
uint32_t sfc_logtype_driver;
int ret;
int rc;
- /*
- * Return value of the callback is likely supposed to be
- * equal to or greater than 0, nevertheless, if an error
- * occurs, it will be desirable to pass it to the caller
- */
- if ((fw_version == NULL) || (fw_size == 0))
- return -EINVAL;
-
rc = efx_nic_get_fw_version(sa->nic, &enfi);
if (rc != 0)
return -rc;
}
static int
-sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
- rx_queue_id, nb_rx_desc, socket_id);
+ ethdev_qid, nb_rx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
rx_conf, mb_pool);
if (rc != 0)
goto fail_rx_qinit;
- dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
sfc_adapter_unlock(sa);
}
static void
-sfc_rx_queue_release(void *queue)
+sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
struct sfc_rxq *rxq;
struct sfc_adapter *sa;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
if (dp_rxq == NULL)
return;
}
static int
-sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
int rc;
sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
- tx_queue_id, nb_tx_desc, socket_id);
+ ethdev_qid, nb_tx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
if (rc != 0)
goto fail_tx_qinit;
- dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ dev->data->tx_queues[ethdev_qid] = txq_info->dp;
sfc_adapter_unlock(sa);
return 0;
}
static void
-sfc_tx_queue_release(void *queue)
+sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
struct sfc_txq *txq;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
struct sfc_adapter *sa;
if (dp_txq == NULL)
uint64_t *mac_stats;
int ret;
- rte_spinlock_lock(&port->mac_stats_lock);
+ sfc_adapter_lock(sa);
- ret = sfc_port_update_mac_stats(sa);
+ ret = sfc_port_update_mac_stats(sa, B_FALSE);
if (ret != 0)
goto unlock;
mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+ stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
} else {
stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
+ stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+
/*
* Take into account stats which are whenever supported
* on EF10. If some stat is not supported by current
}
unlock:
- rte_spinlock_unlock(&port->mac_stats_lock);
+ sfc_adapter_unlock(sa);
SFC_ASSERT(ret >= 0);
return -ret;
}
struct sfc_port *port = &sa->port;
int rc;
+ sfc_adapter_lock(sa);
+
if (sa->state != SFC_ADAPTER_STARTED) {
/*
* The operation cannot be done if port is not started; it
* will be scheduled to be done during the next port start
*/
port->mac_stats_reset_pending = B_TRUE;
+ sfc_adapter_unlock(sa);
return 0;
}
if (rc != 0)
sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+ sfc_sw_xstats_reset(sa);
+
+ sfc_adapter_unlock(sa);
+
SFC_ASSERT(rc >= 0);
return -rc;
}
+static unsigned int
+sfc_xstats_get_nb_supported(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ unsigned int nb_supported;
+
+ sfc_adapter_lock(sa);
+ nb_supported = port->mac_stats_nb_supported +
+ sfc_sw_xstats_get_nb_supported(sa);
+ sfc_adapter_unlock(sa);
+
+ return nb_supported;
+}
+
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int xstats_count)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- struct sfc_port *port = &sa->port;
- uint64_t *mac_stats;
+ unsigned int nb_written = 0;
+ unsigned int nb_supported = 0;
int rc;
- unsigned int i;
- int nstats = 0;
-
- rte_spinlock_lock(&port->mac_stats_lock);
- rc = sfc_port_update_mac_stats(sa);
- if (rc != 0) {
- SFC_ASSERT(rc > 0);
- nstats = -rc;
- goto unlock;
- }
+ if (unlikely(xstats == NULL))
+ return sfc_xstats_get_nb_supported(sa);
- mac_stats = port->mac_stats_buf;
+ rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
+ if (rc < 0)
+ return rc;
- for (i = 0; i < EFX_MAC_NSTATS; ++i) {
- if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
- if (xstats != NULL && nstats < (int)xstats_count) {
- xstats[nstats].id = nstats;
- xstats[nstats].value = mac_stats[i];
- }
- nstats++;
- }
- }
+ nb_supported = rc;
+ sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written,
+ &nb_supported);
-unlock:
- rte_spinlock_unlock(&port->mac_stats_lock);
-
- return nstats;
+ return nb_supported;
}
static int
struct sfc_port *port = &sa->port;
unsigned int i;
unsigned int nstats = 0;
+ unsigned int nb_written = 0;
+ int ret;
+
+ if (unlikely(xstats_names == NULL))
+ return sfc_xstats_get_nb_supported(sa);
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
- if (xstats_names != NULL && nstats < xstats_count)
+ if (nstats < xstats_count) {
strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
+ nb_written++;
+ }
nstats++;
}
}
+ ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count,
+ &nb_written, &nstats);
+ if (ret != 0) {
+ SFC_ASSERT(ret < 0);
+ return ret;
+ }
+
return nstats;
}
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
- uint64_t *mac_stats;
- unsigned int nb_supported = 0;
- unsigned int nb_written = 0;
+ unsigned int nb_supported;
unsigned int i;
- int ret;
int rc;
- if (unlikely(values == NULL) ||
- unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
- return port->mac_stats_nb_supported;
-
- rte_spinlock_lock(&port->mac_stats_lock);
-
- rc = sfc_port_update_mac_stats(sa);
- if (rc != 0) {
- SFC_ASSERT(rc > 0);
- ret = -rc;
- goto unlock;
- }
+ if (unlikely(ids == NULL || values == NULL))
+ return -EINVAL;
- mac_stats = port->mac_stats_buf;
+	/*
+	 * The values array may be filled in non-sequential order. Pre-fill
+	 * every entry with a constant that marks an invalid ID.
+	 */
+ for (i = 0; i < n; i++)
+ values[i] = SFC_XSTAT_ID_INVALID_VAL;
- for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
- if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
- continue;
+ rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n);
+ if (rc != 0)
+ return rc;
- if ((ids == NULL) || (ids[nb_written] == nb_supported))
- values[nb_written++] = mac_stats[i];
+ nb_supported = port->mac_stats_nb_supported;
+ sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported);
- ++nb_supported;
+	/* Return the number of stats written before the first invalid ID. */
+ for (i = 0; i < n; i++) {
+ if (values[i] == SFC_XSTAT_ID_INVALID_VAL)
+ return i;
}
- ret = nb_written;
-
-unlock:
- rte_spinlock_unlock(&port->mac_stats_lock);
-
- return ret;
+ return n;
}
static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
struct rte_eth_xstat_name *xstats_names,
- const uint64_t *ids, unsigned int size)
+ unsigned int size)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
- unsigned int nb_supported = 0;
- unsigned int nb_written = 0;
+ unsigned int nb_supported;
unsigned int i;
+ int ret;
- if (unlikely(xstats_names == NULL) ||
- unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
- return port->mac_stats_nb_supported;
+ if (unlikely(xstats_names == NULL && ids != NULL) ||
+ unlikely(xstats_names != NULL && ids == NULL))
+ return -EINVAL;
- for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
- if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
- continue;
+ if (unlikely(xstats_names == NULL && ids == NULL))
+ return sfc_xstats_get_nb_supported(sa);
- if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
- char *name = xstats_names[nb_written++].name;
+	/*
+	 * The names array may be filled in non-sequential order. Pre-fill
+	 * every entry with a string that marks an invalid ID.
+	 */
+	for (i = 0; i < size; i++)
+		xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME;
+
+ sfc_adapter_lock(sa);
- strlcpy(name, efx_mac_stat_name(sa->nic, i),
+ SFC_ASSERT(port->mac_stats_nb_supported <=
+ RTE_DIM(port->mac_stats_by_id));
+
+ for (i = 0; i < size; i++) {
+ if (ids[i] < port->mac_stats_nb_supported) {
+ strlcpy(xstats_names[i].name,
+ efx_mac_stat_name(sa->nic,
+ port->mac_stats_by_id[ids[i]]),
sizeof(xstats_names[0].name));
}
+ }
- ++nb_supported;
+ nb_supported = port->mac_stats_nb_supported;
+
+ sfc_adapter_unlock(sa);
+
+ ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size,
+ &nb_supported);
+ if (ret != 0) {
+ SFC_ASSERT(ret < 0);
+ return ret;
+ }
+
+	/* Return the number of names written before the first invalid ID. */
+ for (i = 0; i < size; i++) {
+ if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME)
+ return i;
}
- return nb_written;
+ return size;
}
static int
* The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
- if (mtu > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
* use any process-local pointers from the adapter data.
*/
static void
-sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
struct rte_eth_rxq_info *qinfo)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
-
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
qinfo->mp = rxq_info->refill_mb_pool;
qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
* use any process-local pointers from the adapter data.
*/
static void
-sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
struct rte_eth_txq_info *qinfo)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_txq_info *txq_info;
- SFC_ASSERT(tx_queue_id < sas->txq_count);
+ SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);
- txq_info = &sas->txq_info[tx_queue_id];
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
memset(qinfo, 0, sizeof(*qinfo));
* use any process-local pointers from the adapter data.
*/
static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
return 0;
return sap->dp_rx->qdesc_npending(rxq_info->dp);
}
-/*
- * The function is used by the secondary process as well. It must not
- * use any process-local pointers from the adapter data.
- */
-static int
-sfc_rx_descriptor_done(void *queue, uint16_t offset)
-{
- struct sfc_dp_rxq *dp_rxq = queue;
- const struct sfc_dp_rx *dp_rx;
-
- dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
-
- return offset < dp_rx->qdesc_npending(dp_rxq);
-}
-
/*
* The function is used by the secondary process as well. It must not
* use any process-local pointers from the adapter data.
}
static int
-sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ if (rxq_info->state != SFC_RXQ_INITIALIZED)
goto fail_not_setup;
- rc = sfc_rx_qstart(sa, rx_queue_id);
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
goto fail_rx_qstart;
- sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+ rxq_info->deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
}
static int
-sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
- sfc_rx_qstop(sa, rx_queue_id);
- sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ sfc_rx_qstop(sa, sw_index);
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ rxq_info->deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
}
static int
-sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+ sfc_log_init(sa, "TxQ = %u", ethdev_qid);
sfc_adapter_lock(sa);
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ if (txq_info->state != SFC_TXQ_INITIALIZED)
goto fail_not_setup;
- rc = sfc_tx_qstart(sa, tx_queue_id);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ rc = sfc_tx_qstart(sa, sw_index);
if (rc != 0)
goto fail_tx_qstart;
- sas->txq_info[tx_queue_id].deferred_started = B_TRUE;
+ txq_info->deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
return 0;
}
static int
-sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+ sfc_log_init(sa, "TxQ = %u", ethdev_qid);
sfc_adapter_lock(sa);
- sfc_tx_qstop(sa, tx_queue_id);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ sfc_tx_qstop(sa, sw_index);
- sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ txq_info->deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
return 0;
}
static int
-sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- int rc = ENOTSUP;
-
- sfc_log_init(sa, "entry");
-
- switch (filter_type) {
- case RTE_ETH_FILTER_NONE:
- sfc_err(sa, "Global filters configuration not supported");
- break;
- case RTE_ETH_FILTER_FLEXIBLE:
- sfc_err(sa, "Flexible filters not supported");
- break;
- case RTE_ETH_FILTER_SYN:
- sfc_err(sa, "SYN filters not supported");
- break;
- case RTE_ETH_FILTER_NTUPLE:
- sfc_err(sa, "NTUPLE filters not supported");
- break;
- case RTE_ETH_FILTER_TUNNEL:
- sfc_err(sa, "Tunnel filters not supported");
- break;
- case RTE_ETH_FILTER_FDIR:
- sfc_err(sa, "Flow Director filters not supported");
- break;
- case RTE_ETH_FILTER_HASH:
- sfc_err(sa, "Hash filters not supported");
- break;
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET) {
- rc = EINVAL;
- } else {
- *(const void **)arg = &sfc_flow_ops;
- rc = 0;
- }
- break;
- default:
- sfc_err(sa, "Unknown filter type %u", filter_type);
- break;
- }
-
- sfc_log_init(sa, "exit: %d", -rc);
- SFC_ASSERT(rc >= 0);
- return -rc;
+ *ops = &sfc_flow_ops;
+ return 0;
}
static int
}
static int
-sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
return sap->dp_rx->intr_enable(rxq_info->dp);
}
static int
-sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
return sap->dp_rx->intr_disable(rxq_info->dp);
}
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
- .filter_ctrl = sfc_dev_filter_ctrl,
+ .flow_ops_get = sfc_dev_flow_ops_get,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_ops;
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
const struct rte_ether_addr *from;
int ret;
+ if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
+ SFC_EFX_DEV_CLASS_NET) {
+ SFC_GENERIC_LOG(DEBUG,
+ "Incompatible device class: skip probing, should be probed by other sfc driver.");
+ return 1;
+ }
+
sfc_register_dp();
logtype_main = sfc_register_logtype(&pci_dev->addr,
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
rc = sfc_kvargs_parse(sa);
if (rc != 0)