/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*/
#include <rte_dev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
+#include <rte_ether.h>
#include "efx.h"
static struct sfc_dp_list sfc_dp_head =
TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
+static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
+
+
static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
efx_nic_fw_info_t enfi;
int ret;
int rc;
- /*
- * Return value of the callback is likely supposed to be
- * equal to or greater than 0, nevertheless, if an error
- * occurs, it will be desirable to pass it to the caller
- */
- if ((fw_version == NULL) || (fw_size == 0))
- return -EINVAL;
-
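+ /* NULL/zero parameter checks are assumed to be done at the ethdev level */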
rc = efx_nic_get_fw_version(sa->nic, &enfi);
if (rc != 0)
return -rc;
return 0;
}
-static void
+static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_rss *rss = &sas->rss;
+ struct sfc_mae *mae = &sa->mae;
uint64_t txq_offloads_def = 0;
sfc_log_init(sa, "entry");
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->max_mtu = EFX_MAC_SDU_MAX;
+
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+ dev_info->max_vfs = sa->sriov.num_vfs;
+
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_1G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_10G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_25G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_50G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
}
/* Initialize to hardware limits */
- dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
- dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
+ dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
/*
* The RXQ hardware requires that the descriptor count is a power
* of 2, but rx_desc_lim cannot properly describe that constraint.
*/
- dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+ dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;
/* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
- dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
/*
* The TXQ hardware requires that the descriptor count is a power
* of 2, but tx_desc_lim cannot properly describe that constraint.
*/
- dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+ dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
if (sap->dp_rx->get_dev_info != NULL)
sap->dp_rx->get_dev_info(dev_info);
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
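+ /*
+ * If the match-action engine (MAE) is supported, the port belongs
+ * to a switch: report its switch domain and port identifiers.
+ */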
+ if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
+ dev_info->switch_info.name = dev->device->driver->name;
+ dev_info->switch_info.domain_id = mae->switch_domain_id;
+ dev_info->switch_info.port_id = mae->switch_port_id;
+ }
+
+ return 0;
}
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
- return sap->dp_rx->supported_ptypes_get(tunnel_encaps);
+ return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}
static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *dev_data = dev->data;
- struct sfc_adapter *sa = dev_data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int rc;
sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
static int
sfc_dev_start(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int rc;
sfc_log_init(sa, "entry");
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct rte_eth_link current_link;
int ret;
return ret;
}
-static void
+static int
sfc_dev_stop(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
sfc_log_init(sa, "entry");
sfc_adapter_unlock(sa);
sfc_log_init(sa, "done");
+
+ return 0;
}
static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int rc;
sfc_log_init(sa, "entry");
static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
sfc_log_init(sa, "entry");
}
static void
+sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
+{
+ free(dev->process_private);
+ rte_eth_dev_release_port(dev);
+}
+
+static int
sfc_dev_close(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
sfc_log_init(sa, "entry");
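+ /*
+ * A secondary process only frees its process-private data and
+ * releases the port; full teardown is done by the primary process.
+ */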
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ sfc_eth_dev_secondary_clear_ops(dev);
+ return 0;
+ }
+
sfc_adapter_lock(sa);
switch (sa->state) {
case SFC_ADAPTER_STARTED:
sfc_err(sa, "unexpected adapter state %u on close", sa->state);
break;
}
+
+ /*
+ * Clean up all resources.
+ * Roll back primary process sfc_eth_dev_init() below.
+ */
+
+ sfc_eth_dev_clear_ops(dev);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ sfc_kvargs_cleanup(sa);
+
sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
sfc_log_init(sa, "done");
+
+ /* Required for logging, so clean up last */
+ sa->eth_dev = NULL;
+
+ free(sa);
+
+ return 0;
}
-static void
+static int
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
boolean_t enabled)
{
struct sfc_port *port;
boolean_t *toggle;
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+ int rc = 0;
sfc_adapter_lock(sa);
"start provided that isolated mode is "
"disabled prior the next start");
} else if ((sa->state == SFC_ADAPTER_STARTED) &&
- (sfc_set_rx_mode(sa) != 0)) {
+ ((rc = sfc_set_rx_mode(sa)) != 0)) {
*toggle = !(enabled);
- sfc_warn(sa, "Failed to %s %s mode",
- ((enabled) ? "enable" : "disable"), desc);
+ sfc_warn(sa, "Failed to %s %s mode, rc = %d",
+ ((enabled) ? "enable" : "disable"), desc, rc);
+
+ /*
+ * For promiscuous and all-multicast filters, a
+ * permission failure should be reported as an
+ * unsupported filter.
+ */
+ if (rc == EPERM)
+ rc = ENOTSUP;
}
}
sfc_adapter_unlock(sa);
+ return rc;
}
-static void
+static int
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
- sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+ int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
-static void
+static int
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
- sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+ int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
-static void
+static int
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
- sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+ int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
-static void
+static int
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
- sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+ int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
static int
-sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
- rx_queue_id, nb_rx_desc, socket_id);
+ ethdev_qid, nb_rx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
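+ /* Map the ethdev Rx queue ID to the internal software queue index */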
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
rx_conf, mb_pool);
if (rc != 0)
goto fail_rx_qinit;
- dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
sfc_adapter_unlock(sa);
struct sfc_dp_rxq *dp_rxq = queue;
struct sfc_rxq *rxq;
struct sfc_adapter *sa;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
if (dp_rxq == NULL)
return;
}
static int
-sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
int rc;
sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
- tx_queue_id, nb_tx_desc, socket_id);
+ ethdev_qid, nb_tx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
if (rc != 0)
goto fail_tx_qinit;
- dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ dev->data->tx_queues[ethdev_qid] = txq_info->dp;
sfc_adapter_unlock(sa);
return 0;
{
struct sfc_dp_txq *dp_txq = queue;
struct sfc_txq *txq;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
struct sfc_adapter *sa;
if (dp_txq == NULL)
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
uint64_t *mac_stats;
int ret;
mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+ stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
} else {
stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
+ stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+
/*
* Take into account stats which are always supported
* on EF10. If some stat is not supported by the current
return -ret;
}
-static void
+static int
sfc_stats_reset(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
int rc;
* will be scheduled to be done during the next port start
*/
port->mac_stats_reset_pending = B_TRUE;
- return;
+ return 0;
}
rc = sfc_port_reset_mac_stats(sa);
if (rc != 0)
sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int xstats_count)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
uint64_t *mac_stats;
int rc;
struct rte_eth_xstat_name *xstats_names,
unsigned int xstats_count)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
unsigned int i;
unsigned int nstats = 0;
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, unsigned int n)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
uint64_t *mac_stats;
unsigned int nb_supported = 0;
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids, unsigned int size)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
unsigned int nb_supported = 0;
unsigned int nb_written = 0;
static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
unsigned int wanted_fc, link_fc;
memset(fc_conf, 0, sizeof(*fc_conf));
static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
unsigned int fcntl;
int rc;
return -rc;
}
+static int
+sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
+{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ boolean_t scatter_enabled;
+ const char *error;
+ unsigned int i;
+
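+ /* Check that every initialised RxQ can accept the new PDU size */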
+ for (i = 0; i < sas->rxq_count; i++) {
+ if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
+ continue;
+
+ scatter_enabled = (sas->rxq_info[i].type_flags &
+ EFX_RXQ_FLAG_SCATTER);
+
+ if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
+ encp->enc_rx_prefix_size,
+ scatter_enabled,
+ encp->enc_rx_scatter_max, &error)) {
+ sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
+ error);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
size_t pdu = EFX_MAC_PDU(mtu);
size_t old_pdu;
int rc;
if (pdu > EFX_MAC_PDU_MAX) {
sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
(unsigned int)mtu, (unsigned int)pdu,
- EFX_MAC_PDU_MAX);
+ (unsigned int)EFX_MAC_PDU_MAX);
goto fail_inval;
}
sfc_adapter_lock(sa);
+ rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
+ if (rc != 0)
+ goto fail_check_scatter;
+
if (pdu != sa->port.pdu) {
if (sa->state == SFC_ADAPTER_STARTED) {
sfc_stop(sa);
* The driver does not use it, but other PMDs update the jumbo frame
* flag and max_rx_pkt_len when the MTU is set.
*/
- if (mtu > ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
"PDU max size - port is stopped",
(unsigned int)pdu, (unsigned int)old_pdu);
+
+fail_check_scatter:
sfc_adapter_unlock(sa);
fail_inval:
return -rc;
}
static int
-sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_port *port = &sa->port;
- struct ether_addr *old_addr = &dev->data->mac_addrs[0];
+ struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
int rc = 0;
sfc_adapter_lock(sa);
+ if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
+ goto unlock;
+
/*
* Copy the address to the device private data so that
* it could be recalled in the case of adapter restart.
*/
- ether_addr_copy(mac_addr, &port->default_mac_addr);
+ rte_ether_addr_copy(mac_addr, &port->default_mac_addr);
/*
* Neither of the two following checks can return
* has no effect on received traffic, therefore
* we also need to update unicast filters
*/
- rc = sfc_set_rx_mode(sa);
+ rc = sfc_set_rx_mode_unchecked(sa);
if (rc != 0) {
sfc_err(sa, "cannot set filter (rc = %u)", rc);
/* Rollback the old address */
(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
- (void)sfc_set_rx_mode(sa);
+ (void)sfc_set_rx_mode_unchecked(sa);
}
} else {
sfc_warn(sa, "cannot set MAC address with filters installed");
unlock:
if (rc != 0)
- ether_addr_copy(old_addr, &port->default_mac_addr);
+ rte_ether_addr_copy(old_addr, &port->default_mac_addr);
sfc_adapter_unlock(sa);
static int
-sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
- uint32_t nb_mc_addr)
+sfc_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
uint8_t *mc_addrs = port->mcast_addrs;
int rc;
* use any process-local pointers from the adapter data.
*/
static void
-sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
struct rte_eth_rxq_info *qinfo)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
-
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
qinfo->mp = rxq_info->refill_mb_pool;
qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
* use any process-local pointers from the adapter data.
*/
static void
-sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
struct rte_eth_txq_info *qinfo)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_txq_info *txq_info;
- SFC_ASSERT(tx_queue_id < sas->txq_count);
+ SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);
- txq_info = &sas->txq_info[tx_queue_id];
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
memset(qinfo, 0, sizeof(*qinfo));
* use any process-local pointers from the adapter data.
*/
static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
return 0;
}
static int
-sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ if (rxq_info->state != SFC_RXQ_INITIALIZED)
goto fail_not_setup;
- rc = sfc_rx_qstart(sa, rx_queue_id);
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
goto fail_rx_qstart;
- sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+ rxq_info->deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
}
static int
-sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
- sfc_rx_qstop(sa, rx_queue_id);
- sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ sfc_rx_qstop(sa, sw_index);
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ rxq_info->deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
}
static int
-sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+ sfc_log_init(sa, "TxQ = %u", ethdev_qid);
sfc_adapter_lock(sa);
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ if (txq_info->state != SFC_TXQ_INITIALIZED)
goto fail_not_setup;
- rc = sfc_tx_qstart(sa, tx_queue_id);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ rc = sfc_tx_qstart(sa, sw_index);
if (rc != 0)
goto fail_tx_qstart;
- sas->txq_info[tx_queue_id].deferred_started = B_TRUE;
+ txq_info->deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
return 0;
}
static int
-sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_txq_info *txq_info;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+ sfc_log_init(sa, "TxQ = %u", ethdev_qid);
sfc_adapter_lock(sa);
- sfc_tx_qstop(sa, tx_queue_id);
+ sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+ sfc_tx_qstop(sa, sw_index);
- sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+ txq_info->deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
return 0;
struct rte_eth_udp_tunnel *tunnel_udp,
enum sfc_udp_tunnel_op_e op)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
efx_tunnel_protocol_t tunnel_proto;
int rc;
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
unsigned int efx_hash_types;
+ uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
+ unsigned int n_contexts;
+ unsigned int mode_i = 0;
+ unsigned int key_i = 0;
+ unsigned int i = 0;
int rc = 0;
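+
+ /*
+ * Apply the new hash mode (and key, if provided) to the default RSS
+ * context and, if one exists, to the dummy RSS context as well.
+ */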
+ n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
+
if (sfc_sa2shared(sa)->isolated)
return -ENOTSUP;
if ((rss_conf->rss_key != NULL) &&
(rss_conf->rss_key_len != sizeof(rss->key))) {
- sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sfc_err(sa, "RSS key size is wrong (should be %zu)",
sizeof(rss->key));
return -EINVAL;
}
if (rc != 0)
goto fail_rx_hf_rte_to_efx;
- rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- rss->hash_alg, efx_hash_types, B_TRUE);
- if (rc != 0)
- goto fail_scale_mode_set;
+ for (mode_i = 0; mode_i < n_contexts; mode_i++) {
+ rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
+ rss->hash_alg, efx_hash_types,
+ B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+ }
if (rss_conf->rss_key != NULL) {
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_rx_scale_key_set(sa->nic,
- EFX_RSS_CONTEXT_DEFAULT,
- rss_conf->rss_key,
- sizeof(rss->key));
- if (rc != 0)
- goto fail_scale_key_set;
+ for (key_i = 0; key_i < n_contexts; key_i++) {
+ rc = efx_rx_scale_key_set(sa->nic,
+ contexts[key_i],
+ rss_conf->rss_key,
+ sizeof(rss->key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+ }
}
rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
return 0;
fail_scale_key_set:
- if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- rss->hash_types, B_TRUE) != 0)
- sfc_err(sa, "failed to restore RSS mode");
+ for (i = 0; i < key_i; i++) {
+ if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
+ sizeof(rss->key)) != 0)
+ sfc_err(sa, "failed to restore RSS key");
+ }
fail_scale_mode_set:
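+ /* Restore the previous hash mode on contexts updated before the failure */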
+ for (i = 0; i < mode_i; i++) {
+ if (efx_rx_scale_mode_set(sa->nic, contexts[i],
+ EFX_RX_HASHALG_TOEPLITZ,
+ rss->hash_types, B_TRUE) != 0)
+ sfc_err(sa, "failed to restore RSS mode");
+ }
+
fail_rx_hf_rte_to_efx:
sfc_adapter_unlock(sa);
return -rc;
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
unsigned int *rss_tbl_new;
uint16_t entry;
}
static int
-sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- struct sfc_adapter *sa = dev->data->dev_private;
- int rc = ENOTSUP;
-
- sfc_log_init(sa, "entry");
-
- switch (filter_type) {
- case RTE_ETH_FILTER_NONE:
- sfc_err(sa, "Global filters configuration not supported");
- break;
- case RTE_ETH_FILTER_MACVLAN:
- sfc_err(sa, "MACVLAN filters not supported");
- break;
- case RTE_ETH_FILTER_ETHERTYPE:
- sfc_err(sa, "EtherType filters not supported");
- break;
- case RTE_ETH_FILTER_FLEXIBLE:
- sfc_err(sa, "Flexible filters not supported");
- break;
- case RTE_ETH_FILTER_SYN:
- sfc_err(sa, "SYN filters not supported");
- break;
- case RTE_ETH_FILTER_NTUPLE:
- sfc_err(sa, "NTUPLE filters not supported");
- break;
- case RTE_ETH_FILTER_TUNNEL:
- sfc_err(sa, "Tunnel filters not supported");
- break;
- case RTE_ETH_FILTER_FDIR:
- sfc_err(sa, "Flow Director filters not supported");
- break;
- case RTE_ETH_FILTER_HASH:
- sfc_err(sa, "Hash filters not supported");
- break;
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET) {
- rc = EINVAL;
- } else {
- *(const void **)arg = &sfc_flow_ops;
- rc = 0;
- }
- break;
- default:
- sfc_err(sa, "Unknown filter type %u", filter_type);
- break;
- }
-
- sfc_log_init(sa, "exit: %d", -rc);
- SFC_ASSERT(rc >= 0);
- return -rc;
+ *ops = &sfc_flow_ops;
+ return 0;
}
static int
return sap->dp_rx->pool_ops_supported(pool);
}
+static int
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+
+ return sap->dp_rx->intr_enable(rxq_info->dp);
+}
+
+static int
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+
+ return sap->dp_rx->intr_disable(rxq_info->dp);
+}
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
.tx_queue_stop = sfc_tx_queue_stop,
.rx_queue_setup = sfc_rx_queue_setup,
.rx_queue_release = sfc_rx_queue_release,
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_done = sfc_rx_descriptor_done,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
+ .rx_queue_intr_enable = sfc_rx_queue_intr_enable,
+ .rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
- .filter_ctrl = sfc_dev_filter_ctrl,
+ .flow_ops_get = sfc_dev_flow_ops_get,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,
static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
const struct sfc_dp_rx *dp_rx;
const struct sfc_dp_tx *dp_tx;
case EFX_FAMILY_MEDFORD:
case EFX_FAMILY_MEDFORD2:
avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+ avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
+ avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
+ break;
+ case EFX_FAMILY_RIVERHEAD:
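+ /* Riverhead NICs implement the EF100 datapath architecture */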
+ avail_caps |= SFC_DP_HW_FW_CAP_EF100;
break;
default:
break;
sa->priv.dp_tx = dp_tx;
dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_done = sfc_rx_descriptor_done;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_ops;
return 0;
static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
dev->dev_ops = NULL;
+ dev->tx_pkt_prepare = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
}
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
- .rx_queue_count = sfc_rx_queue_count,
- .rx_descriptor_done = sfc_rx_descriptor_done,
- .rx_descriptor_status = sfc_rx_descriptor_status,
- .tx_descriptor_status = sfc_tx_descriptor_status,
+ .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
.rxq_info_get = sfc_rx_queue_info_get,
dev->process_private = sap;
dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_done = sfc_rx_descriptor_done;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
return 0;
return rc;
}
-static void
-sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
-{
- free(dev->process_private);
- dev->process_private = NULL;
- dev->dev_ops = NULL;
- dev->tx_pkt_burst = NULL;
- dev->rx_pkt_burst = NULL;
-}
-
static void
sfc_register_dp(void)
{
/* Register once */
if (TAILQ_EMPTY(&sfc_dp_head)) {
/* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
- struct sfc_adapter_shared *sas;
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t logtype_main;
+ struct sfc_adapter *sa;
int rc;
const efx_nic_cfg_t *encp;
- const struct ether_addr *from;
+ const struct rte_ether_addr *from;
+ int ret;
+
+ if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
+ SFC_EFX_DEV_CLASS_NET) {
+ SFC_GENERIC_LOG(DEBUG,
+ "Incompatible device class: skip probing, should be probed by other sfc driver.");
+ return 1;
+ }
sfc_register_dp();
SFC_LOGTYPE_MAIN_STR,
RTE_LOG_NOTICE);
- sa->priv.shared = &sa->_shared;
- sas = sa->priv.shared;
-
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -sfc_eth_dev_secondary_init(dev, logtype_main);
+ /* Required for logging */
+ ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
+ "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function,
+ dev->data->port_id);
+ if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
+ SFC_GENERIC_LOG(ERR,
+ "reserved log prefix is too short for " PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ return -EINVAL;
+ }
+ sas->pci_addr = pci_dev->addr;
+ sas->port_id = dev->data->port_id;
+
/*
- * sfc_adapter is a mixture of shared and process private data.
- * During transition period use it in both kinds. When the
- * driver becomes ready to separate it, sfc_adapter will become
- * primary process private only.
+ * Allocate process-private data from the heap, since it must not
+ * be located in shared memory allocated using the rte_malloc() API.
*/
+ sa = calloc(1, sizeof(*sa));
+ if (sa == NULL) {
+ rc = ENOMEM;
+ goto fail_alloc_sa;
+ }
+
dev->process_private = sa;
/* Required for logging */
- sas->pci_addr = pci_dev->addr;
- sas->port_id = dev->data->port_id;
+ sa->priv.shared = sas;
sa->priv.logtype_main = logtype_main;
sa->eth_dev = dev;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
rc = sfc_kvargs_parse(sa);
if (rc != 0)
sfc_log_init(sa, "entry");
- dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
if (dev->data->mac_addrs == NULL) {
rc = ENOMEM;
goto fail_mac_addrs;
* The arguments are in reverse order compared to the
* Linux kernel. Copy from NIC config to Ethernet device data.
*/
- from = (const struct ether_addr *)(encp->enc_mac_addr);
- ether_addr_copy(from, &dev->data->mac_addrs[0]);
+ from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
+ rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);
sfc_adapter_unlock(sa);
fail_kvargs_parse:
sfc_log_init(sa, "failed %d", rc);
dev->process_private = NULL;
+ free(sa);
+
+fail_alloc_sa:
SFC_ASSERT(rc > 0);
return -rc;
}
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa;
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- sfc_eth_dev_secondary_clear_ops(dev);
- return 0;
- }
-
- sa = dev->data->dev_private;
- sfc_log_init(sa, "entry");
+ sfc_dev_close(dev);
- sfc_adapter_lock(sa);
-
- sfc_eth_dev_clear_ops(dev);
-
- sfc_detach(sa);
- sfc_unprobe(sa);
-
- sfc_kvargs_cleanup(sa);
-
- sfc_adapter_unlock(sa);
- sfc_adapter_lock_fini(sa);
-
- sfc_log_init(sa, "done");
-
- /* Required for logging, so cleanup last */
- sa->eth_dev = NULL;
return 0;
}
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
{ .vendor_id = 0 /* sentinel */ }
};
struct rte_pci_device *pci_dev)
{
return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct sfc_adapter), sfc_eth_dev_init);
+ sizeof(struct sfc_adapter_shared), sfc_eth_dev_init);
}
static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)