X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ethdev.c;h=52ba7b67b052762becffc82ad5c77aa55b621b7e;hb=0475c7770502cb4166b2577df3ff446af9d85515;hp=223da1cb5f6249e96d44c52fb53789a3980c8a47;hpb=c0802544d9e35042fe93f43fa4970e76011942eb;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 223da1cb5f..52ba7b67b0 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1,39 +1,20 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #include <rte_dev.h>
-#include <rte_ethdev.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_ether.h>

 #include "efx.h"
@@ -48,13 +29,19 @@
 #include "sfc_dp.h"
 #include "sfc_dp_rx.h"

+uint32_t sfc_logtype_driver;
+
 static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);

+
+static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
+
+
 static int
 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	efx_nic_fw_info_t enfi;
 	int ret;
 	int rc;
@@ -99,25 +86,39 @@ sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 	return 0;
 }

-static void
+static int
 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_rss *rss = &sas->rss;
+	struct sfc_mae *mae = &sa->mae;
+	uint64_t txq_offloads_def = 0;

 	sfc_log_init(sa, "entry");

-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_mtu = EFX_MAC_SDU_MAX;
+
 	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

+	dev_info->max_vfs = sa->sriov.num_vfs;
+
 	/* Autonegotiation may be disabled */
 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
 		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
-	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
 		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
-	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
+		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
+		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
+		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

 	dev_info->max_rx_queues = sa->rxq_max;
 	dev_info->max_tx_queues = sa->txq_max;
@@ -125,66 +126,90 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	/* By default packets are dropped if no descriptors are available */
 	dev_info->default_rxconf.rx_drop_en = 1;

-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+	/*
+	 * rx_offload_capa includes both device and queue offloads since
+	 * the latter may be requested on a per device basis which makes
+	 * sense when some offloads are needed to be set on all queues.
+	 */
+	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
+				    dev_info->rx_queue_offload_capa;

-	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
-	if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
-	    !encp->enc_hw_tx_insert_vlan_enabled)
-		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
-	else
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

-	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
-		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+	/*
+	 * tx_offload_capa includes both device and queue offloads since
+	 * the latter may be requested on a per device basis which makes
+	 * sense when some offloads are needed to be set on all queues.
+	 */
+	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
+				    dev_info->tx_queue_offload_capa;
+
+	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_txconf.offloads |= txq_offloads_def;
+
+	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
+		uint64_t rte_hf = 0;
+		unsigned int i;
+
+		for (i = 0; i < rss->hf_map_nb_entries; ++i)
+			rte_hf |= rss->hf_map[i].rte;

-#if EFSYS_OPT_RX_SCALE
-	if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
 		dev_info->reta_size = EFX_RSS_TBL_SIZE;
-		dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
-		dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
+		dev_info->flow_type_rss_offloads = rte_hf;
 	}
-#endif

-	if (sa->tso)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-
-	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
-	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+	/* Initialize to hardware limits */
+	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
+	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
 	/* The RXQ hardware requires that the descriptor count is a power
 	 * of 2, but rx_desc_lim cannot properly describe that constraint.
 	 */
-	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;

+	/* Initialize to hardware limits */
 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
-	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
 	/*
 	 * The TXQ hardware requires that the descriptor count is a power
 	 * of 2, but tx_desc_lim cannot properly describe that constraint
 	 */
-	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
+
+	if (sap->dp_rx->get_dev_info != NULL)
+		sap->dp_rx->get_dev_info(dev_info);
+	if (sap->dp_tx->get_dev_info != NULL)
+		sap->dp_tx->get_dev_info(dev_info);
+
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+	if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
+		dev_info->switch_info.name = dev->device->driver->name;
+		dev_info->switch_info.domain_id = mae->switch_domain_id;
+		dev_info->switch_info.port_id = mae->switch_port_id;
+	}
+
+	return 0;
 }

 static const uint32_t *
 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

-	return sa->dp_rx->supported_ptypes_get();
+	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
 }

 static int
 sfc_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_dev_data *dev_data = dev->data;
-	struct sfc_adapter *sa = dev_data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
@@ -213,7 +238,7 @@ sfc_dev_configure(struct rte_eth_dev *dev)
 static int
 sfc_dev_start(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "entry");
@@ -230,23 +255,14 @@
 static int
 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
-	struct rte_eth_link *dev_link = &dev->data->dev_link;
-	struct rte_eth_link old_link;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct rte_eth_link current_link;
+	int ret;

 	sfc_log_init(sa, "entry");

-retry:
-	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
-	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
-
 	if (sa->state != SFC_ADAPTER_STARTED) {
 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
-		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
-					 *(uint64_t *)&old_link,
-					 *(uint64_t *)&current_link))
-			goto retry;
 	} else if (wait_to_complete) {
 		efx_link_mode_t link_mode;

@@ -254,27 +270,23 @@ retry:
 			link_mode = EFX_LINK_UNKNOWN;
 		sfc_port_link_mode_to_info(link_mode, &current_link);

-		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
-					 *(uint64_t *)&old_link,
-					 *(uint64_t *)&current_link))
-			goto retry;
 	} else {
 		sfc_ev_mgmt_qpoll(sa);
-		*(int64_t *)&current_link =
-			rte_atomic64_read((rte_atomic64_t *)dev_link);
+		rte_eth_linkstatus_get(dev, &current_link);
 	}

-	if (old_link.link_status != current_link.link_status)
-		sfc_info(sa, "Link status is %s",
-			 current_link.link_status ? "UP" : "DOWN");
+	ret = rte_eth_linkstatus_set(dev, &current_link);
+	if (ret == 0)
+		sfc_notice(sa, "Link status is %s",
+			   current_link.link_status ? "UP" : "DOWN");

-	return old_link.link_status == current_link.link_status ? 0 : -1;
+	return ret;
 }

-static void
+static int
 sfc_dev_stop(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

 	sfc_log_init(sa, "entry");

@@ -283,12 +295,14 @@
 	sfc_adapter_unlock(sa);

 	sfc_log_init(sa, "done");
+
+	return 0;
 }

 static int
 sfc_dev_set_link_up(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "entry");
@@ -304,7 +318,7 @@
 static int
 sfc_dev_set_link_down(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

 	sfc_log_init(sa, "entry");

@@ -316,12 +330,24 @@
 }

 static void
+sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
+{
+	free(dev->process_private);
+	rte_eth_dev_release_port(dev);
+}
+
+static int
 sfc_dev_close(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

 	sfc_log_init(sa, "entry");

+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		sfc_eth_dev_secondary_clear_ops(dev);
+		return 0;
+	}
+
 	sfc_adapter_lock(sa);
 	switch (sa->state) {
 	case SFC_ADAPTER_STARTED:
@@ -338,20 +364,42 @@
 		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
 		break;
 	}
+
+	/*
+	 * Cleanup all resources.
+	 * Rollback primary process sfc_eth_dev_init() below.
+	 */
+
+	sfc_eth_dev_clear_ops(dev);
+
+	sfc_detach(sa);
+	sfc_unprobe(sa);
+
+	sfc_kvargs_cleanup(sa);
+
 	sfc_adapter_unlock(sa);
+	sfc_adapter_lock_fini(sa);

 	sfc_log_init(sa, "done");
+
+	/* Required for logging, so cleanup last */
+	sa->eth_dev = NULL;
+
+	free(sa);
+
+	return 0;
 }

-static void
+static int
 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
 		   boolean_t enabled)
 {
 	struct sfc_port *port;
 	boolean_t *toggle;
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
 	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+	int rc = 0;

 	sfc_adapter_lock(sa);

@@ -361,39 +409,65 @@ sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
 	if (*toggle != enabled) {
 		*toggle = enabled;

-		if ((sa->state == SFC_ADAPTER_STARTED) &&
-		    (sfc_set_rx_mode(sa) != 0)) {
+		if (sfc_sa2shared(sa)->isolated) {
+			sfc_warn(sa, "isolated mode is active on the port");
+			sfc_warn(sa, "the change is to be applied on the next "
+				     "start provided that isolated mode is "
+				     "disabled prior the next start");
+		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
+			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
 			*toggle = !(enabled);
-			sfc_warn(sa, "Failed to %s %s mode",
-				 ((enabled) ? "enable" : "disable"), desc);
+			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
+				 ((enabled) ? "enable" : "disable"), desc, rc);
+
+			/*
+			 * For promiscuous and all-multicast filters a
+			 * permission failure should be reported as an
+			 * unsupported filter.
+			 */
+			if (rc == EPERM)
+				rc = ENOTSUP;
 		}
 	}

 	sfc_adapter_unlock(sa);
+	return rc;
 }

-static void
+static int
 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
 {
-	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

-static void
+static int
 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
 {
-	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

-static void
+static int
 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
 {
-	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

-static void
+static int
 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
 {
-	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

 static int
@@ -402,7 +476,8 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		   const struct rte_eth_rxconf *rx_conf,
 		   struct rte_mempool *mb_pool)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
@@ -415,7 +490,7 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	if (rc != 0)
 		goto fail_rx_qinit;

-	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
+	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;

 	sfc_adapter_unlock(sa);

@@ -442,12 +517,10 @@ sfc_rx_queue_release(void *queue)
 	sa = rxq->evq->sa;
 	sfc_adapter_lock(sa);

-	sw_index = sfc_rxq_sw_index(rxq);
+	sw_index = dp_rxq->dpq.queue_id;

 	sfc_log_init(sa, "RxQ=%u", sw_index);

-	sa->eth_dev->data->rx_queues[sw_index] = NULL;
-
 	sfc_rx_qfini(sa, sw_index);

 	sfc_adapter_unlock(sa);
@@ -458,7 +531,8 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		   uint16_t nb_tx_desc, unsigned int socket_id,
 		   const struct rte_eth_txconf *tx_conf)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
@@ -470,7 +544,7 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	if (rc != 0)
 		goto fail_tx_qinit;

-	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
+	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;

 	sfc_adapter_unlock(sa);
 	return 0;
@@ -493,7 +567,7 @@ sfc_tx_queue_release(void *queue)
 		return;

 	txq = sfc_txq_by_dp_txq(dp_txq);
-	sw_index = sfc_txq_sw_index(txq);
+	sw_index = dp_txq->dpq.queue_id;

 	SFC_ASSERT(txq->evq != NULL);
 	sa = txq->evq->sa;
@@ -502,24 +576,46 @@ sfc_tx_queue_release(void *queue)
 	sfc_adapter_lock(sa);

-	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
-	sa->eth_dev->data->tx_queues[sw_index] = NULL;
-
 	sfc_tx_qfini(sa, sw_index);

 	sfc_adapter_unlock(sa);
 }

+/*
+ * Some statistics are computed as A - B where A and B each increase
+ * monotonically with some hardware counter(s) and the counters are read
+ * asynchronously.
+ *
+ * If packet X is counted in A, but not counted in B yet, computed value is
+ * greater than real.
+ *
+ * If packet X is not counted in A at the moment of reading the counter,
+ * but counted in B at the moment of reading the counter, computed value
+ * is less than real.
+ *
+ * However, counter which grows backward is worse evil than slightly wrong
+ * value. So, let's try to guarantee that it never happens except may be
+ * the case when the MAC stats are zeroed as a result of a NIC reset.
+ */
 static void
+sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
+{
+	if ((int64_t)(newval - *stat) > 0 || newval == 0)
+		*stat = newval;
+}
+
+static int
 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	uint64_t *mac_stats;
+	int ret;

 	rte_spinlock_lock(&port->mac_stats_lock);

-	if (sfc_port_update_mac_stats(sa) != 0)
+	ret = sfc_port_update_mac_stats(sa);
+	if (ret != 0)
 		goto unlock;

 	mac_stats = port->mac_stats_buf;
@@ -542,14 +638,21 @@ sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
 			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
 			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
-		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
-		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
 		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+
+		/* CRC is included in these stats, but shouldn't be */
+		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+		stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
 	} else {
-		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
 		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
 		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
 		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+
+		/* CRC is included in these stats, but shouldn't be */
+		stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
+		stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+
 		/*
 		 * Take into account stats which are whenever supported
 		 * on EF10. If some stat is not supported by current
@@ -572,16 +675,25 @@ sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
 			mac_stats[EFX_MAC_RX_JABBER_PKTS];
 		/* no oerrors counters supported on EF10 */
+
+		/* Exclude missed, errors and pauses from Rx packets */
+		sfc_update_diff_stat(&port->ipackets,
+			mac_stats[EFX_MAC_RX_PKTS] -
+			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
+			stats->imissed - stats->ierrors);
+		stats->ipackets = port->ipackets;
 	}

 unlock:
 	rte_spinlock_unlock(&port->mac_stats_lock);
+	SFC_ASSERT(ret >= 0);
+	return -ret;
 }

-static void
+static int
 sfc_stats_reset(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	int rc;

@@ -591,19 +703,22 @@
 		 * will be scheduled to be done during the next port start
 		 */
 		port->mac_stats_reset_pending = B_TRUE;
-		return;
+		return 0;
 	}

 	rc = sfc_port_reset_mac_stats(sa);
 	if (rc != 0)
 		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

 static int
 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	       unsigned int xstats_count)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	uint64_t *mac_stats;
 	int rc;
@@ -642,7 +757,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
 		     struct rte_eth_xstat_name *xstats_names,
 		     unsigned int xstats_count)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	unsigned int i;
 	unsigned int nstats = 0;
@@ -650,7 +765,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
 			if (xstats_names != NULL && nstats < xstats_count)
-				strncpy(xstats_names[nstats].name,
+				strlcpy(xstats_names[nstats].name,
 					efx_mac_stat_name(sa->nic, i),
 					sizeof(xstats_names[0].name));
 			nstats++;
@@ -660,10 +775,88 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
 	return nstats;
 }

+static int
+sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		     uint64_t *values, unsigned int n)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_port *port = &sa->port;
+	uint64_t *mac_stats;
+	unsigned int nb_supported = 0;
+	unsigned int nb_written = 0;
+	unsigned int i;
+	int ret;
+	int rc;
+
+	if (unlikely(values == NULL) ||
+	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
+		return port->mac_stats_nb_supported;
+
+	rte_spinlock_lock(&port->mac_stats_lock);
+
+	rc = sfc_port_update_mac_stats(sa);
+	if (rc != 0) {
+		SFC_ASSERT(rc > 0);
+		ret = -rc;
+		goto unlock;
+	}
+
+	mac_stats = port->mac_stats_buf;
+
+	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
+		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+			continue;
+
+		if ((ids == NULL) || (ids[nb_written] == nb_supported))
+			values[nb_written++] = mac_stats[i];
+
+		++nb_supported;
+	}
+
+	ret = nb_written;
+
+unlock:
+	rte_spinlock_unlock(&port->mac_stats_lock);
+
+	return ret;
+}
+
+static int
+sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+			   struct rte_eth_xstat_name *xstats_names,
+			   const uint64_t *ids, unsigned int size)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_port *port = &sa->port;
+	unsigned int nb_supported = 0;
+	unsigned int nb_written = 0;
+	unsigned int i;
+
+	if (unlikely(xstats_names == NULL) ||
+	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
+		return port->mac_stats_nb_supported;
+
+	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
+		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+			continue;
+
+		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
+			char *name = xstats_names[nb_written++].name;
+
+			strlcpy(name, efx_mac_stat_name(sa->nic, i),
+				sizeof(xstats_names[0].name));
+		}
+
+		++nb_supported;
+	}
+
+	return nb_written;
+}
+
 static int
 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	unsigned int wanted_fc, link_fc;

 	memset(fc_conf, 0, sizeof(*fc_conf));
@@ -703,7 +896,7 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 static int
 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	unsigned int fcntl;
 	int rc;
@@ -756,10 +949,39 @@ fail_inval:
 	return -rc;
 }

+static int
+sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	boolean_t scatter_enabled;
+	const char *error;
+	unsigned int i;
+
+	for (i = 0; i < sas->rxq_count; i++) {
+		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
+			continue;
+
+		scatter_enabled = (sas->rxq_info[i].type_flags &
+				   EFX_RXQ_FLAG_SCATTER);
+
+		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
+					  encp->enc_rx_prefix_size,
+					  scatter_enabled,
+					  encp->enc_rx_scatter_max, &error)) {
+			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
+				error);
+			return EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int
 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	size_t pdu = EFX_MAC_PDU(mtu);
 	size_t old_pdu;
 	int rc;
@@ -776,12 +998,16 @@
 	if (pdu > EFX_MAC_PDU_MAX) {
 		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
 			(unsigned int)mtu, (unsigned int)pdu,
-			EFX_MAC_PDU_MAX);
+			(unsigned int)EFX_MAC_PDU_MAX);
 		goto fail_inval;
 	}

 	sfc_adapter_lock(sa);

+	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
+	if (rc != 0)
+		goto fail_check_scatter;
+
 	if (pdu != sa->port.pdu) {
 		if (sa->state == SFC_ADAPTER_STARTED) {
 			sfc_stop(sa);
@@ -797,10 +1023,14 @@
 	}

 	/*
-	 * The driver does not use it, but other PMDs update jumbo_frame
+	 * The driver does not use it, but other PMDs update jumbo frame
 	 * flag and max_rx_pkt_len when MTU is set.
 	 */
-	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+	if (mtu > RTE_ETHER_MTU) {
+		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	}
+
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

 	sfc_adapter_unlock(sa);
@@ -814,6 +1044,8 @@ fail_start:
 	sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
 		"PDU max size - port is stopped",
 		(unsigned int)pdu, (unsigned int)old_pdu);
+
+fail_check_scatter:
 	sfc_adapter_unlock(sa);

 fail_inval:
@@ -821,18 +1053,42 @@ fail_inval:
 	SFC_ASSERT(rc > 0);
 	return -rc;
 }
-static void
-sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+static int
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-	int rc;
+	struct sfc_port *port = &sa->port;
+	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
+	int rc = 0;

 	sfc_adapter_lock(sa);

+	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
+		goto unlock;
+
+	/*
+	 * Copy the address to the device private data so that
+	 * it could be recalled in the case of adapter restart.
+	 */
+	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);
+
+	/*
+	 * Neither of the two following checks can return
+	 * an error. The new MAC address is preserved in
+	 * the device private data and can be activated
+	 * on the next port start if the user prevents
+	 * isolated mode from being enabled.
+	 */
+	if (sfc_sa2shared(sa)->isolated) {
+		sfc_warn(sa, "isolated mode is active on the port");
+		sfc_warn(sa, "will not set MAC address");
+		goto unlock;
+	}
+
 	if (sa->state != SFC_ADAPTER_STARTED) {
-		sfc_info(sa, "the port is not started");
-		sfc_info(sa, "the new MAC address will be set on port start");
+		sfc_notice(sa, "the port is not started");
+		sfc_notice(sa, "the new MAC address will be set on port start");

 		goto unlock;
 	}
@@ -849,9 +1105,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 		 * has no effect on received traffic, therefore
 		 * we also need to update unicast filters
 		 */
-		rc = sfc_set_rx_mode(sa);
-		if (rc != 0)
+		rc = sfc_set_rx_mode_unchecked(sa);
+		if (rc != 0) {
 			sfc_err(sa, "cannot set filter (rc = %u)", rc);
+			/* Rollback the old address */
+			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
+			(void)sfc_set_rx_mode_unchecked(sa);
+		}
 	} else {
 		sfc_warn(sa, "cannot set MAC address with filters installed");
 		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
@@ -859,9 +1119,9 @@
 		/*
 		 * Since setting MAC address with filters installed is not
-		 * allowed on the adapter, one needs to simply restart adapter
-		 * so that the new MAC address will be taken from an outer
-		 * storage and set flawlessly by means of sfc_start() call
+		 * allowed on the adapter, the new MAC address will be set
+		 * by means of adapter restart. sfc_start() shall retrieve
+		 * the new address from the device private data and set it.
 		 */
 		sfc_stop(sa);
 		rc = sfc_start(sa);
@@ -870,20 +1130,32 @@
 	}

 unlock:
+	if (rc != 0)
+		rte_ether_addr_copy(old_addr, &port->default_mac_addr);
+
 	sfc_adapter_unlock(sa);
+
+	SFC_ASSERT(rc >= 0);
+	return -rc;
 }

 static int
-sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
-		     uint32_t nb_mc_addr)
+sfc_set_mc_addr_list(struct rte_eth_dev *dev,
+		     struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_port *port = &sa->port;
 	uint8_t *mc_addrs = port->mcast_addrs;
 	int rc;
 	unsigned int i;

+	if (sfc_sa2shared(sa)->isolated) {
+		sfc_err(sa, "isolated mode is active on the port");
+		sfc_err(sa, "will not set multicast address list");
+		return -ENOTSUP;
+	}
+
 	if (mc_addrs == NULL)
 		return -ENOBUFS;

@@ -894,7 +1166,7 @@
 	}

 	for (i = 0; i < nb_mc_addr; ++i) {
-		(void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
 				 EFX_MAC_ADDR_LEN);
 		mc_addrs += EFX_MAC_ADDR_LEN;
 	}
@@ -909,82 +1181,130 @@
 	if (rc != 0)
 		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

-	SFC_ASSERT(rc > 0);
+	SFC_ASSERT(rc >= 0);
 	return -rc;
 }

+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static void
 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		      struct rte_eth_rxq_info *qinfo)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_rxq_info *rxq_info;
-	struct sfc_rxq *rxq;

-	sfc_adapter_lock(sa);
+	SFC_ASSERT(rx_queue_id < sas->rxq_count);

-	SFC_ASSERT(rx_queue_id < sa->rxq_count);
+	rxq_info = &sas->rxq_info[rx_queue_id];

-	rxq_info = &sa->rxq_info[rx_queue_id];
-	rxq = rxq_info->rxq;
-	SFC_ASSERT(rxq != NULL);
-
-	qinfo->mp = rxq->refill_mb_pool;
-	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+	qinfo->mp = rxq_info->refill_mb_pool;
+	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
 	qinfo->conf.rx_drop_en = 1;
 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
-	qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
+	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
+		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+		qinfo->scattered_rx = 1;
+	}
 	qinfo->nb_desc = rxq_info->entries;
-
-	sfc_adapter_unlock(sa);
 }

+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static void
 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		      struct rte_eth_txq_info *qinfo)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_txq_info *txq_info;

-	sfc_adapter_lock(sa);
-
-	SFC_ASSERT(tx_queue_id < sa->txq_count);
+	SFC_ASSERT(tx_queue_id < sas->txq_count);

-	txq_info = &sa->txq_info[tx_queue_id];
-	SFC_ASSERT(txq_info->txq != NULL);
+	txq_info = &sas->txq_info[tx_queue_id];

 	memset(qinfo, 0, sizeof(*qinfo));

-	qinfo->conf.txq_flags = txq_info->txq->flags;
-	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+	qinfo->conf.offloads = txq_info->offloads;
+	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
 	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
 	qinfo->nb_desc = txq_info->entries;
-
-	sfc_adapter_unlock(sa);
 }

+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static uint32_t
 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rxq_info *rxq_info;

-	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+	SFC_ASSERT(rx_queue_id < sas->rxq_count);
+	rxq_info = &sas->rxq_info[rx_queue_id];
+
+	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
+		return 0;

-	return sfc_rx_qdesc_npending(sa, rx_queue_id);
+	return sap->dp_rx->qdesc_npending(rxq_info->dp);
 }

+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static int
 sfc_rx_descriptor_done(void *queue, uint16_t offset)
 {
 	struct sfc_dp_rxq *dp_rxq = queue;
+	const struct sfc_dp_rx *dp_rx;
+
+	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

-	return sfc_rx_qdesc_done(dp_rxq, offset);
+	return offset < dp_rx->qdesc_npending(dp_rxq);
+}
+
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
+static int
+sfc_rx_descriptor_status(void *queue, uint16_t offset)
+{
+	struct sfc_dp_rxq *dp_rxq = queue;
+	const struct sfc_dp_rx *dp_rx;
+
+	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
+
+	return dp_rx->qdesc_status(dp_rxq, offset);
+}
+
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
+static int
+sfc_tx_descriptor_status(void *queue, uint16_t offset)
+{
+	struct sfc_dp_txq *dp_txq = queue;
+	const struct sfc_dp_tx *dp_tx;
+
+	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
+
+	return dp_tx->qdesc_status(dp_txq, offset);
 }

 static int
 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
@@ -995,17 +1315,21 @@
 	if (sa->state != SFC_ADAPTER_STARTED)
 		goto fail_not_started;

+	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+		goto fail_not_setup;
+
 	rc = sfc_rx_qstart(sa, rx_queue_id);
 	if (rc != 0)
 		goto fail_rx_qstart;

-	sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;

 	sfc_adapter_unlock(sa);

 	return 0;

fail_rx_qstart:
+fail_not_setup:
fail_not_started:
 	sfc_adapter_unlock(sa);
 	SFC_ASSERT(rc > 0);
@@ -1015,14 +1339,15 @@ fail_not_started:
 static int
 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

 	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

 	sfc_adapter_lock(sa);
 	sfc_rx_qstop(sa, rx_queue_id);

-	sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;

 	sfc_adapter_unlock(sa);

@@ -1032,7 +1357,8 @@
 static int
 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	int rc;

 	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
@@ -1043,17 +1369,21 @@
 	if (sa->state != SFC_ADAPTER_STARTED)
 		goto fail_not_started;

+	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
+		goto fail_not_setup;
+
 	rc = sfc_tx_qstart(sa, tx_queue_id);
 	if (rc != 0)
 		goto fail_tx_qstart;

-	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;

 	sfc_adapter_unlock(sa);
 	return 0;

fail_tx_qstart:
+fail_not_setup:
fail_not_started:
 	sfc_adapter_unlock(sa);
 	SFC_ASSERT(rc > 0);
@@ -1063,7 +1393,8 @@ fail_not_started:
 static int
 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

 	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

@@ -1071,39 +1402,153 @@
 	sfc_adapter_lock(sa);

 	sfc_tx_qstop(sa, tx_queue_id);

-	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+	sfc_adapter_unlock(sa);
+	return 0;
+}
+
+static efx_tunnel_protocol_t
+sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
+{
+	switch (rte_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		return EFX_TUNNEL_PROTOCOL_VXLAN;
+	case RTE_TUNNEL_TYPE_GENEVE:
+		return EFX_TUNNEL_PROTOCOL_GENEVE;
+	default:
+		return EFX_TUNNEL_NPROTOS;
+	}
+}
+
+enum sfc_udp_tunnel_op_e {
+	SFC_UDP_TUNNEL_ADD_PORT,
+	SFC_UDP_TUNNEL_DEL_PORT,
+};
+
+static int
+sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
+		      struct rte_eth_udp_tunnel *tunnel_udp,
+		      enum sfc_udp_tunnel_op_e op)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	efx_tunnel_protocol_t tunnel_proto;
+	int rc;
+
+	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
+		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
+		     tunnel_udp->udp_port, tunnel_udp->prot_type);
+
+	tunnel_proto =
+		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
+	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
+		rc = ENOTSUP;
+		goto fail_bad_proto;
+	}
+
+	sfc_adapter_lock(sa);
+
+	switch (op) {
+	case SFC_UDP_TUNNEL_ADD_PORT:
+		rc = efx_tunnel_config_udp_add(sa->nic,
+					       tunnel_udp->udp_port,
+					       tunnel_proto);
+		break;
+	case SFC_UDP_TUNNEL_DEL_PORT:
+		rc = efx_tunnel_config_udp_remove(sa->nic,
+						  tunnel_udp->udp_port,
+						  tunnel_proto);
+		break;
+	default:
+		rc = EINVAL;
+		goto fail_bad_op;
+	}
+
+	if (rc != 0)
+		goto fail_op;
+
+	if (sa->state == SFC_ADAPTER_STARTED) {
+		rc = efx_tunnel_reconfigure(sa->nic);
+		if (rc == EAGAIN) {
+			/*
+			 * Configuration is accepted by FW and MC reboot
+			 * is initiated to apply the changes. MC reboot
+			 * will be handled in a usual way (MC reboot
+			 * event on management event queue and adapter
+			 * restart).
+			 */
+			rc = 0;
+		} else if (rc != 0) {
+			goto fail_reconfigure;
+		}
+	}

 	sfc_adapter_unlock(sa);
 	return 0;
+
+fail_reconfigure:
+	/* Remove/restore entry since the change makes the trouble */
+	switch (op) {
+	case SFC_UDP_TUNNEL_ADD_PORT:
+		(void)efx_tunnel_config_udp_remove(sa->nic,
+						   tunnel_udp->udp_port,
+						   tunnel_proto);
+		break;
+	case SFC_UDP_TUNNEL_DEL_PORT:
+		(void)efx_tunnel_config_udp_add(sa->nic,
+						tunnel_udp->udp_port,
+						tunnel_proto);
+		break;
+	}
+
+fail_op:
+fail_bad_op:
+	sfc_adapter_unlock(sa);
+
+fail_bad_proto:
+	SFC_ASSERT(rc > 0);
+	return -rc;
 }

-#if EFSYS_OPT_RX_SCALE
+static int
+sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			    struct rte_eth_udp_tunnel *tunnel_udp)
+{
+	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
+}
+
+static int
+sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			    struct rte_eth_udp_tunnel *tunnel_udp)
+{
+	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
+}
+
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static int
 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 			  struct rte_eth_rss_conf *rss_conf)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rss *rss = &sas->rss;

-	if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
 		return -ENOTSUP;

-	if (sa->rss_channels == 0)
-		return -EINVAL;
-
-	sfc_adapter_lock(sa);
-
 	/*
 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
 	 * hence, conversion is done here to derive a correct set of ETH_RSS
 	 * flags which corresponds to the active EFX configuration stored
 	 * locally in 'sfc_adapter' and kept up-to-date
 	 */
-	rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
-	rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
+	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
+	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
 	if (rss_conf->rss_key != NULL)
-		rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);
-
-	sfc_adapter_unlock(sa);
+		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

 	return 0;
 }
@@ -1112,97 +1557,123 @@ static int
 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 	unsigned int efx_hash_types;
+	uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
+	unsigned int n_contexts;
+	unsigned int mode_i = 0;
+	unsigned int key_i = 0;
+	unsigned int i = 0;
 	int rc = 0;

-	if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+	n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
+
+	if (sfc_sa2shared(sa)->isolated)
+		return -ENOTSUP;
+
+	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
 		sfc_err(sa, "RSS is not available");
 		return -ENOTSUP;
 	}

-	if (sa->rss_channels == 0) {
+	if (rss->channels == 0) {
 		sfc_err(sa, "RSS is not configured");
 		return -EINVAL;
 	}

 	if ((rss_conf->rss_key != NULL) &&
-	    (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
-		sfc_err(sa, "RSS key size is wrong (should be %lu)",
-			sizeof(sa->rss_key));
-		return -EINVAL;
-	}
-
-	if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
-		sfc_err(sa, "unsupported hash functions requested");
+	    (rss_conf->rss_key_len != sizeof(rss->key))) {
+		sfc_err(sa, "RSS key size is wrong (should be %zu)",
+			sizeof(rss->key));
 		return -EINVAL;
 	}

 	sfc_adapter_lock(sa);

-	efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
-
-	rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
-				   efx_hash_types, B_TRUE);
+	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
 	if (rc != 0)
-		goto fail_scale_mode_set;
+		goto fail_rx_hf_rte_to_efx;
+
+	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
+		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
+					   rss->hash_alg, efx_hash_types,
+					   B_TRUE);
+		if (rc != 0)
+			goto fail_scale_mode_set;
+	}

 	if (rss_conf->rss_key != NULL) {
 		if (sa->state == SFC_ADAPTER_STARTED) {
-			rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
-						  sizeof(sa->rss_key));
-			if (rc != 0)
-				goto fail_scale_key_set;
+			for (key_i = 0; key_i < n_contexts; key_i++) {
+				rc = efx_rx_scale_key_set(sa->nic,
+							  contexts[key_i],
+							  rss_conf->rss_key,
+							  sizeof(rss->key));
+				if (rc != 0)
+					goto fail_scale_key_set;
+			}
 		}

-		rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
 	}

-	sa->rss_hash_types = efx_hash_types;
+	rss->hash_types = efx_hash_types;

 	sfc_adapter_unlock(sa);

 	return 0;

fail_scale_key_set:
-	if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
-				  sa->rss_hash_types, B_TRUE) != 0)
-		sfc_err(sa, "failed to restore RSS mode");
+	for (i = 0; i < key_i; i++) {
+		if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
+					 sizeof(rss->key)) != 0)
+			sfc_err(sa, "failed to restore RSS key");
+	}

fail_scale_mode_set:
+	for (i = 0; i < mode_i; i++) {
+		if (efx_rx_scale_mode_set(sa->nic, contexts[i],
+					  EFX_RX_HASHALG_TOEPLITZ,
+					  rss->hash_types, B_TRUE) != 0)
+			sfc_err(sa, "failed to restore RSS mode");
+	}
+
+fail_rx_hf_rte_to_efx:
 	sfc_adapter_unlock(sa);
 	return -rc;
 }

+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
 static int
 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
 		       struct rte_eth_rss_reta_entry64 *reta_conf,
 		       uint16_t reta_size)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rss *rss = &sas->rss;
 	int entry;

-	if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
 		return -ENOTSUP;

-	if (sa->rss_channels == 0)
+	if (rss->channels == 0)
 		return -EINVAL;

 	if (reta_size != EFX_RSS_TBL_SIZE)
 		return -EINVAL;

-	sfc_adapter_lock(sa);
-
 	for (entry = 0; entry < reta_size; entry++) {
 		int grp = entry / RTE_RETA_GROUP_SIZE;
 		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

 		if ((reta_conf[grp].mask >> grp_idx) & 1)
-			reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
 	}

-	sfc_adapter_unlock(sa);
-
 	return 0;
 }

@@ -1211,18 +1682,22 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
 			struct rte_eth_rss_reta_entry64 *reta_conf,
 			uint16_t reta_size)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 	unsigned int *rss_tbl_new;
 	uint16_t entry;
-	int rc;
+	int rc = 0;

-	if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+	if (sfc_sa2shared(sa)->isolated)
+		return -ENOTSUP;
+
+	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
 		sfc_err(sa, "RSS is not available");
 		return -ENOTSUP;
 	}

-	if (sa->rss_channels == 0) {
+	if (rss->channels == 0) {
 		sfc_err(sa, "RSS is not configured");
 		return -EINVAL;
 	}
@@ -1233,13 +1708,13 @@
 		return -EINVAL;
 	}

-	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
 	if (rss_tbl_new == NULL)
 		return -ENOMEM;

 	sfc_adapter_lock(sa);

-	rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

 	for (entry = 0; entry < reta_size; entry++) {
 		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
@@ -1248,7 +1723,7 @@
 		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

 		if (grp->mask & (1ull << grp_idx)) {
-			if (grp->reta[grp_idx] >= sa->rss_channels) {
+			if (grp->reta[grp_idx] >= rss->channels) {
 				rc = EINVAL;
 				goto bad_reta_entry;
 			}
 		}
 	}

-	rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
-	if (rc == 0)
-		rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+	if (sa->state == SFC_ADAPTER_STARTED) {
+		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+					  rss_tbl_new, EFX_RSS_TBL_SIZE);
+		if (rc != 0)
+			goto fail_scale_tbl_set;
+	}
+
+	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

+fail_scale_tbl_set:
bad_reta_entry:
 	sfc_adapter_unlock(sa);

@@ -1268,62 +1749,54 @@ bad_reta_entry:
 	SFC_ASSERT(rc >= 0);
 	return -rc;
 }
-#endif

 static int
-sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-		    enum rte_filter_op filter_op,
-		    void *arg)
+sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		     const struct rte_flow_ops **ops)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
-	int rc = ENOTSUP;
+	*ops = &sfc_flow_ops;
+	return 0;
+}

-	sfc_log_init(sa, "entry");
+static int
+sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
+{
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

-	switch (filter_type) {
-	case RTE_ETH_FILTER_NONE:
-		sfc_err(sa, "Global filters configuration not supported");
-		break;
-	case RTE_ETH_FILTER_MACVLAN:
-		sfc_err(sa, "MACVLAN filters not supported");
-		break;
-	case RTE_ETH_FILTER_ETHERTYPE:
-		sfc_err(sa, "EtherType filters not supported");
-		break;
-	case RTE_ETH_FILTER_FLEXIBLE:
-		sfc_err(sa, "Flexible filters not supported");
-		break;
-	case RTE_ETH_FILTER_SYN:
-		sfc_err(sa, "SYN filters not supported");
-		break;
-	case RTE_ETH_FILTER_NTUPLE:
-		sfc_err(sa, "NTUPLE filters not supported");
-		break;
-	case RTE_ETH_FILTER_TUNNEL:
-		sfc_err(sa, "Tunnel filters not supported");
-		break;
-	case RTE_ETH_FILTER_FDIR:
-		sfc_err(sa, "Flow Director filters not supported");
-		break;
-	case RTE_ETH_FILTER_HASH:
-		sfc_err(sa, "Hash filters not supported");
-		break;
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET) {
-			rc = EINVAL;
-		} else {
-			*(const void **)arg = &sfc_flow_ops;
-			rc = 0;
-		}
-		break;
-	default:
-		sfc_err(sa, "Unknown filter type %u", filter_type);
-		break;
-	}
+	/*
+	 * If Rx datapath does not provide callback to check mempool,
+	 * all pools are supported.
+	 */
+	if (sap->dp_rx->pool_ops_supported == NULL)
+		return 1;

-	sfc_log_init(sa, "exit: %d", -rc);
-	SFC_ASSERT(rc >= 0);
-	return -rc;
+	return sap->dp_rx->pool_ops_supported(pool);
+}
+
+static int
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rxq_info *rxq_info;
+
+	SFC_ASSERT(queue_id < sas->rxq_count);
+	rxq_info = &sas->rxq_info[queue_id];
+
+	return sap->dp_rx->intr_enable(rxq_info->dp);
+}
+
+static int
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rxq_info *rxq_info;
+
+	SFC_ASSERT(queue_id < sas->rxq_count);
+	rxq_info = &sas->rxq_info[queue_id];
+
+	return sap->dp_rx->intr_disable(rxq_info->dp);
 }

 static const struct eth_dev_ops sfc_eth_dev_ops = {
@@ -1352,30 +1825,60 @@
 	.tx_queue_stop			= sfc_tx_queue_stop,
 	.rx_queue_setup			= sfc_rx_queue_setup,
 	.rx_queue_release		= sfc_rx_queue_release,
-	.rx_queue_count			= sfc_rx_queue_count,
-	.rx_descriptor_done		= sfc_rx_descriptor_done,
+	.rx_queue_intr_enable		= sfc_rx_queue_intr_enable,
+	.rx_queue_intr_disable		= sfc_rx_queue_intr_disable,
 	.tx_queue_setup			= sfc_tx_queue_setup,
 	.tx_queue_release		= sfc_tx_queue_release,
 	.flow_ctrl_get			= sfc_flow_ctrl_get,
 	.flow_ctrl_set			= sfc_flow_ctrl_set,
 	.mac_addr_set			= sfc_mac_addr_set,
-#if EFSYS_OPT_RX_SCALE
+	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
 	.reta_update			= sfc_dev_rss_reta_update,
 	.reta_query			= sfc_dev_rss_reta_query,
 	.rss_hash_update		= sfc_dev_rss_hash_update,
 	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
-#endif
-	.filter_ctrl			= sfc_dev_filter_ctrl,
+	.flow_ops_get			= sfc_dev_flow_ops_get,
 	.set_mc_addr_list		= sfc_set_mc_addr_list,
 	.rxq_info_get			= sfc_rx_queue_info_get,
 	.txq_info_get			= sfc_tx_queue_info_get,
 	.fw_version_get			= sfc_fw_version_get,
+	.xstats_get_by_id		= sfc_xstats_get_by_id,
+	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
+	.pool_ops_supported		= sfc_pool_ops_supported,
 };

+/**
+ * Duplicate a string in potentially shared memory required for
+ * multi-process support.
+ *
+ * strdup() allocates from process-local heap/memory.
+ */
+static char *
+sfc_strdup(const char *str)
+{
+	size_t size;
+	char *copy;
+
+	if (str == NULL)
+		return NULL;
+
+	size = strlen(str) + 1;
+	copy = rte_malloc(__func__, size, 0);
+	if (copy != NULL)
+		rte_memcpy(copy, str, size);
+
+	return copy;
+}
+
 static int
 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 {
-	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	const struct sfc_dp_rx *dp_rx;
+	const struct sfc_dp_tx *dp_tx;
+	const efx_nic_cfg_t *encp;
 	unsigned int avail_caps = 0;
 	const char *rx_name = NULL;
 	const char *tx_name = NULL;
@@ -1384,34 +1887,44 @@
 	switch (sa->family) {
 	case EFX_FAMILY_HUNTINGTON:
 	case EFX_FAMILY_MEDFORD:
+	case EFX_FAMILY_MEDFORD2:
 		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
+		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
+		break;
+	case EFX_FAMILY_RIVERHEAD:
+		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
 		break;
 	default:
 		break;
 	}

+	encp = efx_nic_cfg_get(sa->nic);
+	if (encp->enc_rx_es_super_buffer_supported)
+		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
 	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
 				sfc_kvarg_string_handler, &rx_name);
 	if (rc != 0)
 		goto fail_kvarg_rx_datapath;

 	if (rx_name != NULL) {
-		sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
-		if (sa->dp_rx == NULL) {
+		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+		if (dp_rx == NULL) {
 			sfc_err(sa, "Rx datapath %s not found", rx_name);
 			rc = ENOENT;
 			goto fail_dp_rx;
 		}
-		if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
 			sfc_err(sa,
 				"Insufficient Hw/FW capabilities to use Rx datapath %s",
 				rx_name);
 			rc = EINVAL;
-			goto fail_dp_rx;
+			goto fail_dp_rx_caps;
 		}
 	} else {
-		sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
-		if (sa->dp_rx == NULL) {
+		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+		if (dp_rx == NULL) {
 			sfc_err(sa, "Rx datapath by caps %#x not found",
 				avail_caps);
 			rc = ENOENT;
@@ -1419,9 +1932,13 @@
 		}
 	}

-	sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
+	if (sas->dp_rx_name == NULL) {
+		rc = ENOMEM;
+		goto fail_dp_rx_name;
+	}

-	dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);

 	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
 				sfc_kvarg_string_handler, &tx_name);
@@ -1429,22 +1946,22 @@
 		goto fail_kvarg_tx_datapath;

 	if (tx_name != NULL) {
-		sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
-		if (sa->dp_tx == NULL) {
+		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+		if (dp_tx == NULL) {
 			sfc_err(sa, "Tx datapath %s not found", tx_name);
 			rc = ENOENT;
 			goto fail_dp_tx;
 		}
-		if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
 			sfc_err(sa,
 				"Insufficient Hw/FW capabilities to use Tx datapath %s",
 				tx_name);
 			rc = EINVAL;
-			goto fail_dp_tx;
+			goto fail_dp_tx_caps;
 		}
 	} else {
-		sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
-		if (sa->dp_tx == NULL) {
+		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+		if (dp_tx == NULL) {
 			sfc_err(sa, "Tx datapath by caps %#x not found",
 				avail_caps);
 			rc = ENOENT;
@@ -1452,30 +1969,159 @@
 		}
 	}

-	sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
+	if (sas->dp_tx_name == NULL) {
+		rc = ENOMEM;
+		goto fail_dp_tx_name;
+	}
+
+	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);

-	dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
+	sa->priv.dp_rx = dp_rx;
+	sa->priv.dp_tx = dp_tx;
+	dev->rx_pkt_burst = dp_rx->pkt_burst;
+	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
+	dev->tx_pkt_burst = dp_tx->pkt_burst;
+
+	dev->rx_queue_count = sfc_rx_queue_count;
+	dev->rx_descriptor_done = sfc_rx_descriptor_done;
+	dev->rx_descriptor_status = sfc_rx_descriptor_status;
+	dev->tx_descriptor_status = sfc_tx_descriptor_status;
 	dev->dev_ops = &sfc_eth_dev_ops;

 	return 0;

+fail_dp_tx_name:
+fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
+	rte_free(sas->dp_rx_name);
+	sas->dp_rx_name = NULL;
+
+fail_dp_rx_name:
+fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
 	return rc;
 }

+static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+
+	dev->dev_ops = NULL;
+	dev->tx_pkt_prepare = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
+	rte_free(sas->dp_tx_name);
+	sas->dp_tx_name = NULL;
+	sa->priv.dp_tx = NULL;
+
+	rte_free(sas->dp_rx_name);
+	sas->dp_rx_name = NULL;
+	sa->priv.dp_rx = NULL;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
+	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
+	.reta_query			= sfc_dev_rss_reta_query,
+	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
+	.rxq_info_get			= sfc_rx_queue_info_get,
+	.txq_info_get			= sfc_tx_queue_info_get,
+};
+
+static int
+sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
+{
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter_priv *sap;
+	const struct sfc_dp_rx *dp_rx;
+	const struct sfc_dp_tx *dp_tx;
+	int rc;
+
+	/*
+	 * Allocate process private data from heap, since it should not
+	 * be located in shared memory allocated using rte_malloc() API.
+ */ + sap = calloc(1, sizeof(*sap)); + if (sap == NULL) { + rc = ENOMEM; + goto fail_alloc_priv; + } + + sap->logtype_main = logtype_main; + + dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name); + if (dp_rx == NULL) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "cannot find %s Rx datapath", sas->dp_rx_name); + rc = ENOENT; + goto fail_dp_rx; + } + if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "%s Rx datapath does not support multi-process", + sas->dp_rx_name); + rc = EINVAL; + goto fail_dp_rx_multi_process; + } + + dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name); + if (dp_tx == NULL) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "cannot find %s Tx datapath", sas->dp_tx_name); + rc = ENOENT; + goto fail_dp_tx; + } + if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "%s Tx datapath does not support multi-process", + sas->dp_tx_name); + rc = EINVAL; + goto fail_dp_tx_multi_process; + } + + sap->dp_rx = dp_rx; + sap->dp_tx = dp_tx; + + dev->process_private = sap; + dev->rx_pkt_burst = dp_rx->pkt_burst; + dev->tx_pkt_prepare = dp_tx->pkt_prepare; + dev->tx_pkt_burst = dp_tx->pkt_burst; + dev->rx_queue_count = sfc_rx_queue_count; + dev->rx_descriptor_done = sfc_rx_descriptor_done; + dev->rx_descriptor_status = sfc_rx_descriptor_status; + dev->tx_descriptor_status = sfc_tx_descriptor_status; + dev->dev_ops = &sfc_eth_dev_secondary_ops; + + return 0; + +fail_dp_tx_multi_process: +fail_dp_tx: +fail_dp_rx_multi_process: +fail_dp_rx: + free(sap); + +fail_alloc_priv: + return rc; +} + static void sfc_register_dp(void) { /* Register once */ if (TAILQ_EMPTY(&sfc_dp_head)) { /* Prefer EF10 datapath */ + sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp); sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp); sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp); sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp); @@ -1485,32 +2131,77 @@ sfc_register_dp(void) static int sfc_eth_dev_init(struct rte_eth_dev *dev) { - struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint32_t logtype_main; + struct sfc_adapter *sa; int rc; const efx_nic_cfg_t *encp; - const struct ether_addr *from; + const struct rte_ether_addr *from; + int ret; + + if (sfc_efx_dev_class_get(pci_dev->device.devargs) != + SFC_EFX_DEV_CLASS_NET) { + SFC_GENERIC_LOG(DEBUG, + "Incompatible device class: skip probing, should be probed by other sfc driver."); + return 1; + } sfc_register_dp(); + logtype_main = sfc_register_logtype(&pci_dev->addr, + SFC_LOGTYPE_MAIN_STR, + RTE_LOG_NOTICE); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -sfc_eth_dev_secondary_init(dev, logtype_main); + /* Required for logging */ + ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix), + "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function, + dev->data->port_id); + if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) { + SFC_GENERIC_LOG(ERR, + "reserved log prefix is too short for " PCI_PRI_FMT, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + return -EINVAL; + } + sas->pci_addr = 
pci_dev->addr; + sas->port_id = dev->data->port_id; + + /* + * Allocate process private data from heap, since it should not + * be located in shared memory allocated using rte_malloc() API. + */ + sa = calloc(1, sizeof(*sa)); + if (sa == NULL) { + rc = ENOMEM; + goto fail_alloc_sa; + } + + dev->process_private = sa; + + /* Required for logging */ + sa->priv.shared = sas; + sa->priv.logtype_main = logtype_main; + sa->eth_dev = dev; /* Copy PCI device info to the dev->data */ rte_eth_copy_pci_info(dev, pci_dev); + dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; + dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; rc = sfc_kvargs_parse(sa); if (rc != 0) goto fail_kvargs_parse; - rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, - sfc_kvarg_bool_handler, &sa->debug_init); - if (rc != 0) - goto fail_kvarg_debug_init; - sfc_log_init(sa, "entry"); - dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); + dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0); if (dev->data->mac_addrs == NULL) { rc = ENOMEM; goto fail_mac_addrs; @@ -1540,8 +2231,8 @@ sfc_eth_dev_init(struct rte_eth_dev *dev) * The arguments are really reverse order in comparison to * Linux kernel. Copy from NIC config to Ethernet device data. */ - from = (const struct ether_addr *)(encp->enc_mac_addr); - ether_addr_copy(from, &dev->data->mac_addrs[0]); + from = (const struct rte_ether_addr *)(encp->enc_mac_addr); + rte_ether_addr_copy(from, &dev->data->mac_addrs[0]); sfc_adapter_unlock(sa); @@ -1549,6 +2240,8 @@ sfc_eth_dev_init(struct rte_eth_dev *dev) return 0; fail_attach: + sfc_eth_dev_clear_ops(dev); + fail_set_ops: sfc_unprobe(sa); @@ -1559,11 +2252,14 @@ fail_probe: dev->data->mac_addrs = NULL; fail_mac_addrs: -fail_kvarg_debug_init: sfc_kvargs_cleanup(sa); fail_kvargs_parse: sfc_log_init(sa, "failed %d", rc); + dev->process_private = NULL; + free(sa); + +fail_alloc_sa: SFC_ASSERT(rc > 0); return -rc; } @@ -1571,31 +2267,8 @@ fail_kvargs_parse: static int sfc_eth_dev_uninit(struct rte_eth_dev *dev) { - struct sfc_adapter *sa = dev->data->dev_private; - - sfc_log_init(sa, "entry"); - - sfc_adapter_lock(sa); - - sfc_detach(sa); - sfc_unprobe(sa); - - rte_free(dev->data->mac_addrs); - dev->data->mac_addrs = NULL; - - dev->dev_ops = NULL; - dev->rx_pkt_burst = NULL; - dev->tx_pkt_burst = NULL; + sfc_dev_close(dev); - sfc_kvargs_cleanup(sa); - - sfc_adapter_unlock(sa); - sfc_adapter_lock_fini(sa); - - sfc_log_init(sa, "done"); - - /* Required for logging, so cleanup last */ - sa->eth_dev = NULL; return 0; } @@ -1606,6 +2279,9 @@ static const struct rte_pci_id pci_id_sfc_efx_map[] = { { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) }, { .vendor_id = 0 /* sentinel */ } }; @@ -1613,7 +2289,7 @@ static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct sfc_adapter), sfc_eth_dev_init); + sizeof(struct sfc_adapter_shared), sfc_eth_dev_init); } static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev) @@ -1637,6 +2313,15 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " 
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
 	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
-	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
-	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
-	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
+	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
+	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
+	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
+
+RTE_INIT(sfc_driver_register_logtype)
+{
+	int ret;
+
+	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
+						   RTE_LOG_NOTICE);
+	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
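
A note on the Rx/Tx datapath selection in sfc_eth_dev_set_ops(): a datapath requested by name through the rx_datapath/tx_datapath kvargs is rejected unless the adapter advertises every HW/FW capability that datapath needs. The real check is sfc_dp_match_hw_fw_caps(), which is not part of this diff; the sketch below is a simplified stand-in under the assumption that hw_fw_caps is a required-capability bitmask, not the driver's verbatim code.

/* Minimal sketch, assuming hw_fw_caps is the set of capabilities a
 * datapath requires and avail_caps the set the NIC/firmware provides;
 * the struct name is a simplified stand-in. */
#include <stdbool.h>

struct dp_variant {
	const char *name;
	unsigned int hw_fw_caps;	/* e.g. SFC_DP_HW_FW_CAP_EF10 */
};

static bool
dp_match_hw_fw_caps(const struct dp_variant *dp, unsigned int avail_caps)
{
	/* Usable only if no required capability is missing. */
	return (dp->hw_fw_caps & ~avail_caps) == 0;
}

On the application side the selection would be requested through devargs, e.g. something along the lines of 0000:02:00.0,rx_datapath=ef10,tx_datapath=ef10 (exact option spelling and accepted values depend on the DPDK release).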
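
The sfc_strdup() helper and sfc_eth_dev_secondary_init() implement the same multi-process rule from two sides: the datapath *names* are duplicated with rte_malloc() so they live in shared memory that secondary processes can read, while anything carrying function pointers (struct sfc_adapter_priv, struct sfc_adapter) is calloc()'d from the local heap, because a function pointer is only meaningful in the address space that resolved it. A secondary process therefore re-binds its burst callbacks by looking the datapath up again by name. A reduced sketch with hypothetical stand-in types:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct dp { const char *name; /* + per-process burst callbacks */ };

static const struct dp known_dps[] = { { "efx" }, { "ef10" } };

/* Hypothetical equivalent of sfc_dp_find_rx_by_name(). */
static const struct dp *
dp_find_by_name(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(known_dps) / sizeof(known_dps[0]); i++) {
		if (strcmp(known_dps[i].name, name) == 0)
			return &known_dps[i];
	}
	return NULL;
}

/* Secondary process: never reuse the primary's struct dp pointer;
 * resolve it again in this address space from the shared name. */
static int
secondary_bind(const char *shared_dp_name, const struct dp **dpp)
{
	*dpp = dp_find_by_name(shared_dp_name);
	return (*dpp == NULL) ? ENOENT : 0;
}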
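
The log-prefix setup in sfc_eth_dev_init() uses the standard snprintf() truncation idiom: snprintf() returns the number of characters it would have written, so a return value greater than or equal to the buffer size means the output was silently cut short even though the call "succeeded". A self-contained illustration (the function name is hypothetical):

#include <stdio.h>

/* Returns 0 on success, -1 if the prefix did not fit or on error. */
static int
format_log_prefix(char *buf, size_t size, unsigned int port_id)
{
	int ret;

	ret = snprintf(buf, size, "PMD: sfc_efx #%u: ", port_id);
	if (ret < 0 || ret >= (int)size)
		return -1;	/* encoding error or truncation */
	return 0;
}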
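
Finally, the unwind style used throughout the patch (fail_dp_tx_name:, fail_dp_tx_caps: and fail_dp_tx: stacked on one another) is the classic cleanup ladder: each setup step gets a label, a failure jumps to the label that releases exactly what had been acquired so far, and steps with nothing to undo share a label. Note also the sfc convention of returning positive errno values internally and negating them at the driver boundary (SFC_ASSERT(rc > 0); return -rc;). A compact sketch of both conventions, not driver code:

#include <errno.h>
#include <stdlib.h>

static int
setup_two_resources(char **a_out, char **b_out)
{
	char *a, *b;
	int rc;

	a = malloc(16);
	if (a == NULL) {
		rc = ENOMEM;		/* positive errno, as in sfc */
		goto fail_alloc_a;
	}

	b = malloc(32);
	if (b == NULL) {
		rc = ENOMEM;
		goto fail_alloc_b;
	}

	*a_out = a;
	*b_out = b;
	return 0;

fail_alloc_b:
	free(a);
fail_alloc_a:
	return rc;			/* caller negates: return -rc */
}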