/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!encp->enc_hw_tx_insert_vlan_enabled)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	else
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;

#if EFSYS_OPT_RX_SCALE
	if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
	}
#endif

	if (sa->tso)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/* The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == sfc_recv_pkts)
		return ptypes;

	return NULL;
}
static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}
static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}
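/*
 * Report the current link state. rte_eth_link in device data is updated
 * with 64-bit atomic compare-and-set, so this callback may race with the
 * management event queue poller without taking the adapter lock.
 */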
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}
static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
256 sfc_dev_set_link_down(struct rte_eth_dev *dev)
258 struct sfc_adapter *sa = dev->data->dev_private;
260 sfc_log_init(sa, "entry");
262 sfc_adapter_lock(sa);
264 sfc_adapter_unlock(sa);
static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
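/*
 * Common helper for the promiscuous and all-multicast callbacks below.
 * The requested mode is remembered in the port state and is pushed to the
 * hardware via sfc_set_rx_mode() only while the adapter is started.
 */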
static void
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = dev->data->dev_private;
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if ((sa->state == SFC_ADAPTER_STARTED) &&
		    (sfc_set_rx_mode(sa) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode",
				 ((enabled) ? "enable" : "disable"), desc);
		}
	}

	sfc_adapter_unlock(sa);
}
static void
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
}

static void
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
}

static void
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
}

static void
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
}
static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_rxq *rxq = queue;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (rxq == NULL)
		return;

	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index(rxq);

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}
static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_txq *txq = queue;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (txq == NULL)
		return;

	sw_index = sfc_txq_sw_index(txq);

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
	sa->eth_dev->data->tx_queues[sw_index] = NULL;

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}
static void
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;

	rte_spinlock_lock(&port->mac_stats_lock);

	if (sfc_port_update_mac_stats(sa) != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which are always supported
		 * on EF10. If some stat is not supported by the current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
}
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}
static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}
static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}
static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);

fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}
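/*
 * MTU is validated and applied in terms of the MAC PDU size (MTU plus
 * Ethernet framing overheads). If the adapter is already started, it is
 * stopped, reconfigured with the new PDU and started again; on failure
 * the old PDU size is restored.
 */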
static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu = 0;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static void
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;

	sfc_adapter_lock(sa);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_info(sa, "the port is not started");
		sfc_info(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, one needs to simply restart adapter
		 * so that the new MAC address will be taken from an outer
		 * storage and set flawlessly by means of sfc_start() call
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	sfc_adapter_unlock(sa);
}
static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
		     uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	uint8_t *mc_addrs_p = NULL;
	int rc;
	unsigned int i;

	if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX);
		return -EINVAL;
	}

	if (nb_mc_addr != 0) {
		uint8_t *mc_addrs;

		mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr,
					EFX_MAC_ADDR_LEN, 0);
		if (mc_addrs_p == NULL)
			return -ENOMEM;

		mc_addrs = mc_addrs_p;
		for (i = 0; i < nb_mc_addr; ++i) {
			(void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
					 EFX_MAC_ADDR_LEN);
			mc_addrs += EFX_MAC_ADDR_LEN;
		}
	}

	rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr);

	rte_free(mc_addrs_p);

	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_adapter_lock(sa);

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	rxq_info = &sa->rxq_info[rx_queue_id];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq != NULL);

	qinfo->mp = rxq->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
	qinfo->nb_desc = rxq_info->entries;

	sfc_adapter_unlock(sa);
}
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_txq_info *txq_info;

	sfc_adapter_lock(sa);

	SFC_ASSERT(tx_queue_id < sa->txq_count);

	txq_info = &sa->txq_info[tx_queue_id];
	SFC_ASSERT(txq_info->txq != NULL);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.txq_flags = txq_info->txq->flags;
	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;

	sfc_adapter_unlock(sa);
}
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	return sfc_rx_qdesc_npending(sa, rx_queue_id);
}
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_rxq *rxq = queue;

	return sfc_rx_qdesc_done(rxq, offset);
}
static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static void
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
}
static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_tx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static void
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
}
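/*
 * The RSS callbacks below are compiled in only when libefx is built with
 * RX scaling support (EFSYS_OPT_RX_SCALE).
 */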
#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	sfc_adapter_lock(sa);

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date
	 */
	rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
	rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);

	sfc_adapter_unlock(sa);

	return 0;
}
static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int efx_hash_types;
	int rc = 0;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
		sfc_err(sa, "RSS key size is wrong (should be %lu)",
			sizeof(sa->rss_key));
		return -EINVAL;
	}

	if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				   efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
						  sizeof(sa->rss_key));
			if (rc != 0)
				goto fail_scale_key_set;
		}

		rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
	}

	sa->rss_hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				  sa->rss_hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
	sfc_adapter_unlock(sa);
	return -rc;
}
static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int entry;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	sfc_adapter_lock(sa);

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
	}

	sfc_adapter_unlock(sa);

	return 0;
}
static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= sa->rss_channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
	if (rc == 0)
		rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));

bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
#endif
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_set_link_up		= sfc_dev_set_link_up,
	.dev_set_link_down		= sfc_dev_set_link_down,
	.dev_close			= sfc_dev_close,
	.promiscuous_enable		= sfc_dev_promisc_enable,
	.promiscuous_disable		= sfc_dev_promisc_disable,
	.allmulticast_enable		= sfc_dev_allmulti_enable,
	.allmulticast_disable		= sfc_dev_allmulti_disable,
	.link_update			= sfc_dev_link_update,
	.stats_get			= sfc_stats_get,
	.xstats_get			= sfc_xstats_get,
	.xstats_get_names		= sfc_xstats_get_names,
	.dev_infos_get			= sfc_dev_infos_get,
	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
	.mtu_set			= sfc_dev_set_mtu,
	.rx_queue_start			= sfc_rx_queue_start,
	.rx_queue_stop			= sfc_rx_queue_stop,
	.tx_queue_start			= sfc_tx_queue_start,
	.tx_queue_stop			= sfc_tx_queue_stop,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.rx_queue_count			= sfc_rx_queue_count,
	.rx_descriptor_done		= sfc_rx_descriptor_done,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
	.flow_ctrl_get			= sfc_flow_ctrl_get,
	.flow_ctrl_set			= sfc_flow_ctrl_set,
	.mac_addr_set			= sfc_mac_addr_set,
#if EFSYS_OPT_RX_SCALE
	.reta_update			= sfc_dev_rss_reta_update,
	.reta_query			= sfc_dev_rss_reta_query,
	.rss_hash_update		= sfc_dev_rss_hash_update,
	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
#endif
	.set_mc_addr_list		= sfc_set_mc_addr_list,
	.rxq_info_get			= sfc_rx_queue_info_get,
	.txq_info_get			= sfc_tx_queue_info_get,
};
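/*
 * Device initialisation invoked from the PCI probe path: parse device
 * arguments, allocate the MAC address array, attach to the NIC and plug
 * in the eth_dev callbacks and datapath burst routines.
 */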
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	return 0;
}
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};
static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);