diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index d55b532e20..e4f9f93849 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1,42 +1,15 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
  */

-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_string_fns.h>
 #include <rte_dev.h>
 #include <rte_spinlock.h>
+#include <rte_kvargs.h>

 #include "fm10k.h"
 #include "base/fm10k_api.h"
@@ -51,10 +24,25 @@
 #define MAX_QUERY_SWITCH_STATE_TIMES 10
 /* Wait interval to get switch status */
 #define WAIT_SWITCH_MSG_US 100000
+/* A period of quiescence for switch */
+#define FM10K_SWITCH_QUIESCE_US 100000
 /* Number of chars per uint32 type */
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

+/* default 1:1 map from queue ID to interrupt vector ID */
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
+
+/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
+#define MAX_LPORT_NUM	128
+#define GLORT_FD_Q_BASE	0x40
+#define GLORT_PF_MASK	0xFFC0
+#define GLORT_FD_MASK	GLORT_PF_MASK
+#define GLORT_FD_INDEX	GLORT_FD_Q_BASE
+
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -68,6 +56,58 @@ static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
 static void fm10k_tx_queue_release(void *queue);
 static void fm10k_rx_queue_release(void *queue);
 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
+static void fm10k_set_tx_function(struct rte_eth_dev *dev);
+static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+				struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
+struct fm10k_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned offset;
+};
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
+	{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
+	{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
+	{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
+	{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
+	{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
+	{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
+	{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
+	{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
+		nodesc_drop)},
+};
+
+#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
+		sizeof(fm10k_hw_stats_strings[0]))
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
+	{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
+	{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
+	{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
+};
+
+#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
+		sizeof(fm10k_hw_stats_rx_q_strings[0]))
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
+	{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
+	{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
+};
+
+#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
+		sizeof(fm10k_hw_stats_tx_q_strings[0]))
+
+#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
+		(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

 static void
 fm10k_mbx_initlock(struct fm10k_hw *hw)
 {
@@ -88,6 +128,65 @@ fm10k_mbx_unlock(struct fm10k_hw *hw)
 	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
 }

+/* Stubs needed for linkage when vPMD is disabled */
+__rte_weak int
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+	return -1;
+}
+
+__rte_weak uint16_t
+fm10k_recv_pkts_vec(
+	__rte_unused void *rx_queue,
+	__rte_unused struct rte_mbuf **rx_pkts,
+	__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+__rte_weak uint16_t
+fm10k_recv_scattered_pkts_vec(
+		__rte_unused void *rx_queue,
+		__rte_unused struct rte_mbuf **rx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+__rte_weak int
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+
+{
+	return -1;
+}
+
+__rte_weak void
+fm10k_rx_queue_release_mbufs_vec(
+		__rte_unused struct fm10k_rx_queue *rxq)
+{
+	return;
+}
+
+__rte_weak void
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+	return;
+}
+
+__rte_weak int
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+	return -1;
+}
+
+__rte_weak uint16_t
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+			   __rte_unused struct rte_mbuf **tx_pkts,
+			   __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
 /*
  * reset queue to initial state, allocate software buffers used when starting
  * device.
@@ -98,6 +197,7 @@ fm10k_mbx_unlock(struct fm10k_hw *hw)
 static inline int
 rx_queue_reset(struct fm10k_rx_queue *q)
 {
+	static const union fm10k_rx_desc zero = {{0} };
 	uint64_t dma_addr;
 	int i, diag;
 	PMD_INIT_FUNC_TRACE();
@@ -118,6 +218,15 @@ rx_queue_reset(struct fm10k_rx_queue *q)
 		q->hw_ring[i].q.hdr_addr = dma_addr;
 	}

+	/* initialize extra software ring entries. Space for these extra
+	 * entries is always allocated.
+	 */
+	memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
+	for (i = 0; i < q->nb_fake_desc; ++i) {
+		q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
+		q->hw_ring[q->nb_desc + i] = zero;
+	}
+
 	q->next_dd = 0;
 	q->next_alloc = 0;
 	q->next_trigger = q->alloc_thresh - 1;
@@ -143,6 +252,10 @@ rx_queue_clean(struct fm10k_rx_queue *q)
 	for (i = 0; i < q->nb_desc; ++i)
 		q->hw_ring[i] = zero;

+	/* zero faked descriptors */
+	for (i = 0; i < q->nb_fake_desc; ++i)
+		q->hw_ring[q->nb_desc + i] = zero;
+
 	/* vPMD driver has a different way of releasing mbufs. */
 	if (q->rx_using_sse) {
 		fm10k_rx_queue_release_mbufs_vec(q);
@@ -328,7 +441,6 @@ fm10k_check_mq_mode(struct rte_eth_dev *dev)
 }

 static const struct fm10k_txq_ops def_txq_ops = {
-	.release_mbufs = tx_queue_free,
 	.reset = tx_queue_reset,
 };

@@ -339,8 +451,6 @@ fm10k_dev_configure(struct rte_eth_dev *dev)

 	PMD_INIT_FUNC_TRACE();

-	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
-		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 	/* multipe queue mode checking */
 	ret = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -349,14 +459,11 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}

+	dev->data->scattered_rx = 0;
+
 	return 0;
 }

-/* fls = find last set bit = 32 minus the number of leading zeros */
-#ifndef fls
-#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
-#endif
-
 static void
 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
 {
@@ -402,10 +509,11 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};

-	if (dev->data->nb_rx_queues == 1 ||
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
-	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
+	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
+		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
+	}

 	/* random key is rss_intel_key (default) or user provided (rss_key) */
 	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
@@ -458,22 +566,11 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 }

 static void
-fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
-	uint16_t nb_lport_old, uint16_t nb_lport_new)
+fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t i;

-	fm10k_mbx_lock(hw);
-	/* Disable previous logic ports */
-	if (nb_lport_old)
-		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
-				nb_lport_old, false);
-	/* Enable new logic ports */
-	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
-			nb_lport_new, true);
-	fm10k_mbx_unlock(hw);
-
 	for (i = 0; i < nb_lport_new; i++) {
 		/* Set unicast mode by default. App can change
 		 * to other mode in other API func.
 		 */
@@ -493,7 +590,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	struct fm10k_macvlan_filter_info *macvlan;
 	uint16_t nb_queue_pools = 0; /* pool number in configuration */
-	uint16_t nb_lport_new, nb_lport_old;
+	uint16_t nb_lport_new;

 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
@@ -511,14 +608,13 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	if (macvlan->nb_queue_pools == nb_queue_pools)
 		return;

-	nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
 	nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
-	fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
+	fm10k_dev_logic_port_update(dev, nb_lport_new);

 	/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
 	memset(dev->data->mac_addrs, 0,
 		ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
-	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+	ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
 		&dev->data->mac_addrs[0]);
 	memset(macvlan, 0, sizeof(*macvlan));
 	macvlan->nb_queue_pools = nb_queue_pools;
@@ -555,6 +651,19 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
 			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
 			return -1;
 		}
+		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
+		 * register is read-only for VF.
+		 */
+		if (fm10k_check_ftag(dev->device->devargs)) {
+			if (hw->mac.type == fm10k_mac_pf) {
+				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
+						FM10K_PFVTCTL_FTAG_DESC_ENABLE);
+				PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
+			} else {
+				PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
+				return -ENOTSUP;
+			}
+		}

 		/* set location and size for descriptor ring */
 		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
@@ -562,7 +671,15 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
 		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
 				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
 		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+
+		/* assign default SGLORT for each TX queue by PF */
+		if (hw->mac.type == fm10k_mac_pf)
+			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i),
+					hw->mac.dglort_map);
 	}
+
+	/* set up vector or scalar TX function as appropriate */
+	fm10k_set_tx_function(dev);
+
 	return 0;
 }

@@ -570,17 +687,37 @@ static int
 fm10k_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct fm10k_macvlan_filter_info *macvlan;
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	int i, ret;
 	struct fm10k_rx_queue *rxq;
 	uint64_t base_addr;
 	uint32_t size;
 	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+	uint32_t logic_port = hw->mac.dglort_map;
 	uint16_t buf_size;
-
-	/* Disable RXINT to avoid possible interrupt */
-	for (i = 0; i < hw->mac.max_queues; i++)
+	uint16_t queue_stride = 0;
+
+	/* enable RXINT for interrupt mode */
+	i = 0;
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (; i < dev->data->nb_rx_queues; i++) {
+			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
+			if (hw->mac.type == fm10k_mac_pf)
+				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+					FM10K_ITR_AUTOMASK |
+					FM10K_ITR_MASK_CLEAR);
+			else
+				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+					FM10K_ITR_AUTOMASK |
+					FM10K_ITR_MASK_CLEAR);
+		}
+	}
+	/* Disable other RXINT to avoid possible interrupt */
+	for (; i < hw->mac.max_queues; i++)
 		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
-			3 << FM10K_RXINT_TIMER_SHIFT);
+			3 << FM10K_RXINT_TIMER_SHIFT);

 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
@@ -615,12 +752,13 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 			buf_size -= FM10K_RX_DATABUF_ALIGN;

 		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
-				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
+				(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
+				FM10K_SRRCTL_LOOPBACK_SUPPRESS);

 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			dev->data->dev_conf.rxmode.enable_scatter) {
+			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -642,6 +780,18 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 	/* Decide the best RX function */
 	fm10k_set_rx_function(dev);

+	/* update RX_SGLORT for loopback suppress*/
+	if (hw->mac.type != fm10k_mac_pf)
+		return 0;
+	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+	if (macvlan->nb_queue_pools)
+		queue_stride = dev->data->nb_rx_queues /
+			macvlan->nb_queue_pools;
+	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+		if (i && queue_stride && !(i % queue_stride))
+			logic_port++;
+		FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
+	}
+
 	return 0;
 }

@@ -649,51 +799,50 @@ static int
 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int err = -1;
+	int err;
 	uint32_t reg;
 	struct fm10k_rx_queue *rxq;

 	PMD_INIT_FUNC_TRACE();

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-		err = rx_queue_reset(rxq);
-		if (err == -ENOMEM) {
-			PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
-			return err;
-		} else if (err == -EINVAL) {
-			PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
-				" %d", err);
-			return err;
-		}
+	rxq = dev->data->rx_queues[rx_queue_id];
+	err = rx_queue_reset(rxq);
+	if (err == -ENOMEM) {
+		PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+		return err;
+	} else if (err == -EINVAL) {
+		PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+			" %d", err);
+		return err;
+	}

-		/* Setup the HW Rx Head and Tail Descriptor Pointers
-		 * Note: this must be done AFTER the queue is enabled on real
-		 * hardware, but BEFORE the queue is enabled when using the
-		 * emulation platform. Do it in both places for now and remove
-		 * this comment and the following two register writes when the
-		 * emulation platform is no longer being used.
-		 */
-		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers
+	 * Note: this must be done AFTER the queue is enabled on real
+	 * hardware, but BEFORE the queue is enabled when using the
+	 * emulation platform. Do it in both places for now and remove
+	 * this comment and the following two register writes when the
+	 * emulation platform is no longer being used.
+	 */
+	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

-		/* Set PF ownership flag for PF devices */
-		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
-		if (hw->mac.type == fm10k_mac_pf)
-			reg |= FM10K_RXQCTL_PF;
-		reg |= FM10K_RXQCTL_ENABLE;
-		/* enable RX queue */
-		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
-		FM10K_WRITE_FLUSH(hw);
+	/* Set PF ownership flag for PF devices */
+	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+	if (hw->mac.type == fm10k_mac_pf)
+		reg |= FM10K_RXQCTL_PF;
+	reg |= FM10K_RXQCTL_ENABLE;
+	/* enable RX queue */
+	FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+	FM10K_WRITE_FLUSH(hw);

-		/* Setup the HW Rx Head and Tail Descriptor Pointers
-		 * Note: this must be done AFTER the queue is enabled
-		 */
-		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
-	}
+	/* Setup the HW Rx Head and Tail Descriptor Pointers
+	 * Note: this must be done AFTER the queue is enabled
+	 */
+	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 static int
@@ -703,13 +852,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)

 	PMD_INIT_FUNC_TRACE();

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		/* Disable RX queue */
-		rx_queue_disable(hw, rx_queue_id);
+	/* Disable RX queue */
+	rx_queue_disable(hw, rx_queue_id);

-		/* Free mbuf and clean HW ring */
-		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
-	}
+	/* Free mbuf and clean HW ring */
+	rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
@@ -721,27 +869,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/** @todo - this should be defined in the shared code */
 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
 	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
-	int err = 0;
+	struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

 	PMD_INIT_FUNC_TRACE();

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
-
-		q->ops->reset(q);
+	q->ops->reset(q);

-		/* reset head and tail pointers */
-		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+	/* reset head and tail pointers */
+	FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

-		/* enable TX queue */
-		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
-					FM10K_TXDCTL_ENABLE | txdctl);
-		FM10K_WRITE_FLUSH(hw);
-	} else
-		err = -1;
+	/* enable TX queue */
+	FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+				FM10K_TXDCTL_ENABLE | txdctl);
+	FM10K_WRITE_FLUSH(hw);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 static int
@@ -751,10 +895,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)

 	PMD_INIT_FUNC_TRACE();

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		tx_queue_disable(hw, tx_queue_id);
-		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
-	}
+	tx_queue_disable(hw, tx_queue_id);
+	tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
@@ -774,7 +917,7 @@ fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();

 	/* Return if it didn't acquire valid glort range */
-	if (!fm10k_glort_valid(hw))
+	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 		return;

 	fm10k_mbx_lock(hw);
@@ -796,7 +939,7 @@ fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();

 	/* Return if it didn't acquire valid glort range */
-	if (!fm10k_glort_valid(hw))
+	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 		return;

 	if (dev->data->all_multicast == 1)
@@ -822,7 +965,7 @@ fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();

 	/* Return if it didn't acquire valid glort range */
-	if (!fm10k_glort_valid(hw))
+	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 		return;

 	/* If promiscuous mode is enabled, it doesn't make sense to enable
@@ -853,7 +996,7 @@ fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();

 	/* Return if it didn't acquire valid glort range */
-	if (!fm10k_glort_valid(hw))
+	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 		return;

 	if (dev->data->promiscuous) {
@@ -876,24 +1019,32 @@ static void
 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t dglortdec, pool_len, rss_len, i;
+	uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
 	uint16_t nb_queue_pools;
 	struct fm10k_macvlan_filter_info *macvlan;

 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 	nb_queue_pools = macvlan->nb_queue_pools;
-	pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
-	rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
-	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
-
-	/* Establish only MAP 0 as valid */
-	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
+	pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
+	rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
+
+	/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
+	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
+	dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+			hw->mac.dglort_map;
+	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
 	/* Configure VMDQ/RSS DGlort Decoder */
 	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

+	/* Flow Director configurations, only queue number is valid. */
+	dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
+	dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+			(hw->mac.dglort_map + GLORT_FD_Q_BASE);
+	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
+	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
+
 	/* Invalidate all other GLORT entries */
-	for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+	for (i = 2; i < FM10K_DGLORT_COUNT; i++)
 		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
 				FM10K_DGLORTMAP_NONE);
 }
@@ -932,6 +1083,9 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 		return diag;
 	}

+	if (fm10k_dev_rxq_interrupt_setup(dev))
+		return -EIO;
+
 	diag = fm10k_dev_rx_init(dev);
 	if (diag) {
 		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
@@ -977,12 +1131,17 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

+	fm10k_link_update(dev, 0);
+
 	return 0;
 }

 static void
 fm10k_dev_stop(struct rte_eth_dev *dev)
 {
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	int i;

 	PMD_INIT_FUNC_TRACE();
@@ -994,6 +1153,24 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
 	if (dev->data->rx_queues)
 		for (i = 0; i < dev->data->nb_rx_queues; i++)
 			fm10k_dev_rx_queue_stop(dev, i);
+
+	/* Disable datapath event */
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+				3 << FM10K_RXINT_TIMER_SHIFT);
+			if (hw->mac.type == fm10k_mac_pf)
+				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+					FM10K_ITR_MASK_SET);
+			else
+				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+					FM10K_ITR_MASK_SET);
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
 }

 static void
@@ -1007,7 +1184,7 @@ fm10k_dev_queue_release(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
 			struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

-			txq->ops->release_mbufs(txq);
+			tx_queue_free(txq);
 		}
 	}
@@ -1021,18 +1198,17 @@ static void
 fm10k_dev_close(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint16_t nb_lport;
-	struct fm10k_macvlan_filter_info *macvlan;

 	PMD_INIT_FUNC_TRACE();

-	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
-	nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
 	fm10k_mbx_lock(hw);
 	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
-		nb_lport, false);
+		MAX_LPORT_NUM, false);
 	fm10k_mbx_unlock(hw);

+	/* allow 100ms for device to quiesce */
+	rte_delay_us(FM10K_SWITCH_QUIESCE_US);
+
 	/* Stop mailbox service first */
 	fm10k_close_mbx_service(hw);
 	fm10k_dev_stop(dev);
@@ -1044,22 +1220,101 @@ static int
 fm10k_link_update(struct rte_eth_dev *dev,
 	__rte_unused int wait_to_complete)
 {
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();

-	/* The host-interface link is always up. The speed is ~50Gbps per Gen3
-	 * x8 PCIe interface. For now, we leave the speed undefined since there
-	 * is no 50Gbps Ethernet. */
-	dev->data->dev_link.link_speed = 0;
+	dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
 	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_status = 1;
+	dev->data->dev_link.link_status =
+		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;

 	return 0;
 }

-static void
+static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+	unsigned i, q;
+	unsigned count = 0;
+
+	if (xstats_names != NULL) {
+		/* Note: limit checked in rte_eth_xstats_names() */
+
+		/* Global stats */
+		for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s", fm10k_hw_stats_strings[count].name);
+			count++;
+		}
+
+		/* PF queue stats */
+		for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+			for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+				snprintf(xstats_names[count].name,
+					sizeof(xstats_names[count].name),
+					"rx_q%u_%s", q,
+					fm10k_hw_stats_rx_q_strings[i].name);
+				count++;
+			}
+			for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+				snprintf(xstats_names[count].name,
+					sizeof(xstats_names[count].name),
+					"tx_q%u_%s", q,
+					fm10k_hw_stats_tx_q_strings[i].name);
+				count++;
+			}
+		}
+	}
+	return FM10K_NB_XSTATS;
+}
+
+static int
+fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		 unsigned n)
+{
+	struct fm10k_hw_stats *hw_stats =
+		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	unsigned i, q, count = 0;
+
+	if (n < FM10K_NB_XSTATS)
+		return FM10K_NB_XSTATS;
+
+	/* Global stats */
+	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+			fm10k_hw_stats_strings[count].offset);
+		xstats[count].id = count;
+		count++;
+	}
+
+	/* PF queue stats */
+	for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+		for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+			xstats[count].value =
+				*(uint64_t *)(((char *)&hw_stats->q[q]) +
+				fm10k_hw_stats_rx_q_strings[i].offset);
+			xstats[count].id = count;
+			count++;
+		}
+		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+			xstats[count].value =
+				*(uint64_t *)(((char *)&hw_stats->q[q]) +
+				fm10k_hw_stats_tx_q_strings[i].offset);
+			xstats[count].id = count;
+			count++;
+		}
+	}
+
+	return FM10K_NB_XSTATS;
+}
+
+static int
 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	uint64_t ipackets, opackets, ibytes, obytes;
+	uint64_t ipackets, opackets, ibytes, obytes, imissed;
 	struct fm10k_hw *hw =
 		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_hw_stats *hw_stats =
@@ -1070,22 +1325,26 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)

 	fm10k_update_hw_stats(hw, hw_stats);

-	ipackets = opackets = ibytes = obytes = 0;
+	ipackets = opackets = ibytes = obytes = imissed = 0;
 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
 		(i < hw->mac.max_queues); ++i) {
 		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
 		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
 		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
 		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
+		stats->q_errors[i] = hw_stats->q[i].rx_drops.count;
 		ipackets += stats->q_ipackets[i];
 		opackets += stats->q_opackets[i];
 		ibytes += stats->q_ibytes[i];
 		obytes += stats->q_obytes[i];
+		imissed += stats->q_errors[i];
 	}
 	stats->ipackets = ipackets;
 	stats->opackets = opackets;
 	stats->ibytes = ibytes;
 	stats->obytes = obytes;
+	stats->imissed = imissed;
+	return 0;
 }

 static void
@@ -1106,6 +1365,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

 	PMD_INIT_FUNC_TRACE();

@@ -1115,25 +1375,29 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = hw->mac.max_queues;
 	dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
 	dev_info->max_hash_mac_addrs = 0;
-	dev_info->max_vfs = dev->pci_dev->max_vfs;
+	dev_info->max_vfs = pdev->max_vfs;
 	dev_info->vmdq_pool_base = 0;
 	dev_info->vmdq_queue_base = 0;
 	dev_info->max_vmdq_pools = ETH_32_POOLS;
 	dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;

 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
+	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+					ETH_RSS_IPV6 |
+					ETH_RSS_IPV6_EX |
+					ETH_RSS_NONFRAG_IPV4_TCP |
+					ETH_RSS_NONFRAG_IPV6_TCP |
+					ETH_RSS_IPV6_TCP_EX |
+					ETH_RSS_NONFRAG_IPV4_UDP |
+					ETH_RSS_NONFRAG_IPV6_UDP |
+					ETH_RSS_IPV6_UDP_EX;

 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1143,6 +1407,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};

 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1153,8 +1418,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		},
 		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
 		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
-		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+		.offloads = 0,
 	};

 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1167,8 +1431,63 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_max = FM10K_MAX_TX_DESC,
 		.nb_min = FM10K_MIN_TX_DESC,
 		.nb_align = FM10K_MULT_TX_DESC,
+		.nb_seg_max = FM10K_TX_MAX_SEG,
+		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
+
+	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
+			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+}
+
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	if (dev->rx_pkt_burst == fm10k_recv_pkts ||
+	    dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
+		static uint32_t ptypes[] = {
+			/* refers to rx_desc_to_ol_flags() */
+			RTE_PTYPE_L2_ETHER,
+			RTE_PTYPE_L3_IPV4,
+			RTE_PTYPE_L3_IPV4_EXT,
+			RTE_PTYPE_L3_IPV6,
+			RTE_PTYPE_L3_IPV6_EXT,
+			RTE_PTYPE_L4_TCP,
+			RTE_PTYPE_L4_UDP,
+			RTE_PTYPE_UNKNOWN
+		};

+		return ptypes;
+	} else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
+		   dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
+		static uint32_t ptypes_vec[] = {
+			/* refers to fm10k_desc_to_pktype_v() */
+			RTE_PTYPE_L3_IPV4,
+			RTE_PTYPE_L3_IPV4_EXT,
+			RTE_PTYPE_L3_IPV6,
+			RTE_PTYPE_L3_IPV6_EXT,
+			RTE_PTYPE_L4_TCP,
+			RTE_PTYPE_L4_UDP,
+			RTE_PTYPE_TUNNEL_GENEVE,
+			RTE_PTYPE_TUNNEL_NVGRE,
+			RTE_PTYPE_TUNNEL_VXLAN,
+			RTE_PTYPE_TUNNEL_GRE,
+			RTE_PTYPE_UNKNOWN
+		};

+		return ptypes_vec;
+	}

+	return NULL;
+}
+#else
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	return NULL;
 }
+#endif

 static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
@@ -1185,12 +1504,12 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

 	if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
 		PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	if (vlan_id > ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	vid_idx = FM10K_VFTA_IDX(vlan_id);
@@ -1202,7 +1521,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
 			"in the VLAN filter table");
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	fm10k_mbx_lock(hw);
@@ -1210,7 +1529,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	fm10k_mbx_unlock(hw);
 	if (result != FM10K_SUCCESS) {
 		PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
-		return (-EIO);
+		return -EIO;
 	}

 	for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
@@ -1231,7 +1550,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	}
 	if (result != FM10K_SUCCESS) {
 		PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
-		return (-EIO);
+		return -EIO;
 	}

 	if (on) {
@@ -1244,25 +1563,30 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	return 0;
 }

-static void
-fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
+static int
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (!(dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_STRIP))
 			PMD_INIT_LOG(ERR, "VLAN stripping is "
 					"always on in fm10k");
 	}

 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_INIT_LOG(ERR, "VLAN QinQ is not "
 					"supported in fm10k");
 	}

 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (!(dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_FILTER))
 			PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
 	}
+
+	return 0;
 }

 /* Add/Remove a MAC address, and update filters to main VSI */
@@ -1347,9 +1671,9 @@ static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
 }

 /* Add a MAC address, and update filters */
-static void
+static int
 fm10k_macaddr_add(struct rte_eth_dev *dev,
-		struct ether_addr *mac_addr,
+		struct rte_ether_addr *mac_addr,
 		uint32_t index,
 		uint32_t pool)
 {
@@ -1358,6 +1682,7 @@ fm10k_macaddr_add(struct rte_eth_dev *dev,
 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
 	macvlan->mac_vmdq_id[index] = pool;
+	return 0;
 }

 /* Remove a MAC address, and update filters */
@@ -1382,34 +1707,6 @@ check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
 	return 0;
 }

-/*
- * Create a memzone for hardware descriptor rings. Malloc cannot be used since
- * the physical address is required. If the memzone is already created, then
- * this function returns a pointer to the existing memzone.
- */
-static inline const struct rte_memzone *
-allocate_hw_ring(const char *driver_name, const char *ring_name,
-	uint8_t port_id, uint16_t queue_id, int socket_id,
-	uint32_t size, uint32_t align)
-{
-	char name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
-		 driver_name, ring_name, port_id, queue_id, socket_id);
-
-	/* return the memzone if it already exists */
-	mz = rte_memzone_lookup(name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
-					   RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
-#endif
-}

 static inline int
 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
@@ -1442,7 +1739,7 @@ handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
 			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
 			FM10K_RX_FREE_THRESH_MIN(q),
 			FM10K_RX_FREE_THRESH_DIV(q));
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	q->alloc_thresh = rx_free_thresh;
@@ -1483,22 +1780,46 @@ mempool_element_size_valid(struct rte_mempool *mp)
 	return 1;
 }

+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+			   DEV_RX_OFFLOAD_VLAN_FILTER |
+			   DEV_RX_OFFLOAD_IPV4_CKSUM |
+			   DEV_RX_OFFLOAD_UDP_CKSUM |
+			   DEV_RX_OFFLOAD_TCP_CKSUM |
+			   DEV_RX_OFFLOAD_JUMBO_FRAME |
+			   DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
 static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;

 	PMD_INIT_FUNC_TRACE();

+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
 		PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	/* make sure a valid number of descriptors have been requested */
@@ -1510,7 +1831,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 			"and a multiple of %u",
 			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
 			FM10K_MULT_RX_DESC);
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	/*
@@ -1528,27 +1849,29 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 			socket_id);
 	if (q == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}

 	/* setup queue */
 	q->mp = mp;
 	q->nb_desc = nb_desc;
+	q->nb_fake_desc = FM10K_MULT_RX_DESC;
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
-		return (-EINVAL);
+		return -EINVAL;

 	/* allocate memory for the software ring */
 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
-					nb_desc * sizeof(struct rte_mbuf *),
-					RTE_CACHE_LINE_SIZE, socket_id);
+			(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
+			RTE_CACHE_LINE_SIZE, socket_id);
 	if (q->sw_ring == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
 		rte_free(q);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}

 	/*
@@ -1556,21 +1879,17 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	 * enough to hold the maximum ring size is requested to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
-				dev->data->port_id, queue_id, socket_id,
-				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
+	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+				      FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+				      socket_id);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
 		rte_free(q->sw_ring);
 		rte_free(q);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
-	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
-	q->hw_ring_phys_addr = mz->phys_addr;
-#endif
+	q->hw_ring_phys_addr = mz->iova;

 	/* Check if number of descs satisfied Vector requirement */
 	if (!rte_is_power_of_2(nb_desc)) {
@@ -1619,7 +1938,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
 			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
 			FM10K_TX_FREE_THRESH_MIN(q),
 			FM10K_TX_FREE_THRESH_DIV(q));
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	q->free_thresh = tx_free_thresh;
@@ -1643,7 +1962,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
 			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
 			FM10K_TX_RS_THRESH_MIN(q),
 			FM10K_TX_RS_THRESH_DIV(q));
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	q->rs_thresh = tx_rs_thresh;
@@ -1651,6 +1970,25 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
 	return 0;
 }

+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_MULTI_SEGS |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM |
+			  DEV_TX_OFFLOAD_UDP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_TSO);
+}
+
 static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		uint16_t nb_desc, unsigned int socket_id,
@@ -1659,9 +1997,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;

 	PMD_INIT_FUNC_TRACE();

+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
 				FM10K_MULT_TX_DESC, nb_desc)) {
@@ -1671,7 +2012,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 			"and a multiple of %u",
 			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
 			FM10K_MULT_TX_DESC);
-		return (-EINVAL);
+		return -EINVAL;
 	}

 	/*
@@ -1682,7 +2023,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	if (dev->data->tx_queues[queue_id] != NULL) {
 		struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];

-		txq->ops->release_mbufs(txq);
+		tx_queue_free(txq);
 		dev->data->tx_queues[queue_id] = NULL;
 	}

@@ -1691,18 +2032,19 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 			socket_id);
 	if (q == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}

 	/* setup queue */
 	q->nb_desc = nb_desc;
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
 	if (handle_txconf(q, conf))
-		return (-EINVAL);
+		return -EINVAL;

 	/* allocate memory for the software ring */
 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
@@ -1711,7 +2053,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	if (q->sw_ring == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
 		rte_free(q);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}

 	/*
@@ -1719,21 +2061,17 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	 * enough to hold the maximum ring size is requested to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
-				dev->data->port_id, queue_id, socket_id,
-				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
+	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+				      FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+				      socket_id);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
 		rte_free(q->sw_ring);
 		rte_free(q);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
-	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
-	q->hw_ring_phys_addr = mz->phys_addr;
-#endif
+	q->hw_ring_phys_addr = mz->iova;

 	/*
 	 * allocate memory for the RS bit tracker. Enough slots to hold the
@@ -1747,7 +2085,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
 		rte_free(q->sw_ring);
 		rte_free(q);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}

 	dev->data->tx_queues[queue_id] = q;
@@ -1760,7 +2098,7 @@ fm10k_tx_queue_release(void *queue)
 	struct fm10k_tx_queue *q = queue;
 	PMD_INIT_FUNC_TRACE();

-	q->ops->release_mbufs(q);
+	tx_queue_free(q);
 }

 static int
@@ -1866,8 +2204,8 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev,

 	PMD_INIT_FUNC_TRACE();

-	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
-		FM10K_RSSRK_ENTRIES_PER_REG)
+	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+		FM10K_RSSRK_ENTRIES_PER_REG))
 		return -EINVAL;

 	if (hf == 0)
@@ -1909,8 +2247,8 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,

 	PMD_INIT_FUNC_TRACE();

-	if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
-		FM10K_RSSRK_ENTRIES_PER_REG)
+	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+		FM10K_RSSRK_ENTRIES_PER_REG))
 		return -EINVAL;

 	if (key != NULL)
@@ -1941,14 +2279,14 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
 	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

 	/* Bind all local non-queue interrupt to vector 0 */
-	int_map |= 0;
+	int_map |= FM10K_MISC_VEC_ID;

-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

 	/* Enable misc causes */
 	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
@@ -1972,14 +2310,14 @@ fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t int_map = FM10K_INT_MAP_DISABLE;

-	int_map |= 0;
+	int_map |= FM10K_MISC_VEC_ID;

-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
-	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

 	/* Disable misc causes */
 	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
@@ -2003,7 +2341,7 @@ fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
 	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

 	/* Bind all local non-queue interrupt to vector 0 */
-	int_map |= 0;
+	int_map |= FM10K_MISC_VEC_ID;

 	/* Only INT 0 available, other 15 are reserved. */
 	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
@@ -2020,7 +2358,7 @@ fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t int_map = FM10K_INT_MAP_DISABLE;

-	int_map |= 0;
+	int_map |= FM10K_MISC_VEC_ID;

 	/* Only INT 0 available, other 15 are reserved. */
 	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
@@ -2030,6 +2368,100 @@ fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
 	FM10K_WRITE_FLUSH(hw);
 }

+static int
+fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+	/* Enable ITR */
+	if (hw->mac.type == fm10k_mac_pf)
+		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+	else
+		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+	rte_intr_enable(&pdev->intr_handle);
+	return 0;
+}
+
+static int
+fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+	/* Disable ITR */
+	if (hw->mac.type == fm10k_mac_pf)
+		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+			FM10K_ITR_MASK_SET);
+	else
+		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+			FM10K_ITR_MASK_SET);
+	return 0;
+}
+
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+	uint32_t intr_vector, vec;
+	uint16_t queue_id;
+	int result = 0;
+
+	/* fm10k needs one separate interrupt for mailbox,
+	 * so only drivers which support multiple interrupt vectors
+	 * e.g. vfio-pci can work for fm10k interrupt mode
+	 */
+	if (!rte_intr_cap_multiple(intr_handle) ||
+	    dev->data->dev_conf.intr_conf.rxq == 0)
+		return result;
+
+	intr_vector = dev->data->nb_rx_queues;
+
+	/* disable interrupt first */
+	rte_intr_disable(intr_handle);
+	if (hw->mac.type == fm10k_mac_pf)
+		fm10k_dev_disable_intr_pf(dev);
+	else
+		fm10k_dev_disable_intr_vf(dev);
+
+	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
+		PMD_INIT_LOG(ERR, "Failed to init event fd");
+		result = -EIO;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !result) {
+		intr_handle->intr_vec = rte_zmalloc("intr_vec",
+			dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec) {
+			for (queue_id = 0, vec = FM10K_RX_VEC_START;
+					queue_id < dev->data->nb_rx_queues;
+					queue_id++) {
+				intr_handle->intr_vec[queue_id] = vec;
+				if (vec < intr_handle->nb_efd - 1 +
+						FM10K_RX_VEC_START)
+					vec++;
+			}
+		} else {
+			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+				" intr_vec", dev->data->nb_rx_queues);
+			rte_intr_efd_disable(intr_handle);
+			result = -ENOMEM;
+		}
+	}
+
+	if (hw->mac.type == fm10k_mac_pf)
+		fm10k_dev_enable_intr_pf(dev);
+	else
+		fm10k_dev_enable_intr_vf(dev);
+	rte_intr_enable(intr_handle);
+	hw->mac.ops.update_int_moderator(hw);
+	return result;
+}
+
 static int
 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 {
@@ -2139,13 +2571,15 @@ error:
 * void
 */
 static void
-fm10k_dev_interrupt_handler_pf(
-			__rte_unused struct rte_intr_handle *handle,
-			void *param)
+fm10k_dev_interrupt_handler_pf(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t cause, status;
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+	int status_mbx;
+	s32 err;

 	if (hw->mac.type != fm10k_mac_pf)
 		return;
@@ -2162,14 +2596,69 @@ fm10k_dev_interrupt_handler_pf(
 	if (cause & FM10K_EICR_SWITCHNOTREADY)
 		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

-	if (cause & FM10K_EICR_SWITCHREADY)
+	if (cause & FM10K_EICR_SWITCHREADY) {
 		PMD_INIT_LOG(INFO, "INT: Switch is ready");
+		if (dev_info->sm_down == 1) {
+			fm10k_mbx_lock(hw);
+
+			/* For recreating logical ports */
+			status_mbx = hw->mac.ops.update_lport_state(hw,
+					hw->mac.dglort_map, MAX_LPORT_NUM, 1);
+			if (status_mbx == FM10K_SUCCESS)
+				PMD_INIT_LOG(INFO,
+					"INT: Recreated Logical port");
+			else
+				PMD_INIT_LOG(INFO,
					"INT: Logical ports weren't recreated");
+
+			status_mbx = hw->mac.ops.update_xcast_mode(hw,
+				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+			if (status_mbx != FM10K_SUCCESS)
+				PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+
+			fm10k_mbx_unlock(hw);
+
+			/* first clear the internal SW recording structure */
+			if (!(dev->data->dev_conf.rxmode.mq_mode &
+						ETH_MQ_RX_VMDQ_FLAG))
+				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+					false);
+
+			fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+					MAIN_VSI_POOL_NUMBER);
+
+			/*
+			 * Add default mac address and vlan for the logical
+			 * ports that have been created, leave to the
+			 * application to fully recover Rx filtering.
+			 */
+			fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+					MAIN_VSI_POOL_NUMBER);
+
+			if (!(dev->data->dev_conf.rxmode.mq_mode &
+						ETH_MQ_RX_VMDQ_FLAG))
+				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+					true);
+
+			dev_info->sm_down = 0;
+			_rte_eth_dev_callback_process(dev,
+					RTE_ETH_EVENT_INTR_LSC,
+					NULL);
+		}
+	}

 	/* Handle mailbox message */
 	fm10k_mbx_lock(hw);
-	hw->mbx.ops.process(hw, &hw->mbx);
+	err = hw->mbx.ops.process(hw, &hw->mbx);
 	fm10k_mbx_unlock(hw);

+	if (err == FM10K_ERR_RESET_REQUESTED) {
+		PMD_INIT_LOG(INFO, "INT: Switch is down");
+		dev_info->sm_down = 1;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
 	/* Handle SRAM error */
 	if (cause & FM10K_EICR_SRAMERROR) {
 		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
@@ -2191,7 +2680,7 @@ fm10k_dev_interrupt_handler_pf(
 	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 	/* Re-enable interrupt from host side */
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	rte_intr_enable(dev->intr_handle);
 }

 /**
@@ -2206,12 +2695,15 @@ fm10k_dev_interrupt_handler_pf(
 * void
 */
 static void
-fm10k_dev_interrupt_handler_vf(
-			__rte_unused struct rte_intr_handle *handle,
-			void *param)
+fm10k_dev_interrupt_handler_vf(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct fm10k_mbx_info *mbx = &hw->mbx;
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+	const enum fm10k_mbx_state state = mbx->state;
+	int status_mbx;

 	if (hw->mac.type != fm10k_mac_vf)
 		return;
@@ -2221,11 +2713,54 @@ fm10k_dev_interrupt_handler_vf(
 	hw->mbx.ops.process(hw, &hw->mbx);
 	fm10k_mbx_unlock(hw);

+	if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
+		PMD_INIT_LOG(INFO, "INT: Switch has gone down");
+
+		fm10k_mbx_lock(hw);
+		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+				MAX_LPORT_NUM, 1);
+		fm10k_mbx_unlock(hw);
+
+		/* Setting reset flag */
+		dev_info->sm_down = 1;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
+	if (dev_info->sm_down == 1 &&
+			hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
+		PMD_INIT_LOG(INFO, "INT: Switch has gone up");
+		fm10k_mbx_lock(hw);
+		status_mbx = hw->mac.ops.update_xcast_mode(hw,
+				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+		if (status_mbx != FM10K_SUCCESS)
+			PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+		fm10k_mbx_unlock(hw);
+
+		/* first clear the internal SW recording structure */
+		fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
+		fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+				MAIN_VSI_POOL_NUMBER);
+
+		/*
+		 * Add default mac address and vlan for the logical ports that
+		 * have been created, leave to the application to fully recover
+		 * Rx filtering.
+		 */
+		fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+				MAIN_VSI_POOL_NUMBER);
+		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+		dev_info->sm_down = 0;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
 	/* Re-enable interrupt from device side */
 	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 	/* Re-enable interrupt from host side */
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	rte_intr_enable(dev->intr_handle);
 }

 /* Mailbox message handler in VF */
@@ -2236,29 +2771,16 @@ static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
 	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
 };

-/* Mailbox message handler in PF */
-static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
-	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
-	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
-	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
-	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
-	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
-	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
-	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
-};
-
 static int
 fm10k_setup_mbx_service(struct fm10k_hw *hw)
 {
-	int err;
+	int err = 0;

 	/* Initialize mailbox lock */
 	fm10k_mbx_initlock(hw);

 	/* Replace default message handler with new ones */
-	if (hw->mac.type == fm10k_mac_pf)
-		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
-	else
+	if (hw->mac.type == fm10k_mac_vf)
 		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

 	if (err) {
@@ -2287,9 +2809,13 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
 	.allmulticast_enable    = fm10k_dev_allmulticast_enable,
 	.allmulticast_disable   = fm10k_dev_allmulticast_disable,
 	.stats_get		= fm10k_stats_get,
+	.xstats_get		= fm10k_xstats_get,
+	.xstats_get_names	= fm10k_xstats_get_names,
 	.stats_reset		= fm10k_stats_reset,
+	.xstats_reset		= fm10k_stats_reset,
 	.link_update		= fm10k_link_update,
 	.dev_infos_get		= fm10k_dev_infos_get,
+	.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
 	.vlan_filter_set	= fm10k_vlan_filter_set,
 	.vlan_offload_set	= fm10k_vlan_offload_set,
 	.mac_addr_add		= fm10k_macaddr_add,
@@ -2302,37 +2828,168 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
 	.rx_queue_release	= fm10k_rx_queue_release,
 	.tx_queue_setup		= fm10k_tx_queue_setup,
 	.tx_queue_release	= fm10k_tx_queue_release,
+	.rx_queue_count		= fm10k_dev_rx_queue_count,
+	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
+	.rx_descriptor_status	= fm10k_dev_rx_descriptor_status,
+	.tx_descriptor_status	= fm10k_dev_tx_descriptor_status,
+	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
 	.reta_update		= fm10k_reta_update,
 	.reta_query		= fm10k_reta_query,
 	.rss_hash_update	= fm10k_rss_hash_update,
 	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
 };

+static int ftag_check_handler(__rte_unused const char *key,
+		const char *value, __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+fm10k_check_ftag(struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+	const char *ftag_key = "enable_ftag";
+
+	if (devargs == NULL)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, ftag_key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	/* FTAG is enabled when there's key-value pair: enable_ftag=1 */
+	if (rte_kvargs_process(kvlist, ftag_key,
ftag_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static uint16_t +fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + +static void __attribute__((cold)) +fm10k_set_tx_function(struct rte_eth_dev *dev) +{ + struct fm10k_tx_queue *txq; + int i; + int use_sse = 1; + uint16_t tx_ftag_en = 0; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* primary process has set the ftag flag and offloads */ + txq = dev->data->tx_queues[0]; + if (fm10k_tx_vec_condition_check(txq)) { + dev->tx_pkt_burst = fm10k_xmit_pkts; + dev->tx_pkt_prepare = fm10k_prep_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } else { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + } + return; + } + + if (fm10k_check_ftag(dev->device->devargs)) + tx_ftag_en = 1; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->tx_ftag_en = tx_ftag_en; + /* Check if Vector Tx is satisfied */ + if (fm10k_tx_vec_condition_check(txq)) + use_sse = 0; + } + + if (use_sse) { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + fm10k_txq_vec_setup(txq); + } + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + } else { + dev->tx_pkt_burst = fm10k_xmit_pkts; + dev->tx_pkt_prepare = fm10k_prep_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } +} + static void __attribute__((cold)) fm10k_set_rx_function(struct rte_eth_dev *dev) { - struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); + struct fm10k_dev_info *dev_info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); uint16_t i, rx_using_sse; + uint16_t rx_ftag_en = 0; + + if (fm10k_check_ftag(dev->device->devargs)) + rx_ftag_en = 1; /* In order to allow Vector Rx there are a few configuration * conditions to be met. */ - if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) { + if (!fm10k_rx_vec_condition_check(dev) && + dev_info->rx_vec_allowed && !rx_ftag_en) { if (dev->data->scattered_rx) dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec; else dev->rx_pkt_burst = fm10k_recv_pkts_vec; } else if (dev->data->scattered_rx) dev->rx_pkt_burst = fm10k_recv_scattered_pkts; + else + dev->rx_pkt_burst = fm10k_recv_pkts; rx_using_sse = (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec || dev->rx_pkt_burst == fm10k_recv_pkts_vec); + if (rx_using_sse) + PMD_INIT_LOG(DEBUG, "Use vector Rx func"); + else + PMD_INIT_LOG(DEBUG, "Use regular Rx func"); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + for (i = 0; i < dev->data->nb_rx_queues; i++) { struct fm10k_rx_queue *rxq = dev->data->rx_queues[i]; rxq->rx_using_sse = rx_using_sse; + rxq->rx_ftag_en = rx_ftag_en; } } @@ -2340,7 +2997,8 @@ static void fm10k_params_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev); + struct fm10k_dev_info *info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); /* Inialize bus info. 
Normally we would call fm10k_get_bus_info(), but
 * there is no way to get link status without reading BAR4. Until this
@@ -2355,13 +3013,16 @@ fm10k_params_init(struct rte_eth_dev *dev)
 hw->bus.payload = fm10k_bus_payload_256;
 
 info->rx_vec_allowed = true;
+ info->sm_down = false;
 }
 
 static int
 eth_fm10k_dev_init(struct rte_eth_dev *dev)
 {
 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int diag;
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int diag, i;
 struct fm10k_macvlan_filter_info *macvlan;
 
 PMD_INIT_FUNC_TRACE();
@@ -2369,21 +3030,30 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
 dev->dev_ops = &fm10k_eth_dev_ops;
 dev->rx_pkt_burst = &fm10k_recv_pkts;
 dev->tx_pkt_burst = &fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = &fm10k_prep_pkts;
 
- /* only initialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ /*
+ * Primary process does the whole initialization; for secondary
+ * processes, we just select the same Rx and Tx function as primary.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ fm10k_set_rx_function(dev);
+ fm10k_set_tx_function(dev);
 return 0;
+ }
+
+ rte_eth_copy_pci_info(dev, pdev);
 
 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 memset(macvlan, 0, sizeof(*macvlan));
 
 /* Vendor and Device ID need to be set before init of shared code */
 memset(hw, 0, sizeof(*hw));
- hw->device_id = dev->pci_dev->id.device_id;
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->device_id = pdev->id.device_id;
+ hw->vendor_id = pdev->id.vendor_id;
+ hw->subsystem_device_id = pdev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
 hw->revision_id = 0;
- hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pdev->mem_resource[0].addr;
 if (hw->hw_addr == NULL) {
 PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to blacklist unused devices."); @@ -2420,7 +3090,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) diag = fm10k_read_mac_addr(hw); - ether_addr_copy((const struct ether_addr *)hw->mac.addr, + ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr, &dev->data->mac_addrs[0]); if (diag != FM10K_SUCCESS || @@ -2429,7 +3099,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) /* Generate a random addr */ eth_random_addr(hw->mac.addr); memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); - ether_addr_copy((const struct ether_addr *)hw->mac.addr, + ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr, &dev->data->mac_addrs[0]); } @@ -2453,27 +3123,26 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { /* register callback func to eal lib */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(intr_handle, fm10k_dev_interrupt_handler_pf, (void *)dev); /* enable MISC interrupt */ fm10k_dev_enable_intr_pf(dev); } else { /* VF */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(intr_handle, fm10k_dev_interrupt_handler_vf, (void *)dev); fm10k_dev_enable_intr_vf(dev); } - /* Enable uio intr after callback registered */ - rte_intr_enable(&(dev->pci_dev->intr_handle)); + /* Enable intr after callback registered */ + rte_intr_enable(intr_handle); hw->mac.ops.update_int_moderator(hw); /* Make sure Switch Manager is ready before going forward. */ if (hw->mac.type == fm10k_mac_pf) { int switch_ready = 0; - int i; for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { fm10k_mbx_lock(hw); @@ -2500,7 +3169,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) */ fm10k_mbx_lock(hw); /* Enable port first */ - hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, 1); /* Set unicast mode by default. App can change to other mode in other * API func. @@ -2510,6 +3180,21 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) fm10k_mbx_unlock(hw); + /* Make sure default VID is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + if (hw->mac.default_vid) + break; + /* Delay some time to acquire async port VLAN info. 
*/ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); + return -1; + } + } + /* Add default mac address */ fm10k_MAC_filter_set(dev, hw->mac.addr, true, MAIN_VSI_POOL_NUMBER); @@ -2521,7 +3206,8 @@ static int eth_fm10k_dev_uninit(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; PMD_INIT_FUNC_TRACE(); /* only uninitialize in the primary process */ @@ -2536,7 +3222,7 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) dev->tx_pkt_burst = NULL; /* disable uio/vfio intr */ - rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_disable(intr_handle); /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { @@ -2544,25 +3230,29 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) fm10k_dev_disable_intr_pf(dev); /* unregister callback func to eal lib */ - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_pf, (void *)dev); } else { /* disable interrupt */ fm10k_dev_disable_intr_vf(dev); - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_vf, (void *)dev); } - /* free mac memory */ - if (dev->data->mac_addrs) { - rte_free(dev->data->mac_addrs); - dev->data->mac_addrs = NULL; - } + return 0; +} - memset(hw, 0, sizeof(*hw)); +static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct fm10k_adapter), eth_fm10k_dev_init); +} - return 0; +static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit); } /* @@ -2570,40 +3260,30 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) * and SRIOV-VF devices. */ static const struct rte_pci_id pci_id_fm10k_map[] = { -#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#include "rte_pci_dev_ids.h" + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) }, { .vendor_id = 0, /* sentinel */ }, }; -static struct eth_driver rte_pmd_fm10k = { - .pci_drv = { - .name = "rte_pmd_fm10k", - .id_table = pci_id_fm10k_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, - }, - .eth_dev_init = eth_fm10k_dev_init, - .eth_dev_uninit = eth_fm10k_dev_uninit, - .dev_private_size = sizeof(struct fm10k_adapter), +static struct rte_pci_driver rte_pmd_fm10k = { + .id_table = pci_id_fm10k_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_IOVA_AS_VA, + .probe = eth_fm10k_pci_probe, + .remove = eth_fm10k_pci_remove, }; -/* - * Driver initialization routine. - * Invoked once at EAL init time. - * Register itself as the [Poll Mode] Driver of PCI FM10K devices. 
- */ -static int -rte_pmd_fm10k_init(__rte_unused const char *name, - __rte_unused const char *params) +RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k); +RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map); +RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci"); + +RTE_INIT(fm10k_init_log) { - PMD_INIT_FUNC_TRACE(); - rte_eth_driver_register(&rte_pmd_fm10k); - return 0; + fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init"); + if (fm10k_logtype_init >= 0) + rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE); + fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver"); + if (fm10k_logtype_driver >= 0) + rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE); } - -static struct rte_driver rte_fm10k_driver = { - .type = PMD_PDEV, - .init = rte_pmd_fm10k_init, -}; - -PMD_REGISTER_DRIVER(rte_fm10k_driver);
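
The enable_ftag devarg checked by fm10k_check_ftag() above arrives in the device's devargs string, e.g. appended to a PCI whitelist entry such as -w 0000:84:00.0,enable_ftag=1 (the address is a placeholder; the exact EAL option syntax depends on the release). A minimal standalone sketch of the same rte_kvargs convention follows; the helper names are illustrative, not part of the driver:

#include <string.h>
#include <rte_common.h>
#include <rte_kvargs.h>

/* Hypothetical helper mirroring the driver's convention: any value
 * other than the literal "1" leaves FTAG disabled. */
static int
match_ftag_value(const char *key __rte_unused, const char *value,
		 void *opaque __rte_unused)
{
	return strcmp(value, "1") ? -1 : 0;
}

/* Returns 1 only when the devargs string (the part after the comma in
 * "0000:84:00.0,enable_ftag=1") contains enable_ftag=1. */
static int
ftag_requested(const char *devargs_str)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs_str, NULL);
	int enabled = 0;

	if (kvlist == NULL)
		return 0;
	if (rte_kvargs_count(kvlist, "enable_ftag") &&
	    rte_kvargs_process(kvlist, "enable_ftag",
			       match_ftag_value, NULL) == 0)
		enabled = 1;
	rte_kvargs_free(kvlist);
	return enabled;
}

As in the driver, enable_ftag=0 and a malformed value behave identically: the per-queue rx_ftag_en/tx_ftag_en flags set in the Rx/Tx selection paths above stay clear.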
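
Both interrupt handlers now raise RTE_ETH_EVENT_INTR_LSC when dev_info->sm_down flips, i.e. when the switch manager goes down or comes back up, not only on classic link changes. A sketch of an application-side listener, assuming the rte_eth_dev_cb_fn prototype of this ethdev generation; the helper names are ours, not part of the driver:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
	     void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
	struct rte_eth_link link;

	if (type != RTE_ETH_EVENT_INTR_LSC)
		return 0;
	/* Non-blocking query of the link state the PMD just updated */
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: link %s\n", port_id,
	       link.link_status ? "up" : "down");
	return 0;
}

static void
register_lsc_cb(uint16_t port_id)
{
	/* rte_eth_conf.intr_conf.lsc must also be set at configure time
	 * for LSC events to be delivered to the application. */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_cb, NULL);
}

Because the handlers above only restore the default MAC and VLAN after a switch restart and, per their comment, leave it to the application to fully recover Rx filtering, such a callback is the natural place to replay any additional filters the application had installed.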
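
fm10k_xmit_pkts_vec() above drains a burst in chunks of at most txq->rs_thresh and returns early when fm10k_xmit_fixed_burst_vec() accepts only part of a chunk, so a caller can see rte_eth_tx_burst() return fewer packets than requested without any error. A small sketch of the usual application-side pattern, with hypothetical port/queue parameters:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Keep calling rte_eth_tx_burst() until every mbuf is queued or the
 * ring stops accepting packets (ret == 0); the caller then decides
 * whether to retry later or free the remainder. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts) {
		uint16_t ret = rte_eth_tx_burst(port_id, queue_id,
						&pkts[sent],
						(uint16_t)(nb_pkts - sent));
		if (ret == 0)
			break;
		sent += ret;
	}
	return sent;
}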
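
The RTE_INIT constructor at the end registers the dynamic log types pmd.net.fm10k.init and pmd.net.fm10k.driver at NOTICE level. They can be raised at startup with an EAL option of the form --log-level=pmd.net.fm10k.driver:8 (8 corresponding to RTE_LOG_DEBUG), or, assuming rte_log_register() returns the id of an already-registered name, from application code:

#include <rte_log.h>

/* Sketch: raise the fm10k driver log type to DEBUG at runtime. Assumes
 * rte_log_register() looks up an existing type by name rather than
 * creating a duplicate, so this is safe after the constructor has run. */
static void
fm10k_logs_to_debug(void)
{
	int logtype = rte_log_register("pmd.net.fm10k.driver");

	if (logtype >= 0)
		rte_log_set_level((uint32_t)logtype, RTE_LOG_DEBUG);
}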