X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Ffm10k%2Ffm10k_ethdev.c;h=3ff1b0e0f80089a8b8256b3cced5843d4332def1;hb=050316a88313e2cc0ac1c9155a04143cbd96a52e;hp=372564bc0a6655e69c26b38e5c7f6f02637561f2;hpb=b961fe9344ddfb7909b80376e29add4728381f85;p=dpdk.git

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 372564bc0a..3ff1b0e0f8 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1,37 +1,9 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
  */
 
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_string_fns.h>
@@ -53,13 +25,13 @@
 /* Wait interval to get switch status */
 #define WAIT_SWITCH_MSG_US    100000
 /* A period of quiescence for switch */
-#define FM10K_SWITCH_QUIESCE_US 10000
+#define FM10K_SWITCH_QUIESCE_US 100000
 /* Number of chars per uint32 type */
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
 
 /* default 1:1 map from queue ID to interrupt vector ID */
-#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
 
 /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
 #define MAX_LPORT_NUM    128
@@ -68,6 +40,9 @@
 #define GLORT_FD_MASK    GLORT_PF_MASK
 #define GLORT_FD_INDEX   GLORT_FD_Q_BASE
 
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -83,6 +58,14 @@ static void fm10k_rx_queue_release(void *queue);
 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
 static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+				struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
 
 struct fm10k_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -197,9 +180,9 @@ fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
 }
 
 uint16_t __attribute__((weak))
-fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
-		__rte_unused struct rte_mbuf **tx_pkts,
-		__rte_unused uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+			   __rte_unused struct rte_mbuf **tx_pkts,
+			   __rte_unused uint16_t nb_pkts)
 {
 	return 0;
 }
@@ -468,8 +451,10 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+
 	/* multipe queue mode checking */
 	ret = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -478,6 +463,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	dev->data->scattered_rx = 0;
+
 	return 0;
 }
 
@@ -531,9 +518,8 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 	};
 
-	if (dev->data->nb_rx_queues == 1 ||
-	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
-	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
+	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 		return;
 	}
@@ -677,7 +663,7 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
 		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
 		 * register is read-only for VF.
 		 */
-		if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
+		if (fm10k_check_ftag(dev->device->devargs)) {
 			if (hw->mac.type == fm10k_mac_pf) {
 				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
 						FM10K_PFVTCTL_FTAG_DESC_ENABLE);
@@ -695,8 +681,9 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
 				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
 		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
 
-		/* assign default SGLORT for each TX queue */
-		FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
+		/* assign default SGLORT for each TX queue by PF */
+		if (hw->mac.type == fm10k_mac_pf)
+			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
 	}
 
 	/* set up vector or scalar TX function as appropriate */
@@ -710,7 +697,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_macvlan_filter_info *macvlan;
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	int i, ret;
 	struct fm10k_rx_queue *rxq;
 	uint64_t base_addr;
@@ -724,13 +712,13 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 	i = 0;
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (; i < dev->data->nb_rx_queues; i++) {
-			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
+			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
 			if (hw->mac.type == fm10k_mac_pf)
-				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
 					FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 			else
-				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
 					FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 		}
@@ -779,7 +767,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
-			dev->data->dev_conf.rxmode.enable_scatter) {
+			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
 			uint32_t reg;
 			dev->data->scattered_rx = 1;
 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1163,6 +1151,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
+	fm10k_link_update(dev, 0);
+
 	return 0;
 }
 
@@ -1170,7 +1160,8 @@ static void
 fm10k_dev_stop(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	int i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -1189,10 +1180,10 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
 			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
 				3 << FM10K_RXINT_TIMER_SHIFT);
 			if (hw->mac.type == fm10k_mac_pf)
-				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
 					FM10K_ITR_MASK_SET);
 			else
-				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
 					FM10K_ITR_MASK_SET);
 		}
 	}
@@ -1235,7 +1226,7 @@ fm10k_dev_close(struct rte_eth_dev *dev)
 		MAX_LPORT_NUM, false);
 	fm10k_mbx_unlock(hw);
 
-	/* allow 10ms for device to quiesce */
+	/* allow 100ms for device to quiesce */
 	rte_delay_us(FM10K_SWITCH_QUIESCE_US);
 
 	/* Stop mailbox service first */
@@ -1249,14 +1240,15 @@ static int
 fm10k_link_update(struct rte_eth_dev *dev,
 	__rte_unused int wait_to_complete)
 {
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	PMD_INIT_FUNC_TRACE();
 
-	/* The host-interface link is always up.  The speed is ~50Gbps per Gen3
-	 * x8 PCIe interface. For now, we leave the speed undefined since there
-	 * is no 50Gbps Ethernet. */
-	dev->data->dev_link.link_speed = 0;
+	dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
 	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status =
+		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
 
 	return 0;
 }
@@ -1314,6 +1306,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
 			fm10k_hw_stats_strings[count].offset);
+		xstats[count].id = count;
 		count++;
 	}
 
@@ -1323,12 +1316,14 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			xstats[count].value =
 				*(uint64_t *)(((char *)&hw_stats->q[q]) +
 				fm10k_hw_stats_rx_q_strings[i].offset);
+			xstats[count].id = count;
 			count++;
 		}
 		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
 			xstats[count].value =
 				*(uint64_t *)(((char *)&hw_stats->q[q]) +
 				fm10k_hw_stats_tx_q_strings[i].offset);
+			xstats[count].id = count;
 			count++;
 		}
 	}
@@ -1336,7 +1331,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	return FM10K_NB_XSTATS;
 }
 
-static void
+static int
 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	uint64_t ipackets, opackets, ibytes, obytes;
@@ -1366,6 +1361,7 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	stats->opackets = opackets;
 	stats->ibytes = ibytes;
 	stats->obytes = obytes;
+	return 0;
 }
 
 static void
@@ -1386,6 +1382,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1395,22 +1392,17 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues     = hw->mac.max_queues;
 	dev_info->max_mac_addrs     = FM10K_MAX_MACADDR_NUM;
 	dev_info->max_hash_mac_addrs = 0;
-	dev_info->max_vfs           = dev->pci_dev->max_vfs;
+	dev_info->max_vfs           = pdev->max_vfs;
 	dev_info->vmdq_pool_base    = 0;
 	dev_info->vmdq_queue_base   = 0;
 	dev_info->max_vmdq_pools    = ETH_32_POOLS;
 	dev_info->vmdq_queue_num    = FM10K_MAX_QUEUES_PF;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
 
@@ -1423,6 +1415,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1433,7 +1426,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		},
 		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
 		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
-		.txq_flags = FM10K_SIMPLE_TX_FLAG,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1446,6 +1439,8 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 		.nb_max = FM10K_MAX_TX_DESC,
 		.nb_min = FM10K_MIN_TX_DESC,
 		.nb_align = FM10K_MULT_TX_DESC,
+		.nb_seg_max = FM10K_TX_MAX_SEG,
+		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
 	};
 
 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
@@ -1576,25 +1571,30 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	return 0;
 }
 
-static void
-fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
+static int
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (!(dev->data->dev_conf.rxmode.offloads &
+		      DEV_RX_OFFLOAD_VLAN_STRIP))
 			PMD_INIT_LOG(ERR, "VLAN stripping is "
 					"always on in fm10k");
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (dev->data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_INIT_LOG(ERR, "VLAN QinQ is not "
 					"supported in fm10k");
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (!(dev->data->dev_conf.rxmode.offloads &
+		      DEV_RX_OFFLOAD_VLAN_FILTER))
 			PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
 	}
+
+	return 0;
 }
 
 /* Add/Remove a MAC address, and update filters to main VSI */
@@ -1679,7 +1679,7 @@ static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
 }
 
 /* Add a MAC address, and update filters */
-static void
+static int
 fm10k_macaddr_add(struct rte_eth_dev *dev,
 		struct ether_addr *mac_addr,
 		uint32_t index,
@@ -1690,6 +1690,7 @@ fm10k_macaddr_add(struct rte_eth_dev *dev,
 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
 	macvlan->mac_vmdq_id[index] = pool;
+	return 0;
 }
 
 /* Remove a MAC address, and update filters */
@@ -1787,18 +1788,43 @@ mempool_element_size_valid(struct rte_mempool *mp)
 	return 1;
 }
 
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
+			  DEV_RX_OFFLOAD_VLAN_FILTER |
+			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_RX_OFFLOAD_UDP_CKSUM   |
+			  DEV_RX_OFFLOAD_TCP_CKSUM   |
+			  DEV_RX_OFFLOAD_JUMBO_FRAME |
+			  DEV_RX_OFFLOAD_CRC_STRIP   |
+			  DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
 static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* make sure the mempool element size can account for alignment.
 	 */
 	if (!mempool_element_size_valid(mp)) {
 		PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
 		return -EINVAL;
 	}
@@ -1843,6 +1869,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -1871,7 +1898,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		return -ENOMEM;
 	}
 	q->hw_ring = mz->addr;
-	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+	q->hw_ring_phys_addr = mz->iova;
 
 	/* Check if number of descs satisfied Vector requirement */
 	if (!rte_is_power_of_2(nb_desc)) {
@@ -1952,6 +1979,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
 	return 0;
 }
 
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_TX_OFFLOAD_UDP_CKSUM   |
+			  DEV_TX_OFFLOAD_TCP_CKSUM   |
+			  DEV_TX_OFFLOAD_TCP_TSO);
+}
+
 static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
@@ -1960,9 +2005,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
 				FM10K_MULT_TX_DESC, nb_desc)) {
@@ -1999,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->nb_desc = nb_desc;
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
-	q->txq_flags = conf->txq_flags;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
@@ -2031,7 +2079,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		return -ENOMEM;
 	}
 	q->hw_ring = mz->addr;
-	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+	q->hw_ring_phys_addr = mz->iova;
 
 	/*
 	 * allocate memory for the RS bit tracker. Enough slots to hold the
@@ -2332,15 +2380,16 @@ static int
 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
 
 	/* Enable ITR */
 	if (hw->mac.type == fm10k_mac_pf)
-		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
 			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
 	else
-		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
 			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
-	rte_intr_enable(&dev->pci_dev->intr_handle);
+	rte_intr_enable(&pdev->intr_handle);
 	return 0;
 }
 
@@ -2348,13 +2397,14 @@ static int
 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
 
 	/* Disable ITR */
 	if (hw->mac.type == fm10k_mac_pf)
-		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
 			FM10K_ITR_MASK_SET);
 	else
-		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
 			FM10K_ITR_MASK_SET);
 	return 0;
 }
@@ -2363,7 +2413,8 @@ static int
 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	uint32_t intr_vector, vec;
 	uint16_t queue_id;
 	int result = 0;
@@ -2379,7 +2430,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	intr_vector = dev->data->nb_rx_queues;
 
 	/* disable interrupt first */
-	rte_intr_disable(&dev->pci_dev->intr_handle);
+	rte_intr_disable(intr_handle);
 	if (hw->mac.type == fm10k_mac_pf)
 		fm10k_dev_disable_intr_pf(dev);
 	else
@@ -2414,7 +2465,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 		fm10k_dev_enable_intr_pf(dev);
 	else
 		fm10k_dev_enable_intr_vf(dev);
-	rte_intr_enable(&dev->pci_dev->intr_handle);
+	rte_intr_enable(intr_handle);
 	hw->mac.ops.update_int_moderator(hw);
 	return result;
 }
@@ -2528,13 +2579,15 @@ error:
  * void
 */
 static void
-fm10k_dev_interrupt_handler_pf(
-			__rte_unused struct rte_intr_handle *handle,
-			void *param)
+fm10k_dev_interrupt_handler_pf(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t cause, status;
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+	int status_mbx;
+	s32 err;
 
 	if (hw->mac.type != fm10k_mac_pf)
 		return;
@@ -2551,14 +2604,69 @@ fm10k_dev_interrupt_handler_pf(
 	if (cause & FM10K_EICR_SWITCHNOTREADY)
 		PMD_INIT_LOG(ERR, "INT: Switch is not ready");
 
-	if (cause & FM10K_EICR_SWITCHREADY)
+	if (cause & FM10K_EICR_SWITCHREADY) {
 		PMD_INIT_LOG(INFO, "INT: Switch is ready");
+		if (dev_info->sm_down == 1) {
+			fm10k_mbx_lock(hw);
+
+			/* For recreating logical ports */
+			status_mbx = hw->mac.ops.update_lport_state(hw,
+					hw->mac.dglort_map, MAX_LPORT_NUM, 1);
+			if (status_mbx == FM10K_SUCCESS)
+				PMD_INIT_LOG(INFO,
+					"INT: Recreated Logical port");
+			else
+				PMD_INIT_LOG(INFO,
					"INT: Logical ports weren't recreated");
+
+			status_mbx = hw->mac.ops.update_xcast_mode(hw,
+				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+			if (status_mbx != FM10K_SUCCESS)
+				PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+
+			fm10k_mbx_unlock(hw);
+
+			/* first clear the internal SW recording structure */
+			if (!(dev->data->dev_conf.rxmode.mq_mode &
+						ETH_MQ_RX_VMDQ_FLAG))
+				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+					false);
+
+			fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+					MAIN_VSI_POOL_NUMBER);
+
+			/*
+			 * Add default mac address and vlan for the logical
+			 * ports that have been created, leave to the
+			 * application to fully recover Rx filtering.
+			 */
+			fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+					MAIN_VSI_POOL_NUMBER);
+
+			if (!(dev->data->dev_conf.rxmode.mq_mode &
+						ETH_MQ_RX_VMDQ_FLAG))
+				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+					true);
+
+			dev_info->sm_down = 0;
+			_rte_eth_dev_callback_process(dev,
+					RTE_ETH_EVENT_INTR_LSC,
+					NULL);
+		}
+	}
 
 	/* Handle mailbox message */
 	fm10k_mbx_lock(hw);
-	hw->mbx.ops.process(hw, &hw->mbx);
+	err = hw->mbx.ops.process(hw, &hw->mbx);
 	fm10k_mbx_unlock(hw);
 
+	if (err == FM10K_ERR_RESET_REQUESTED) {
+		PMD_INIT_LOG(INFO, "INT: Switch is down");
+		dev_info->sm_down = 1;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
 	/* Handle SRAM error */
 	if (cause & FM10K_EICR_SRAMERROR) {
 		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
@@ -2580,7 +2688,7 @@ fm10k_dev_interrupt_handler_pf(
 	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 	/* Re-enable interrupt from host side */
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	rte_intr_enable(dev->intr_handle);
 }
 
 /**
@@ -2595,12 +2703,15 @@ fm10k_dev_interrupt_handler_pf(
  * void
 */
 static void
-fm10k_dev_interrupt_handler_vf(
-			__rte_unused struct rte_intr_handle *handle,
-			void *param)
+fm10k_dev_interrupt_handler_vf(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct fm10k_mbx_info *mbx = &hw->mbx;
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+	const enum fm10k_mbx_state state = mbx->state;
+	int status_mbx;
 
 	if (hw->mac.type != fm10k_mac_vf)
 		return;
@@ -2610,11 +2721,54 @@ fm10k_dev_interrupt_handler_vf(
 	hw->mbx.ops.process(hw, &hw->mbx);
 	fm10k_mbx_unlock(hw);
 
+	if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
+		PMD_INIT_LOG(INFO, "INT: Switch has gone down");
+
+		fm10k_mbx_lock(hw);
+		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+				MAX_LPORT_NUM, 1);
+		fm10k_mbx_unlock(hw);
+
+		/* Setting reset flag */
+		dev_info->sm_down = 1;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
+	if (dev_info->sm_down == 1 &&
+			hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
+		PMD_INIT_LOG(INFO, "INT: Switch has gone up");
+		fm10k_mbx_lock(hw);
+		status_mbx = hw->mac.ops.update_xcast_mode(hw,
+				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+		if (status_mbx != FM10K_SUCCESS)
+			PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+		fm10k_mbx_unlock(hw);
+
+		/* first clear the internal SW recording structure */
+		fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
+		fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+				MAIN_VSI_POOL_NUMBER);
+
+		/*
+		 * Add default mac address and vlan for the logical ports that
+		 * have been created, leave to the application to fully recover
+		 * Rx filtering.
+		 */
+		fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+				MAIN_VSI_POOL_NUMBER);
+		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+		dev_info->sm_down = 0;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+				NULL);
+	}
+
 	/* Re-enable interrupt from device side */
 	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
 					FM10K_ITR_MASK_CLEAR);
 	/* Re-enable interrupt from host side */
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	rte_intr_enable(dev->intr_handle);
 }
 
 /* Mailbox message handler in VF */
@@ -2728,6 +2882,28 @@ fm10k_check_ftag(struct rte_devargs *devargs)
 	return 1;
 }
 
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		    uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+		ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+						 num);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
 static void __attribute__((cold))
 fm10k_set_tx_function(struct rte_eth_dev *dev)
 {
@@ -2736,7 +2912,22 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
 	int use_sse = 1;
 	uint16_t tx_ftag_en = 0;
 
-	if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		/* primary process has set the ftag flag and offloads */
+		txq = dev->data->tx_queues[0];
+		if (fm10k_tx_vec_condition_check(txq)) {
+			dev->tx_pkt_burst = fm10k_xmit_pkts;
+			dev->tx_pkt_prepare = fm10k_prep_pkts;
+			PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+		} else {
+			PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+			dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+			dev->tx_pkt_prepare = NULL;
+		}
+		return;
+	}
+
+	if (fm10k_check_ftag(dev->device->devargs))
 		tx_ftag_en = 1;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2754,8 +2945,10 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
 			fm10k_txq_vec_setup(txq);
 		}
 		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+		dev->tx_pkt_prepare = NULL;
 	} else {
 		dev->tx_pkt_burst = fm10k_xmit_pkts;
+		dev->tx_pkt_prepare = fm10k_prep_pkts;
 		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
 	}
 }
@@ -2763,11 +2956,12 @@ static void __attribute__((cold))
 fm10k_set_rx_function(struct rte_eth_dev *dev)
 {
-	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+	struct fm10k_dev_info *dev_info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	uint16_t i, rx_using_sse;
 	uint16_t rx_ftag_en = 0;
 
-	if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+	if (fm10k_check_ftag(dev->device->devargs))
 		rx_ftag_en = 1;
 
 	/* In order to allow Vector Rx there are a few configuration
@@ -2793,6 +2987,9 @@ fm10k_set_rx_function(struct rte_eth_dev *dev)
 	else
 		PMD_INIT_LOG(DEBUG, "Use regular Rx func");
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
 
@@ -2805,7 +3002,8 @@ static void
 fm10k_params_init(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+	struct fm10k_dev_info *info =
+		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 
 	/* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
 	 * there is no way to get link status without reading BAR4.  Until this
@@ -2826,6 +3024,8 @@ static int
 eth_fm10k_dev_init(struct rte_eth_dev *dev)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 	int diag, i;
 	struct fm10k_macvlan_filter_info *macvlan;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2834,23 +3034,30 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
 	dev->dev_ops = &fm10k_eth_dev_ops;
 	dev->rx_pkt_burst = &fm10k_recv_pkts;
 	dev->tx_pkt_burst = &fm10k_xmit_pkts;
+	dev->tx_pkt_prepare = &fm10k_prep_pkts;
 
-	/* only initialize in the primary process */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	/*
+	 * Primary process does the whole initialization, for secondary
+	 * processes, we just select the same Rx and Tx function as primary.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		fm10k_set_rx_function(dev);
+		fm10k_set_tx_function(dev);
 		return 0;
+	}
 
-	rte_eth_copy_pci_info(dev, dev->pci_dev);
+	rte_eth_copy_pci_info(dev, pdev);
 
 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 	memset(macvlan, 0, sizeof(*macvlan));
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	memset(hw, 0, sizeof(*hw));
-	hw->device_id = dev->pci_dev->id.device_id;
-	hw->vendor_id = dev->pci_dev->id.vendor_id;
-	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
-	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+	hw->device_id = pdev->id.device_id;
+	hw->vendor_id = pdev->id.vendor_id;
+	hw->subsystem_device_id = pdev->id.subsystem_device_id;
+	hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
 	hw->revision_id = 0;
-	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
+	hw->hw_addr = (void *)pdev->mem_resource[0].addr;
 	if (hw->hw_addr == NULL) {
 		PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to blacklist unused devices."); @@ -2920,20 +3127,20 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { /* register callback func to eal lib */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(intr_handle, fm10k_dev_interrupt_handler_pf, (void *)dev); /* enable MISC interrupt */ fm10k_dev_enable_intr_pf(dev); } else { /* VF */ - rte_intr_callback_register(&(dev->pci_dev->intr_handle), + rte_intr_callback_register(intr_handle, fm10k_dev_interrupt_handler_vf, (void *)dev); fm10k_dev_enable_intr_vf(dev); } /* Enable intr after callback registered */ - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); hw->mac.ops.update_int_moderator(hw); @@ -3003,7 +3210,8 @@ static int eth_fm10k_dev_uninit(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; PMD_INIT_FUNC_TRACE(); /* only uninitialize in the primary process */ @@ -3018,7 +3226,7 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) dev->tx_pkt_burst = NULL; /* disable uio/vfio intr */ - rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_disable(intr_handle); /*PF/VF has different interrupt handling mechanism */ if (hw->mac.type == fm10k_mac_pf) { @@ -3026,13 +3234,13 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) fm10k_dev_disable_intr_pf(dev); /* unregister callback func to eal lib */ - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_pf, (void *)dev); } else { /* disable interrupt */ fm10k_dev_disable_intr_vf(dev); - rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + rte_intr_callback_unregister(intr_handle, fm10k_dev_interrupt_handler_vf, (void *)dev); } @@ -3047,6 +3255,18 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) return 0; } +static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct fm10k_adapter), eth_fm10k_dev_init); +} + +static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit); +} + /* * The set of PCI devices this driver supports. This driver will enable both PF * and SRIOV-VF devices. 
@@ -3058,18 +3278,26 @@ static const struct rte_pci_id pci_id_fm10k_map[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
-static struct eth_driver rte_pmd_fm10k = {
-	.pci_drv = {
-		.id_table = pci_id_fm10k_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-			RTE_PCI_DRV_DETACHABLE,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = eth_fm10k_dev_init,
-	.eth_dev_uninit = eth_fm10k_dev_uninit,
-	.dev_private_size = sizeof(struct fm10k_adapter),
+static struct rte_pci_driver rte_pmd_fm10k = {
+	.id_table = pci_id_fm10k_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
+	.probe = eth_fm10k_pci_probe,
+	.remove = eth_fm10k_pci_remove,
 };
 
-DRIVER_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
-DRIVER_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
+RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(fm10k_init_log);
+static void
+fm10k_init_log(void)
+{
+	fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
+	if (fm10k_logtype_init >= 0)
+		rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
+	fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
+	if (fm10k_logtype_driver >= 0)
+		rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
+}