X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=c423cf2d37e996b16c60714abd62ec17cf8021f8;hb=85ebc09bad1bbe1296bd31e787e13bcf893f40a9;hp=cfe1c11d81de55e7ce96a9955ab4b813aa6f74b9;hpb=ca9d6597184bddb8d3ee925cb462fcbd51628299;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index cfe1c11d81..c423cf2d37 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include
@@ -48,19 +19,22 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include
 #include
+#ifdef RTE_LIBRTE_SECURITY
+#include
+#endif
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -92,6 +66,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/* Default value of Max Rx Queue */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -219,7 +196,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
 		int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -276,7 +253,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 		uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 		uint16_t queue_id);
@@ -424,6 +401,9 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 		(r) = (h)->bitmap[idx] >> bit & 1;\
 } while (0)
 
+int ixgbe_logtype_init;
+int ixgbe_logtype_driver;
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -1168,7 +1148,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -1194,6 +1173,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	/* Unlock any pending hardware semaphore */
 	ixgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIBRTE_SECURITY
+	/* Initialize security_ctx only for primary process */
+	if (ixgbe_ipsec_ctx_create(eth_dev))
+		return -ENOMEM;
+#endif
+
 	/* Initialize DCB configuration */
 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
 	ixgbe_dcb_init(hw, dcb_config);
@@ -1401,6 +1386,10 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);
 
+#ifdef RTE_LIBRTE_SECURITY
+	rte_free(eth_dev->security_ctx);
+#endif
+
 	return 0;
 }
@@ -1627,7 +1616,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1781,7 +1769,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
 	.id_table = pci_id_ixgbe_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbe_pci_probe,
 	.remove = eth_ixgbe_pci_remove,
 };
@@ -1803,7 +1792,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
static struct rte_pci_driver rte_ixgbevf_pmd = {
 	.id_table = pci_id_ixgbevf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbevf_pci_probe,
 	.remove = eth_ixgbevf_pci_remove,
 };
@@ -1959,9 +1948,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 	rxq = dev->data->rx_queues[queue];
 
 	if (on)
-		rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 	else
-		rxq->vlan_flags = PKT_RX_VLAN_PKT;
+		rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
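The PKT_RX_VLAN_PKT to PKT_RX_VLAN rename in the hunk above tracks the mbuf flag rename of this DPDK generation: PKT_RX_VLAN only asserts that the VLAN TCI was parsed into mbuf->vlan_tci, while PKT_RX_VLAN_STRIPPED additionally means the hardware removed the tag from the packet data. A minimal consumer-side sketch of the flags that ixgbe_vlan_hw_strip_bitmap_set() programs into rxq->vlan_flags (illustrative only, not part of the patch; the helper name is hypothetical):

#include <rte_mbuf.h>

/* Sketch: extract the VLAN TCI from a received mbuf, if any was parsed. */
static inline uint16_t
example_rx_vlan_tci(const struct rte_mbuf *m)
{
	if ((m->ol_flags & PKT_RX_VLAN) == 0)
		return 0;	/* no VLAN information on this packet */
	/* With PKT_RX_VLAN_STRIPPED also set, the tag is already gone
	 * from the packet data and m->vlan_tci is the only copy.
	 */
	return m->vlan_tci;
}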
@@ -2125,7 +2114,7 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
-static void
+static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
@@ -2148,6 +2137,8 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
 	}
+
+	return 0;
 }
 
 static void
@@ -2179,9 +2170,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 		return -EINVAL;
 	}
 
-	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
-
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 	return 0;
 }
@@ -2221,8 +2213,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		case ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 			break;
 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
 			/* SRIOV only works in VMDq enable mode */
@@ -2569,9 +2559,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbe_vlan_offload_set(dev, mask);
+	err = ixgbe_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+		goto error;
+	}
 
 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
@@ -3694,6 +3688,13 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+#endif
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -3956,6 +3957,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
 	memset(&old, 0, sizeof(old));
 	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
@@ -4358,12 +4360,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
 		ixgbe_dev_link_status_print(dev);
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-					      NULL, NULL);
+					      NULL);
 	}
 
 	if (intr->flags & IXGBE_FLAG_MACSEC) {
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
-					      NULL, NULL);
+					      NULL);
 		intr->flags &= ~IXGBE_FLAG_MACSEC;
 	}
@@ -5000,7 +5002,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	hw->mac.ops.reset_hw(hw);
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+		return err;
+	}
 	hw->mac.get_link_status = true;
 
 	/* negotiate mailbox API version to use with the PF. */
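A worked example of the ixgbe_check_vf_rss_rxq_num() change above: nb_q_per_pool is no longer taken from the requested nb_rx_q but derived from the 128-queue hardware limit (IXGBE_MAX_RX_QUEUE_NUM, defined earlier in this patch) divided by the number of active SR-IOV pools. A standalone sketch of that arithmetic with illustrative values (not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define IXGBE_MAX_RX_QUEUE_NUM 128	/* as defined in this patch */

int
main(void)
{
	uint16_t active_pools = 32;	/* e.g. 32 VF pools enabled */
	uint16_t max_vfs = 31;		/* illustrative pci_dev->max_vfs */

	/* mirrors the nb_q_per_pool / def_pool_q_idx assignments above */
	uint16_t nb_q_per_pool = IXGBE_MAX_RX_QUEUE_NUM / active_pools;
	uint16_t def_pool_q_idx = max_vfs * nb_q_per_pool;

	/* prints: 4 queues per pool, default pool starts at queue 124 */
	printf("%u queues per pool, default pool starts at queue %u\n",
	       nb_q_per_pool, def_pool_q_idx);
	return 0;
}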
@@ -5022,12 +5028,18 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* Set HW strip */
 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbevf_vlan_offload_set(dev, mask);
+	err = ixgbevf_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+		ixgbe_dev_clear_queues(dev);
+		return err;
+	}
 
 	ixgbevf_dev_rxtx_start(dev);
 
 	/* check and configure queue intr-vector mapping */
-	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
 		/* According to datasheet, only vector 0/1/2 can be used,
 		 * now only one vector is used for Rx queue
 		 */
@@ -5048,6 +5060,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	}
 	ixgbevf_configure_msix(dev);
 
+	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
+	 * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+	 * If the previous VFIO interrupt mapping set in
+	 * eth_ixgbevf_dev_init() is not cleared, the following
+	 * rte_intr_enable() will fail when it tries to map Rx queue
+	 * interrupts to other VFIO vectors.
+	 * So clear the uio/vfio intr/eventfd first to avoid failure.
+	 */
+	rte_intr_disable(intr_handle);
+	rte_intr_enable(intr_handle);
 
 	/* Re-enable interrupt for VF */
@@ -5202,7 +5223,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }
 
-static void
+static int
 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
@@ -5217,6 +5238,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		for (i = 0; i < hw->mac.max_rx_queues; i++)
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 	}
+
+	return 0;
 }
 
 int
@@ -7218,6 +7241,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	struct ixgbe_dcb_config *dcb_config =
 			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
 	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
 	uint8_t i, j;
 
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7225,19 +7250,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	else
 		dcb_info->nb_tcs = 1;
 
+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-			for (j = 0; j < dcb_info->nb_tcs; j++) {
-				dcb_info->tc_queue.tc_rxq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-				dcb_info->tc_queue.tc_txq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
 			}
 		}
 	} else { /* vt is disabled */
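To make the reworked tc_queue mapping above concrete: without SR-IOV, each VMDq pool owns a contiguous block of nb_tcs queues, so TC j of pool i sits at queue i * nb_tcs + j; with SR-IOV active, a VF only sees its own pool, so the reported mapping collapses to TC j -> queue j of pool 0. A standalone illustration of both views (all values are hypothetical, not from the patch):

#include <stdio.h>

int
main(void)
{
	const int nb_tcs = 4;		/* TCs per pool, illustrative */
	const int nb_pools = 2;		/* VMDq pools, illustrative */
	const int sriov_active = 0;	/* flip to 1 for the VF view */
	int i, j;

	if (sriov_active) {
		/* SR-IOV: the VF's pool starts at its own queue 0 */
		for (j = 0; j < nb_tcs; j++)
			printf("tc %d -> queue %d\n", j, j);
	} else {
		/* no SR-IOV: pools are laid out back to back */
		for (i = 0; i < nb_pools; i++)
			for (j = 0; j < nb_tcs; j++)
				printf("pool %d, tc %d -> queue %d\n",
				       i, j, i * nb_tcs + j);
	}
	return 0;
}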
@@ -8110,13 +8147,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	u32 in_msg = 0;
 
-	if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
-		return;
+	/* peek the message first */
+	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
 
 	/* PF reset VF event */
-	if (in_msg == IXGBE_PF_CONTROL_MSG)
+	if (in_msg == IXGBE_PF_CONTROL_MSG) {
+		/* dummy mbx read to ack pf */
+		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+			return;
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-					      NULL, NULL);
+					      NULL);
+	}
 }
 
 static int
@@ -8284,6 +8325,18 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
 	}
 }
 
+/* restore rss filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter_info->rss_info.num)
+		ixgbe_config_rss_filter(dev,
+			&filter_info->rss_info, TRUE);
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
@@ -8292,6 +8345,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
 	ixgbe_syn_filter_restore(dev);
 	ixgbe_fdir_filter_restore(dev);
 	ixgbe_l2_tn_filter_restore(dev);
+	ixgbe_rss_filter_restore(dev);
 
 	return 0;
 }
@@ -8389,3 +8443,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log);
+static void
+ixgbe_init_log(void)
+{
+	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+	if (ixgbe_logtype_init >= 0)
+		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+	if (ixgbe_logtype_driver >= 0)
+		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}
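The RTE_INIT constructor above registers two dynamic logtypes at NOTICE level. They are consumed through the driver's PMD logging macros; a hedged sketch of the usual shape of such a macro follows (the real bodies live in ixgbe_logs.h and may differ from this):

#include <rte_log.h>

extern int ixgbe_logtype_init;

/* Common DPDK PMD logging pattern, assumed here rather than quoted
 * from the tree: route PMD_INIT_LOG() through the registered logtype.
 */
#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ixgbe_logtype_init, \
		"%s(): " fmt "\n", __func__, ## args)

Once registered this way, the level can be raised per component at run time without rebuilding, e.g. with an EAL option of the form --log-level=pmd.net.ixgbe.init,8 (numeric-level syntax of this DPDK era).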