-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/queue.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
-#include <rte_atomic.h>
+#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
+/* Default value of Max Rx Queue */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
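+/* Dynamic log types, registered once in ixgbe_init_log() below. */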
+int ixgbe_logtype_init;
+int ixgbe_logtype_driver;
+
/*
* The set of PCI devices this driver supports
*/
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
sizeof(rte_ixgbevf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/*
* This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
/* Unlock any pending hardware semaphore */
ixgbe_swfw_lock_reset(hw);
+#ifdef RTE_LIBRTE_SECURITY
+ /* Initialize security_ctx only for the primary process */
+ if (ixgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
ixgbe_dcb_init(hw, dcb_config);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler, eth_dev);
+
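+ /* rte_intr_callback_unregister() fails with -EAGAIN while the
+ * callback is still being serviced; retry with a delay until it
+ * completes or the retry budget is exhausted.
+ */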
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
/* Remove all Traffic Manager configuration */
ixgbe_tm_conf_uninit(eth_dev);
+#ifdef RTE_LIBRTE_SECURITY
+ rte_free(eth_dev->security_ctx);
+#endif
+
return 0;
}
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
static struct rte_pci_driver rte_ixgbe_pmd = {
.id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_ixgbe_pci_probe,
.remove = eth_ixgbe_pci_remove,
};
*/
static struct rte_pci_driver rte_ixgbevf_pmd = {
.id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_ixgbevf_pci_probe,
.remove = eth_ixgbevf_pci_remove,
};
rxq = dev->data->rx_queues[queue];
if (on)
- rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
else
- rxq->vlan_flags = PKT_RX_VLAN_PKT;
+ rxq->vlan_flags = PKT_RX_VLAN;
}
static void
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
- }
- }
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
- }
- }
-}
-
static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
*/
}
-static void
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t ctrl;
+ uint16_t i;
+ struct ixgbe_rx_queue *rxq;
+ bool on;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ } else {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ } else {
+ /*
+ * On other 10G NICs, VLAN strip can be set up
+ * per queue in RXDCTL.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl |= IXGBE_RXDCTL_VME;
+ on = TRUE;
+ } else {
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ on = FALSE;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+ /* record the per-queue HW VLAN strip setting */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+ }
+ }
+}
+
+static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
- else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
}
+
+ return 0;
}
static void
return -EINVAL;
}
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
-
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
return 0;
}
case ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
return ret;
}
+ ixgbe_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
+
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
int mask = 0;
int status;
uint16_t vf, idx;
* - fixed speed: TODO implement
*/
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, fix speed not supported",
+ dev->data->port_id);
return -EINVAL;
}
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_set(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
if (err)
goto error;
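+ /* The X550 family additionally supports 2.5G and 5G link speeds. */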
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G;
+ break;
+ default:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+ }
+
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G)) {
+ if (*link_speeds & ~allowed_speeds) {
PMD_INIT_LOG(ERR, "Invalid link setting");
goto error;
}
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_100M)
if (err)
goto error;
+ ixgbe_dev_link_update(dev, 0);
+
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
/*
* This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
*/
-static void
+static int
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct ixgbe_hw *hw =
&total_qbrc, &total_qprc, &total_qprdc);
if (stats == NULL)
- return;
+ return -EINVAL;
/* Fill out the rte_eth_stats statistics structure */
stats->ipackets = total_qprc;
/* Tx Errors */
stats->oerrors = 0;
+ return 0;
}
static void
return IXGBEVF_NB_XSTATS;
}
-static void
+static int
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
ixgbevf_update_stats(dev);
if (stats == NULL)
- return;
+ return -EINVAL;
stats->ipackets = hw_stats->vfgprc;
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
+ return 0;
}
static void
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
- if (mac->type == ixgbe_mac_82599_vf) {
+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
int i;
for (i = 0; i < 5; i++) {
int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int wait = 1;
bool autoneg = false;
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
- memset(&old, 0, sizeof(old));
- rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+ link.link_autoneg = ETH_LINK_AUTONEG;
hw->mac.get_link_status = true;
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
if (link_up == 0) {
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
+
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
break;
}
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
-
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
- struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
}
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
/* get the link status before link update, for predicting later */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
- NULL, NULL);
+ NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
+ ixgbevf_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
+
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
PMD_INIT_FUNC_TRACE();
- hw->mac.ops.reset_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
hw->mac.get_link_status = true;
/* negotiate mailbox API version to use with the PF. */
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_set(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+ ixgbe_dev_clear_queues(dev);
+ return err;
+ }
ixgbevf_dev_rxtx_start(dev);
+ ixgbevf_dev_link_update(dev, 0);
+
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ /* According to the datasheet, only vectors 0/1/2 can be used;
+ * for now a single vector handles all Rx queues.
+ */
+ intr_vector = 1;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
}
}
ixgbevf_configure_msix(dev);
+ /* When a VF port is bound to VFIO-PCI, only the miscellaneous
+ * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+ * If that VFIO interrupt mapping is not cleared, the following
+ * rte_intr_enable() fails when it tries to map the Rx queue
+ * interrupts to other VFIO vectors.
+ * So clear the uio/vfio interrupt/eventfd first to avoid failure.
+ */
+ rte_intr_disable(intr_handle);
+
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
-static void
+static int
ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
uint16_t i;
int on = 0;
/* VF function only support hw strip feature, others are not support */
if (mask & ETH_VLAN_STRIP_MASK) {
- on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
- for (i = 0; i < hw->mac.max_rx_queues; i++)
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
+ }
}
+
+ return 0;
}
int
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = IXGBE_MISC_VEC_ID;
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask |= (1 << IXGBE_MISC_VEC_ID);
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ mask |= (1 << vec);
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask &= ~(1 << IXGBE_MISC_VEC_ID);
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ mask &= ~(1 << vec);
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ uint32_t base = IXGBE_MISC_VEC_ID;
/* Configure VF other cause ivar */
ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
if (!rte_intr_dp_is_en(intr_handle))
return;
+ if (rte_intr_allow_others(intr_handle)) {
+ base = IXGBE_RX_VEC_START;
+ vector_idx = IXGBE_RX_VEC_START;
+ }
+
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
+ if (vector_idx < base + intr_handle->nb_efd - 1)
+ vector_idx++;
}
}
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
uint32_t shift = 0;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
ixgbe_dev_link_update(dev, 1);
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_100M:
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+ uint8_t nb_tcs;
uint8_t i, j;
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
else
dcb_info->nb_tcs = 1;
+ tc_queue = &dcb_info->tc_queue;
+ nb_tcs = dcb_info->nb_tcs;
+
if (dcb_config->vt_mode) { /* vt is enabled*/
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
- for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
- for (j = 0; j < dcb_info->nb_tcs; j++) {
- dcb_info->tc_queue.tc_rxq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
- dcb_info->tc_queue.tc_txq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
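+ /* With SR-IOV active the PF owns a single pool, so report
+ * one queue per TC within pool 0 only.
+ */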
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[0][j].base = j;
+ tc_queue->tc_rxq[0][j].nb_queue = 1;
+ tc_queue->tc_txq[0][j].base = j;
+ tc_queue->tc_txq[0][j].nb_queue = 1;
+ }
+ } else {
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_rxq[i][j].nb_queue = 1;
+ tc_queue->tc_txq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_txq[i][j].nb_queue = 1;
+ }
}
}
} else { /* vt is disabled*/
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
u32 in_msg = 0;
- if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
- return;
+ /* peek the message first */
+ in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
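+ /* A plain register read peeks at the message without acking
+ * the mailbox; the ack happens in ixgbe_read_mbx() below.
+ */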
/* PF reset VF event */
- if (in_msg == IXGBE_PF_CONTROL_MSG)
+ if (in_msg == IXGBE_PF_CONTROL_MSG) {
+ /* dummy mailbox read to ack the PF */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL, NULL);
+ NULL);
+ }
}
static int
}
}
+/* restore RSS filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.num)
+ ixgbe_config_rss_filter(dev,
+ &filter_info->rss_info, TRUE);
+}
+
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
ixgbe_syn_filter_restore(dev);
ixgbe_fdir_filter_restore(dev);
ixgbe_l2_tn_filter_restore(dev);
+ ixgbe_rss_filter_restore(dev);
return 0;
}
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log);
+static void
+ixgbe_init_log(void)
+{
+ ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+ if (ixgbe_logtype_init >= 0)
+ rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+ ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+ if (ixgbe_logtype_driver >= 0)
+ rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}