* Copyright(c) 2013-2016 Intel Corporation
*/
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>
+#include <rte_vect.h>
#include "fm10k.h"
#include "base/fm10k_api.h"
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE
-int fm10k_logtype_init;
-int fm10k_logtype_driver;
-
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
-static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
PMD_INIT_FUNC_TRACE();
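+	/* RSS implies delivery of the packet hash, so make the RSS_HASH
+	 * offload flag reflect the configured multi-queue mode.
+	 */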
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
!= FM10K_DGLORTMAP_NONE);
}
-static void
+static int
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Return if it didn't acquire a valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
- return;
+ return 0;
fm10k_mbx_lock(hw);
status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
FM10K_XCAST_MODE_PROMISC);
fm10k_mbx_unlock(hw);
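+	/* The xcast mode update travels over the mailbox and may be
+	 * rejected; report a retriable error in that case.
+	 */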
- if (status != FM10K_SUCCESS)
+ if (status != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
+ return -EAGAIN;
+ }
+
+ return 0;
}
-static void
+static int
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Return if it didn't acquire a valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
- return;
+ return 0;
if (dev->data->all_multicast == 1)
mode = FM10K_XCAST_MODE_ALLMULTI;
mode);
fm10k_mbx_unlock(hw);
- if (status != FM10K_SUCCESS)
+ if (status != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
+ return -EAGAIN;
+ }
+
+ return 0;
}
-static void
+static int
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Return if it didn't acquire a valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
- return;
+ return 0;
/* If promiscuous mode is enabled, it doesn't make sense to enable
* allmulticast and disable promiscuous since fm10k can only select
if (dev->data->promiscuous) {
PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
"needn't enable allmulticast");
- return;
+ return 0;
}
fm10k_mbx_lock(hw);
FM10K_XCAST_MODE_ALLMULTI);
fm10k_mbx_unlock(hw);
- if (status != FM10K_SUCCESS)
+ if (status != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
+ return -EAGAIN;
+ }
+
+ return 0;
}
-static void
+static int
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Return if it didn't acquire a valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
- return;
+ return 0;
if (dev->data->promiscuous) {
PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
"since promisc mode is enabled");
- return;
+ return -EINVAL;
}
fm10k_mbx_lock(hw);
FM10K_XCAST_MODE_NONE);
fm10k_mbx_unlock(hw);
- if (status != FM10K_SUCCESS)
+ if (status != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
+ return -EAGAIN;
+ }
+
+ return 0;
}
static void
return 0;
}
-static void
+static int
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int i;
PMD_INIT_FUNC_TRACE();
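+	/* Clear the started flag first; close() calls this helper directly,
+	 * bypassing the ethdev layer that normally maintains the flag.
+	 */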
+ dev->data->dev_started = 0;
if (dev->data->tx_queues)
for (i = 0; i < dev->data->nb_tx_queues; i++)
rte_intr_efd_disable(intr_handle);
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
+
+ return 0;
}
static void
}
}
-static void
-fm10k_dev_close(struct rte_eth_dev *dev)
-{
- struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- PMD_INIT_FUNC_TRACE();
-
- fm10k_mbx_lock(hw);
- hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
- MAX_LPORT_NUM, false);
- fm10k_mbx_unlock(hw);
-
- /* allow 100ms for device to quiesce */
- rte_delay_us(FM10K_SWITCH_QUIESCE_US);
-
- /* Stop mailbox service first */
- fm10k_close_mbx_service(hw);
- fm10k_dev_stop(dev);
- fm10k_dev_queue_release(dev);
- fm10k_stop_hw(hw);
-}
-
static int
fm10k_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
return 0;
}
-static void
+static int
fm10k_stats_reset(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
memset(hw_stats, 0, sizeof(*hw_stats));
fm10k_rebind_hw_stats(hw, hw_stats);
+
+ return 0;
}
-static void
+static int
fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+
+ return 0;
}
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
}
static int
-fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+fm10k_vlan_offload_set(struct rte_eth_dev *dev __rte_unused,
+ int mask __rte_unused)
{
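+	/* VLAN stripping and filtering are always on in fm10k and QinQ is
+	 * not supported, so there is nothing to reconfigure here.
+	 */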
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP))
- PMD_INIT_LOG(ERR, "VLAN stripping is "
- "always on in fm10k");
- }
-
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND)
- PMD_INIT_LOG(ERR, "VLAN QinQ is not "
- "supported in fm10k");
- }
-
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER))
- PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
- }
-
return 0;
}
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_HEADER_SPLIT);
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_RSS_HASH);
}
static int
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
q->offloads = offloads;
- if (handle_rxconf(q, conf))
+ if (handle_rxconf(q, conf)) {
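+		/* free the queue so the early error return does not leak it */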
+ rte_free(q);
return -EINVAL;
-
+ }
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
- if (handle_txconf(q, conf))
+ if (handle_txconf(q, conf)) {
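+		/* free the queue so the early error return does not leak it */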
+ rte_free(q);
return -EINVAL;
+ }
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
else
FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
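+	/* The interrupt source stays enabled; acknowledging it is enough. */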
- rte_intr_enable(&pdev->intr_handle);
+ rte_intr_ack(&pdev->intr_handle);
return 0;
}
true);
dev_info->sm_down = 0;
- _rte_eth_dev_callback_process(dev,
+ rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
if (err == FM10K_ERR_RESET_REQUESTED) {
PMD_INIT_LOG(INFO, "INT: Switch is down");
dev_info->sm_down = 1;
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/* Handle SRAM error */
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(dev->intr_handle);
+ rte_intr_ack(dev->intr_handle);
}
/**
/* Setting reset flag */
dev_info->sm_down = 1;
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
if (dev_info->sm_down == 1 &&
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
dev_info->sm_down = 0;
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/* Re-enable interrupt from device side */
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(dev->intr_handle);
+ rte_intr_ack(dev->intr_handle);
}
/* Mailbox message handler in VF */
hw->mbx.ops.disconnect(hw, &hw->mbx);
}
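+/* Shut down the logical port, stop the mailbox service and the queues,
+ * and release the interrupt resources for both the PF and the VF case.
+ */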
+static int
+fm10k_dev_close(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
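+	/* only close the device in the primary process */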
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, false);
+ fm10k_mbx_unlock(hw);
+
+ /* allow 100ms for device to quiesce */
+ rte_delay_us(FM10K_SWITCH_QUIESCE_US);
+
+ /* Stop mailbox service first */
+ fm10k_close_mbx_service(hw);
+
+ ret = fm10k_dev_stop(dev);
+
+ fm10k_dev_queue_release(dev);
+ fm10k_stop_hw(hw);
+
+ /* disable uio/vfio intr */
+ rte_intr_disable(intr_handle);
+
+	/* PF and VF have different interrupt handling mechanisms */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_pf(dev);
+
+ /* unregister callback func to eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+ } else {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_vf(dev);
+
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+ }
+
+ return ret;
+}
+
static const struct eth_dev_ops fm10k_eth_dev_ops = {
.dev_configure = fm10k_dev_configure,
.dev_start = fm10k_dev_start,
.rx_queue_release = fm10k_rx_queue_release,
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
- .rx_queue_count = fm10k_dev_rx_queue_count,
- .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
- .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
- .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
return nb_tx;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
struct fm10k_tx_queue *txq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/* primary process has set the ftag flag and offloads */
txq = dev->data->tx_queues[0];
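+		/* vector Tx also requires at least 128-bit SIMD at runtime */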
- if (fm10k_tx_vec_condition_check(txq)) {
+ if (fm10k_tx_vec_condition_check(txq) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
dev->tx_pkt_prepare = fm10k_prep_pkts;
PMD_INIT_LOG(DEBUG, "Use regular Tx func");
txq = dev->data->tx_queues[i];
txq->tx_ftag_en = tx_ftag_en;
/* Check if Vector Tx is satisfied */
- if (fm10k_tx_vec_condition_check(txq))
+ if (fm10k_tx_vec_condition_check(txq) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
use_sse = 0;
}
}
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
struct fm10k_dev_info *dev_info =
* conditions to be met.
*/
if (!fm10k_rx_vec_condition_check(dev) &&
- dev_info->rx_vec_allowed && !rx_ftag_en) {
+ dev_info->rx_vec_allowed && !rx_ftag_en &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
if (dev->data->scattered_rx)
dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
else
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &fm10k_eth_dev_ops;
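+	/* Fast-path status callbacks are installed on the device itself
+	 * rather than in the dev_ops table.
+	 */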
+ dev->rx_queue_count = fm10k_dev_rx_queue_count;
+ dev->rx_descriptor_done = fm10k_dev_rx_descriptor_done;
+ dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
dev->tx_pkt_prepare = &fm10k_prep_pkts;
}
rte_eth_copy_pci_info(dev, pdev);
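+	/* Let the ethdev layer fill the generic per-queue xstats. */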
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
hw->hw_addr = (void *)pdev->mem_resource[0].addr;
if (hw->hw_addr == NULL) {
PMD_INIT_LOG(ERR, "Bad mem resource."
- " Try to blacklist unused devices.");
+		" Try to block unused devices.");
return -EIO;
}
}
/* Reset the hw statistics */
- fm10k_stats_reset(dev);
+ diag = fm10k_stats_reset(dev);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
+ return diag;
+ }
/* Reset the hw */
diag = fm10k_reset_hw(hw);
/* Make sure Switch Manager is ready before going forward. */
if (hw->mac.type == fm10k_mac_pf) {
- int switch_ready = 0;
+ bool switch_ready = false;
for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
fm10k_mbx_lock(hw);
hw->mac.ops.get_host_state(hw, &switch_ready);
fm10k_mbx_unlock(hw);
			if (switch_ready)
break;
/* Delay some time to acquire async LPORT_MAP info. */
rte_delay_us(WAIT_SWITCH_MSG_US);
}
- if (switch_ready == 0) {
+		if (!switch_ready) {
PMD_INIT_LOG(ERR, "switch is not ready");
return -1;
}
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
- struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
-
- /* only uninitialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- /* safe to close dev here */
fm10k_dev_close(dev);
-
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
- /* disable uio/vfio intr */
- rte_intr_disable(intr_handle);
-
- /*PF/VF has different interrupt handling mechanism */
- if (hw->mac.type == fm10k_mac_pf) {
- /* disable interrupt */
- fm10k_dev_disable_intr_pf(dev);
-
- /* unregister callback func to eal lib */
- rte_intr_callback_unregister(intr_handle,
- fm10k_dev_interrupt_handler_pf, (void *)dev);
- } else {
- /* disable interrupt */
- fm10k_dev_disable_intr_vf(dev);
-
- rte_intr_callback_unregister(intr_handle,
- fm10k_dev_interrupt_handler_vf, (void *)dev);
- }
-
return 0;
}
static struct rte_pci_driver rte_pmd_fm10k = {
.id_table = pci_id_fm10k_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_fm10k_pci_probe,
.remove = eth_fm10k_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_INIT(fm10k_init_log)
-{
- fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
- if (fm10k_logtype_init >= 0)
- rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
- fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
- if (fm10k_logtype_driver >= 0)
- rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER_SUFFIX(fm10k_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(fm10k_logtype_driver, driver, NOTICE);
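+/* Data-path loggers are only registered in Rx/Tx debug builds. */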
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(fm10k_logtype_rx, rx, DEBUG);
+#endif
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(fm10k_logtype_tx, tx, DEBUG);
+#endif