static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
-static int ixgbe_dev_configure(struct rte_eth_dev *dev);
-static int ixgbe_dev_start(struct rte_eth_dev *dev);
-static void ixgbe_dev_stop(struct rte_eth_dev *dev);
-static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
-static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
-static void ixgbe_dev_close(struct rte_eth_dev *dev);
-static int ixgbe_dev_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_configure(struct rte_eth_dev *dev);
+static int ixgbe_dev_start(struct rte_eth_dev *dev);
+static int ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
+static int ixgbe_dev_close(struct rte_eth_dev *dev);
+static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
-static void ixgbe_dev_cancel_link_thread(struct rte_eth_dev *dev);
+static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
+ uint32_t timeout_ms);
static int ixgbe_add_rar(struct rte_eth_dev *dev,
struct rte_ether_addr *mac_addr,
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
-static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
-int ixgbe_logtype_init;
-int ixgbe_logtype_driver;
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
-int ixgbe_logtype_rx;
-#endif
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
-int ixgbe_logtype_tx;
-#endif
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
-int ixgbe_logtype_tx_free;
-#endif
-
/*
* The set of PCI devices this driver supports
*/
.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_queue_count = ixgbe_dev_rx_queue_count,
- .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
- .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
- .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
ixgbe_dev_macsec_setting_reset(eth_dev);
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+ eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
+ eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
rte_atomic32_clear(&ad->link_thread_running);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
diag = ixgbe_bypass_init_hw(hw);
#else
diag = ixgbe_init_hw(hw);
- hw->mac.autotry_restart = false;
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
/*
return -ENOMEM;
}
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
/* enable support intr */
ixgbe_enable_intr(eth_dev);
- ixgbe_dev_set_link_down(eth_dev);
-
/* initialize filter info */
memset(filter_info, 0,
sizeof(struct ixgbe_filter_info));
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
return -ENOMEM;
}
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
/* Generate a random MAC address, if none was assigned by PF. */
if (rte_is_zero_ether_addr(perm_addr)) {
generate_random_mac_addr(perm_addr);
int err;
uint32_t mflcn;
+ ixgbe_setup_fc(hw);
+
err = ixgbe_fc_enable(hw);
/* Not negotiated is not an error case */
PMD_INIT_FUNC_TRACE();
/* Stop the link setup handler before resetting the HW. */
- ixgbe_dev_cancel_link_thread(dev);
+ ixgbe_dev_wait_setup_link_complete(dev, 0);
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
-static void
+static int
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
if (hw->adapter_stopped)
- return;
+ return 0;
PMD_INIT_FUNC_TRACE();
- ixgbe_dev_cancel_link_thread(dev);
+ ixgbe_dev_wait_setup_link_complete(dev, 0);
/* disable interrupts */
ixgbe_disable_intr(hw);
adapter->rss_reta_updated = 0;
- adapter->mac_ctrl_frame_fwd = 0;
-
hw->adapter_stopped = true;
+ dev->data->dev_started = 0;
+
+ return 0;
}
/*
/*
* Reset and stop device.
*/
-static void
+static int
ixgbe_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
int ret;
PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
ixgbe_pf_reset_hw(hw);
- ixgbe_dev_stop(dev);
+ ret = ixgbe_dev_stop(dev);
ixgbe_dev_free_queues(dev);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
/* Unlock any pending hardware semaphore */
ixgbe_swfw_lock_reset(hw);
rte_free(dev->security_ctx);
#endif
+ return ret;
}
/*
dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
return ptypes;
-#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
+#if defined(RTE_ARCH_X86) || defined(__ARM_NEON)
if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
return ptypes;
return ret_val;
}
-static void
-ixgbe_dev_cancel_link_thread(struct rte_eth_dev *dev)
+/*
+ * Wait for the link-setup thread (if one is running) to complete.
+ * If @timeout_ms is 0, do not return until setup is complete; an error
+ * is logged periodically while still waiting.
+ * Returns 1 on completion, 0 on timeout.
+ */
+static int
+ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms)
{
+#define WARNING_TIMEOUT 9000 /* 9s in total */
struct ixgbe_adapter *ad = dev->data->dev_private;
- void *retval;
-
- if (rte_atomic32_read(&ad->link_thread_running)) {
- pthread_cancel(ad->link_thread_tid);
- pthread_join(ad->link_thread_tid, &retval);
- rte_atomic32_clear(&ad->link_thread_running);
+ uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
+
+ /* Poll in 1 ms steps until the link thread clears its running flag. */
+ while (rte_atomic32_read(&ad->link_thread_running)) {
+ msec_delay(1);
+ timeout--;
+
+ if (timeout_ms) {
+ if (!timeout)
+ return 0;
+ } else if (!timeout) {
+ /* Caller asked to wait indefinitely: warn, then keep waiting. */
+ timeout = WARNING_TIMEOUT;
+ PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!");
+ }
}
+
+ return 1;
}
static void *
u32 speed;
bool autoneg = false;
+ pthread_detach(pthread_self());
speed = hw->phy.autoneg_advertised;
if (!speed)
ixgbe_get_link_capabilities(hw, &speed, &autoneg);
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
wait = 0;
+/* BSD has no interrupt mechanism, so force NIC status synchronization. */
+#ifdef RTE_EXEC_ENV_FREEBSD
+ wait = 1;
+#endif
+
if (vf)
diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
else
if (link_up == 0) {
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
- intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ ixgbe_dev_wait_setup_link_complete(dev, 0);
if (rte_atomic32_test_and_set(&ad->link_thread_running)) {
+ /* To avoid race condition between threads, set
+ * the IXGBE_FLAG_NEED_LINK_CONFIG flag only
+ * when there is no link thread running.
+ */
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
if (rte_ctrl_thread_create(&ad->link_thread_tid,
"ixgbe-link-handler",
NULL,
switch (link_speed) {
default:
case IXGBE_LINK_SPEED_UNKNOWN:
- if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
- hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
- link.link_speed = ETH_SPEED_NUM_10M;
- else
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ break;
+
+ case IXGBE_LINK_SPEED_10_FULL:
+ link.link_speed = ETH_SPEED_NUM_10M;
break;
case IXGBE_LINK_SPEED_100_FULL:
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
* MFLCN register.
*/
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ if (mflcn_reg & IXGBE_MFLCN_PMCF)
+ fc_conf->mac_ctrl_frame_fwd = 1;
+ else
+ fc_conf->mac_ctrl_frame_fwd = 0;
+
if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
rx_pause = 1;
else
PMD_INIT_FUNC_TRACE();
/* Stop the link setup handler before resetting the HW. */
- ixgbe_dev_cancel_link_thread(dev);
+ ixgbe_dev_wait_setup_link_complete(dev, 0);
err = hw->mac.ops.reset_hw(hw);
- if (err) {
+
+ /**
+ * In this case, reuses the MAC address assigned by VF
+ * initialization.
+ */
+ if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) {
PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
return err;
}
+
hw->mac.get_link_status = true;
/* negotiate mailbox API version to use with the PF. */
return 0;
}
-static void
+static int
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped)
- return;
+ return 0;
PMD_INIT_FUNC_TRACE();
- ixgbe_dev_cancel_link_thread(dev);
+ ixgbe_dev_wait_setup_link_complete(dev, 0);
ixgbevf_intr_disable(dev);
+ dev->data->dev_started = 0;
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
}
adapter->rss_reta_updated = 0;
+
+ return 0;
}
-static void
+static int
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int ret;
PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
ixgbe_reset_hw(hw);
- ixgbevf_dev_stop(dev);
+ ret = ixgbevf_dev_stop(dev);
ixgbe_dev_free_queues(dev);
**/
ixgbevf_remove_mac_addr(dev, 0);
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
ixgbevf_dev_interrupt_handler, dev);
+
+ return ret;
}
/*
static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
- __attribute__((unused)) uint32_t index,
- __attribute__((unused)) uint32_t pool)
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int diag;
}
static u8 *
-ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
+ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
{
u8 *mc_addr;
/* dummy mbx read to ack pf */
if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
return;
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
}
}
RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
-RTE_INIT(ixgbe_init_log)
-{
- ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
- if (ixgbe_logtype_init >= 0)
- rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
- ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
- if (ixgbe_logtype_driver >= 0)
- rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+RTE_LOG_REGISTER(ixgbe_logtype_init, pmd.net.ixgbe.init, NOTICE);
+RTE_LOG_REGISTER(ixgbe_logtype_driver, pmd.net.ixgbe.driver, NOTICE);
+
#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
- ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx");
- if (ixgbe_logtype_rx >= 0)
- rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG);
+RTE_LOG_REGISTER(ixgbe_logtype_rx, pmd.net.ixgbe.rx, DEBUG);
#endif
-
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
- ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx");
- if (ixgbe_logtype_tx >= 0)
- rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG);
+RTE_LOG_REGISTER(ixgbe_logtype_tx, pmd.net.ixgbe.tx, DEBUG);
#endif
-
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
- ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free");
- if (ixgbe_logtype_tx_free >= 0)
- rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG);
+RTE_LOG_REGISTER(ixgbe_logtype_tx_free, pmd.net.ixgbe.tx_free, DEBUG);
#endif
-}