#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
+/* Devarg key: passing "fiber_sdp3_no_tx_disable=1" tells the PMD not to
+ * interpret the SDP3 pin as a TX-disable indication on fiber ports
+ * (consumed by ixgbe_parse_devargs() and the link-status update path).
+ */
+#define IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE "fiber_sdp3_no_tx_disable"
+
+/* NULL-terminated list of devarg keys the PF PMD accepts. */
+static const char * const ixgbe_valid_arguments[] = {
+ IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE,
+ NULL
+};
+
#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk"
static const char * const ixgbevf_valid_arguments[] = {
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);
+static int devarg_handle_int(__rte_unused const char *key, const char *value,
+ void *extra_args);
/*
* Define VF Stats MACRO for Non "cleared on read" register
case ixgbe_phy_sfp_passive_unknown:
return 1;
default:
+ /* x550em devices may be SFP, check media type */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ switch (ixgbe_get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ return 1;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
return 0;
}
}
ixgbe_release_swfw_semaphore(hw, mask);
}
+/*
+ * Parse the driver-specific device arguments attached to this PCI device.
+ *
+ * Only IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE is recognized: when the user
+ * passes "fiber_sdp3_no_tx_disable=1" exactly once, the adapter flag
+ * sdp3_no_tx_disable is set, which the link-status path uses to skip the
+ * SDP3 TX-disable check on fiber ports.
+ *
+ * A NULL devargs pointer or an unparsable argument string is silently
+ * ignored, leaving the adapter defaults untouched.
+ */
+static void
+ixgbe_parse_devargs(struct ixgbe_adapter *adapter,
+ struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ uint16_t sdp3_no_tx_disable;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, ixgbe_valid_arguments);
+ if (kvlist == NULL)
+ return;
+
+ /* Accept the flag only when supplied exactly once, the value parses
+ * successfully via devarg_handle_int, and it equals 1; the
+ * short-circuit ordering guarantees sdp3_no_tx_disable is only read
+ * after rte_kvargs_process() returned success.
+ */
+ if (rte_kvargs_count(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE) == 1 &&
+ rte_kvargs_process(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE,
+ devarg_handle_int, &sdp3_no_tx_disable) == 0 &&
+ sdp3_no_tx_disable == 1)
+ adapter->sdp3_no_tx_disable = 1;
+
+ rte_kvargs_free(kvlist);
+}
+
/*
* This function is based on code in ixgbe_attach() in base/ixgbe.c.
* It returns 0 on success.
}
rte_atomic32_clear(&ad->link_thread_running);
+ ixgbe_parse_devargs(eth_dev->data->dev_private,
+ pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* initialize PF if max_vfs not zero */
ret = ixgbe_pf_host_init(eth_dev);
- if (ret) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- rte_free(eth_dev->data->hash_mac_addrs);
- eth_dev->data->hash_mac_addrs = NULL;
- return ret;
- }
+ if (ret)
+ goto err_pf_host_init;
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
/* let hardware know driver is loaded */
TAILQ_INIT(&filter_info->fivetuple_list);
/* initialize flow director filter list & hash */
- ixgbe_fdir_filter_init(eth_dev);
+ ret = ixgbe_fdir_filter_init(eth_dev);
+ if (ret)
+ goto err_fdir_filter_init;
/* initialize l2 tunnel filter list & hash */
- ixgbe_l2_tn_filter_init(eth_dev);
+ ret = ixgbe_l2_tn_filter_init(eth_dev);
+ if (ret)
+ goto err_l2_tn_filter_init;
/* initialize flow filter lists */
ixgbe_filterlist_init();
ixgbe_tm_conf_init(eth_dev);
return 0;
+
+err_l2_tn_filter_init:
+ ixgbe_fdir_filter_uninit(eth_dev);
+err_fdir_filter_init:
+ ixgbe_disable_intr(hw);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ ixgbe_pf_host_uninit(eth_dev);
+err_pf_host_init:
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ rte_free(eth_dev->data->hash_mac_addrs);
+ eth_dev->data->hash_mac_addrs = NULL;
+ return ret;
}
static int
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
struct ixgbe_fdir_filter *fdir_filter;
- if (fdir_info->hash_map)
- rte_free(fdir_info->hash_map);
- if (fdir_info->hash_handle)
- rte_hash_free(fdir_info->hash_handle);
+ rte_free(fdir_info->hash_map);
+ rte_hash_free(fdir_info->hash_handle);
while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list,
IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
struct ixgbe_l2_tn_filter *l2_tn_filter;
- if (l2_tn_info->hash_map)
- rte_free(l2_tn_info->hash_map);
- if (l2_tn_info->hash_handle)
- rte_hash_free(l2_tn_info->hash_handle);
+ rte_free(l2_tn_info->hash_map);
+ rte_hash_free(l2_tn_info->hash_handle);
while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
}
}
- /* confiugre msix for sleep until rx interrupt */
+ /* configure MSI-X for sleep until Rx interrupt */
ixgbe_configure_msix(dev);
/* initialize transmission unit */
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link up is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link down is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
#ifdef RTE_LIB_SECURITY
rte_free(dev->security_ctx);
+ dev->security_ctx = NULL;
#endif
return ret;
return rte_eth_linkstatus_set(dev, &link);
}
- if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber &&
+ !ad->sdp3_no_tx_disable) {
esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
if ((esdp_reg & IXGBE_ESDP_SDP3))
link_up = 0;
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
- * as IXGBE_VF_MAXMSIVECOTR = 1
+ * as IXGBE_VF_MAXMSIVECTOR = 1
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
rte_intr_vec_list_index_set(intr_handle, q_idx,
* @param
* dev: Pointer to struct rte_eth_dev.
* index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
+ * filter: pointer to the filter that will be added.
* rx_queue: the queue id the filter assigned to.
*
* @return
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
return 0;
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe,
+ IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE "=<0|1>");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");