I40E_PFINT_ICR0_ENA_GRST_MASK | \
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
- I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
I40E_PFINT_ICR0_ENA_VFLR_MASK | \
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
- __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
static struct eth_driver rte_i40e_pmd = {
.pci_drv = {
.id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = rte_eth_dev_pci_probe,
.remove = rte_eth_dev_pci_remove,
},
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
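+ /* Tx prepare callback: checks offload/segment constraints ahead of the Tx burst */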
+ dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
i40e_set_tx_function(dev);
return 0;
}
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
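+ /* OR the flag in so capabilities copied from the PCI device are kept */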
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
/* Disable double vlan by default */
i40e_vsi_config_double_vlan(vsi, FALSE);
- /* Disable S-TAG identification by default */
- ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
- if (ret & I40E_L2_TAGS_S_TAG_MASK) {
- ret &= ~I40E_L2_TAGS_S_TAG_MASK;
- I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+ /* Disable S-TAG identification when floating_veb is disabled */
+ if (!pf->floating_veb) {
+ ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
+ if (ret & I40E_L2_TAGS_S_TAG_MASK) {
+ ret &= ~I40E_L2_TAGS_S_TAG_MASK;
+ I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+ }
}
if (!vsi->max_macaddrs)
i40e_pf_host_init(dev);
/* register callback func to eal lib */
- rte_intr_callback_register(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
/* configure and enable device interrupt */
i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
int ret;
return 0;
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
- /* Disable LLDP */
- ret = i40e_aq_stop_lldp(hw, true, NULL);
- if (ret != I40E_SUCCESS) /* Its failure can be ignored */
- PMD_INIT_LOG(INFO, "Failed to stop lldp");
-
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* register callback func to eal lib */
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
return 0;
}
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
RTE_LIBRTE_I40E_ITR_INTERVAL);
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
hw->adapter_stopped = 0;
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
" no intr multiplex\n");
+ } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
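+ /* Negative logic: only link up/down, module qualification and
+ * media-not-available events remain unmasked.
+ */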
+ ret = i40e_aq_set_phy_int_mask(hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL |
+ I40E_AQ_EVENT_MEDIA_NA), NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Failed to set phy mask");
+
+ /* Call get_link_info aq command to enable Link Status Event (LSE) */
+ i40e_dev_link_update(dev, 0);
}
/* enable uio intr after callback register */
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
/* Disable all queues */
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
struct rte_eth_link link, old;
int status;
unsigned rep_cnt = MAX_REPEAT_TIME;
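+ /* Request a Link Status Event from firmware only when LSC interrupts are in use */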
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
memset(&link, 0, sizeof(link));
memset(&old, 0, sizeof(old));
do {
/* Get link status information from hardware */
- status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ status = i40e_aq_get_link_info(hw, enable_lse,
+ &link_status, NULL);
if (status != I40E_SUCCESS) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
rte_i40e_stats_strings[i].offset);
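+ /* ids are sequential and mirror the xstats name table order */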
+ xstats[count].id = count;
count++;
}
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
+ xstats[count].id = count;
count++;
}
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_rxq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_txq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
return -ENOSYS;
}
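+/* Report NVM version, eetrack id and OEM version in one string */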
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
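+ /* oem_ver layout, per the shifts below: 8-bit major, 16-bit build, 8-bit patch */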
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* add the size of '\0' */
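+ /* A return > 0 means fw_size was too small; it reports the size needed */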
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
.nb_max = I40E_MAX_RING_DESC,
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
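+ /* Segment limits matching the checks applied by the Tx prepare path */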
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
};
if (pf->flags & I40E_FLAG_VMDQ) {
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
- if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
/* VF queue/VSI allocation */
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
- if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- pf->vf_num = dev->pci_dev->max_vfs;
+ pf->vf_num = pci_dev->max_vfs;
PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
"in total %u queues", pf->vf_num, pf->vf_nb_qps,
pf->vf_nb_qps * pf->vf_num);
}
}
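+/* Push the PF's current link status to all VFs over the virtchnl event channel */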
+static void
+i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_virtchnl_pf_event event;
+ int i;
+
+ event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+ event.event_data.link_event.link_speed =
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ for (i = 0; i < pf->vf_num; i++)
+ i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
static void
i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
{
info.msg_buf,
info.msg_len);
break;
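+ /* Firmware signals link changes through this adminq event once LSE is enabled */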
+ case i40e_aqc_opc_get_link_status:
+ ret = i40e_dev_link_update(dev, 0);
+ if (!ret) {
+ i40e_notify_all_vfs_link_status(dev);
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+ break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
opcode);
rte_free(info.msg_buf);
}
-/*
- * Interrupt handler is registered as the alarm callback for handling LSC
- * interrupt in a definite of time, in order to wait the NIC into a stable
- * state. Currently it waits 1 sec in i40e for the link up interrupt, and
- * no need for link down interrupt.
- */
-static void
-i40e_dev_interrupt_delayed_handler(void *param)
-{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t icr0;
-
- /* read interrupt causes again */
- icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
- if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
- PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
- if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
- PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
- if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
- PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
- if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
- PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
- "state\n");
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
- if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
-
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
- i40e_dev_handle_vfr_event(dev);
- }
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
- i40e_dev_handle_aq_msg(dev);
- }
-
- /* handle the link up interrupt in an alarm callback */
- i40e_dev_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-
- i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
-}
-
/**
* Interrupt handler triggered by NIC for handling
* specific interrupt.
* void
*/
static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
i40e_dev_handle_aq_msg(dev);
}
- /* Link Status Change interrupt */
- if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-#define I40E_US_PER_SECOND 1000000
- struct rte_eth_link link;
-
- PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
- memset(&link, 0, sizeof(link));
- rte_i40e_dev_atomic_read_link_status(dev, &link);
- i40e_dev_link_update(dev, 0);
-
- /*
- * For link up interrupt, it needs to wait 1 second to let the
- * hardware be a stable state. Otherwise several consecutive
- * interrupts can be observed.
- * For link down interrupt, no need to wait.
- */
- if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
- i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
- return;
- else
- _rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
- }
-
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
}
static int
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CAP_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
}
buf = 0;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
return;
}
buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
- ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
- ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
- if (ret != I40E_SUCCESS)
- PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-
ret = i40e_init_dcb(hw);
- /* if sw_dcb, lldp agent is stopped, the return from
- * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
- * adminq status.
+ /* When the lldp agent is stopped, i40e_init_dcb is expected to fail
+ * with an I40E_AQ_RC_EPERM adminq status; otherwise it should
+ * succeed.
*/
- if (ret != I40E_SUCCESS &&
- hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
memset(&hw->local_dcbx_config, 0,
sizeof(struct i40e_dcbx_config));
/* set dcb default configuration */
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
+ PMD_INIT_LOG(ERR, "DCB initialization in FW failed,"
+ " err = %d, aq_err = %d.", ret,
hw->aq.asq_last_status);
return -ENOTSUP;
}
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;