}
i40e_set_default_ptype_table(dev);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- intr_handle = &pci_dev->intr_handle;
+ intr_handle = pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
goto err;
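For context on the recurring change above: after the interrupt-handle rework,
struct rte_intr_handle is opaque outside the EAL, so pci_dev->intr_handle is a
pointer and its contents are reached only through accessor functions. A minimal
sketch of the accessor style this patch migrates to (the checks are
illustrative, not lifted from the driver):

    #include <rte_interrupts.h>
    #include <rte_bus_pci.h>

    static int
    probe_intr_handle(struct rte_pci_device *pci_dev)
    {
            /* Opaque handle: no &pci_dev->intr_handle, no direct field access. */
            struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

            if (rte_intr_fd_get(intr_handle) < 0)
                    return -1; /* no usable event fd */
            if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UNKNOWN)
                    return -1;
            return 0;
    }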
/* VMDQ setup.
- * General PMD driver call sequence are NIC init, configure,
+ * General PMD call sequence is NIC init, configure,
* rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
* will try to lookup the VSI that specific queue belongs to if VMDQ
* applicable. So, VMDQ setting has to be done before
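To make the call order in this comment concrete, a minimal application-side
sketch (port 0, one Rx/Tx queue; port_conf and the mempool mp are assumptions
of this example, not part of the patch):

    struct rte_eth_conf port_conf = {0};
    struct rte_mempool *mp; /* assumed: created with rte_pktmbuf_pool_create() */
    int ret;

    /* NIC init runs in the PMD during probe; the application then: */
    ret = rte_eth_dev_configure(0, 1, 1, &port_conf);            /* configure */
    if (ret == 0)
            ret = rte_eth_rx_queue_setup(0, 0, 1024,
                            rte_eth_dev_socket_id(0), NULL, mp); /* rx_queue_setup */
    if (ret == 0)
            ret = rte_eth_tx_queue_setup(0, 0, 1024,
                            rte_eth_dev_socket_id(0), NULL);     /* tx_queue_setup */
    if (ret == 0)
            ret = rte_eth_dev_start(0);                          /* dev_start */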
{
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
{
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
- uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+ rte_intr_nb_efd_get(intr_handle));
uint16_t queue_idx = 0;
int record = 0;
int i;
vsi->nb_used_qps - i,
itr_idx);
for (; !!record && i < vsi->nb_used_qps; i++)
- intr_handle->intr_vec[queue_idx + i] =
- msix_vect;
+ rte_intr_vec_list_index_set(intr_handle,
+ queue_idx + i, msix_vect);
break;
}
/* 1:1 queue/msix_vect mapping */
vsi->base_queue + i, 1,
itr_idx);
if (!!record)
- intr_handle->intr_vec[queue_idx + i] = msix_vect;
+ if (rte_intr_vec_list_index_set(intr_handle,
+ queue_idx + i, msix_vect))
+ return -rte_errno;
msix_vect++;
nb_msix--;
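The two hunks above replace direct writes to intr_handle->intr_vec[]. A hedged
sketch of the setter/getter pair, assuming the list was sized earlier with
rte_intr_vec_list_alloc():

    /* Record the queue -> MSI-X vector mapping, then read it back. */
    if (rte_intr_vec_list_index_set(intr_handle, queue_id, msix_vect))
            return -rte_errno; /* no list allocated or index out of range */

    int vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
    if (vec < 0)
            return vec; /* negative value on failure */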
{
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
{
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
uint16_t nb_rxq, nb_txq;
return ret;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int),
- 0);
- if (!intr_handle->intr_vec) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d rx_queues intr_vec",
dev->data->nb_rx_queues);
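The allocation above pairs with the free added in dev_stop below; a minimal
lifecycle sketch (error handling abbreviated):

    /* One vector slot per Rx queue, sized at start... */
    if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
                                dev->data->nb_rx_queues))
            return -rte_errno;

    /* ...filled via rte_intr_vec_list_index_set() while running... */

    /* ...and released on stop (a no-op if nothing was allocated). */
    rte_intr_vec_list_free(intr_handle);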
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
if (hw->adapter_stopped == 1)
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+
+ /* Clean up the vector list */
+ rte_intr_vec_list_free(intr_handle);
/* reset hierarchy commit */
pf->tm_conf.committed = false;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_filter_control_settings settings;
struct rte_flow *p_flow;
uint32_t reg;
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
rte_i40e_hw_port_strings[i].name,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
- "VSI failed to get TC bandwdith configuration %u",
+ "VSI failed to get TC bandwidth configuration %u",
hw->aq.asq_last_status);
return ret;
}
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
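The param described here is supplied when the handler is registered; in this
driver that happens with the standard rte_intr_callback_register() call, along
these lines:

    /* `dev` is handed back as `param` on every invocation. */
    rte_intr_callback_register(intr_handle,
                               i40e_dev_interrupt_handler, (void *)dev);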
return 0;
}
-/* Check if there exists the ehtertype filter */
+/* Check if there exists the ethertype filter */
struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = intr_handle->intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
- rte_intr_ack(&pci_dev->intr_handle);
+ rte_intr_ack(pci_dev->intr_handle);
return 0;
}
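These enable/disable hooks back rte_eth_dev_rx_intr_enable() and friends; a
hedged sketch of how an application typically drives them (epoll wiring via
rte_eth_dev_rx_intr_ctl_q(); port_id and queue_id are illustrative):

    /* Arm the queue interrupt and sleep until traffic arrives. */
    rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                              RTE_INTR_EVENT_ADD, NULL);
    rte_eth_dev_rx_intr_enable(port_id, queue_id);

    struct rte_epoll_event ev;
    rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* block */);

    /* Back to polling: mask the interrupt and drain with rx_burst(). */
    rte_eth_dev_rx_intr_disable(port_id, queue_id);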
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = intr_handle->intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);