Number of queues reserved for each VMDQ Pool.
-- ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` (default ``-1``)
-
- Interrupt Throttling interval.
-
-
Runtime Config Options
~~~~~~~~~~~~~~~~~~~~~~
The i40e PMD supports both 16-byte and 32-byte RX descriptors, and the 16-byte size can help improve performance for small packets.
``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` in the config files can be changed to use 16-byte RX descriptors (see the sketch below).
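For illustration only (not part of this patch), a minimal sketch of enabling the 16-byte layout, assuming the legacy make-based build where the option lives in ``config/common_base``:

    # config/common_base (assumed location; default is 'n', rebuild DPDK after changing)
    CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y
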
-High Performance and per Packet Latency Tradeoff
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Due to the hardware design, the interrupt signal inside NIC is needed for per
-packet descriptor write-back. The minimum interval of interrupts could be set
-at compile time by ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` in configuration files.
-Though there is a default configuration, the interval could be tuned by the
-users with that configuration item depends on what the user cares about more,
-performance or per packet latency.
-
Example of getting best performance with l3fwd example
------------------------------------------------------
#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv)
+i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
- if (is_multi_drv) {
- interval = I40E_QUEUE_ITR_INTERVAL_MAX;
- } else {
- if (is_pf)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
- else
- interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
- }
+ uint16_t interval = 0;
+
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
}
/* Convert to hardware count, as writing each 1 represents 2 us */
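To make the conversion concrete (illustration only, not part of this patch): each hardware count represents 2 us, so the new 32 us defaults are programmed as 16 and the 8160 us multi-driver maximum as 4080. A sketch of the helper's assumed tail, which this hunk does not show:

    	/* 1 hardware count = 2 us, e.g. 32 us -> 16 */
    	return interval / 2;
    }
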
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
done:
i40evf_enable_irq0(hw);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
static int
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
return;
i40evf_stop_queues(dev);
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
/*
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
-
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}