The Rx interrupt feature is now part of the standard ABI.
Because of changes in struct rte_intr_handle and struct rte_eth_conf,
the EAL and ethdev library versions are incremented.
Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
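For reference, this is how an application consumes the feature once it is
always available. A minimal sketch, assuming a port already configured with
intr_conf.rxq = 1; the helper name sleep_until_rx is illustrative, and the
epoll registration would normally be done once at initialization rather than
on every sleep:

#include <rte_ethdev.h>
#include <rte_interrupts.h>     /* rte_epoll_wait, RTE_EPOLL_PER_THREAD */

/* Block until a packet arrives on (port_id, queue_id), then resume polling. */
static void
sleep_until_rx(uint8_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event event;

        /* register the queue's interrupt event fd with this thread's epoll */
        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);

        rte_eth_dev_rx_intr_enable(port_id, queue_id);       /* arm */
        rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1); /* sleep */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);      /* poll again */
}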
There is no backward compatibility planned from release 2.2.
All binaries will need to be rebuilt for release 2.2.
-* ABI changes are planned for struct rte_intr_handle, struct rte_eth_conf
- and struct eth_dev_ops to support interrupt mode feature from release 2.1.
- Those changes may be enabled in the release 2.1 with CONFIG_RTE_NEXT_ABI.
-
* The EAL function rte_eal_pci_close_one is deprecated because it has been
  renamed to rte_eal_pci_detach.
ABI Changes
-----------
+* The EAL and ethdev structures rte_intr_handle and rte_eth_conf were changed
+  to support the Rx interrupt feature. This was already available in release
+  2.1 when CONFIG_RTE_NEXT_ABI was enabled.
+
Shared Library Versions
-----------------------
.. code-block:: diff
- libethdev.so.1
+ + libethdev.so.2
librte_acl.so.1
librte_cfgfile.so.1
librte_cmdline.so.1
librte_distributor.so.1
- librte_eal.so.1
+ + librte_eal.so.2
librte_hash.so.1
librte_ip_frag.so.1
librte_ivshmem.so.1
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
-#ifdef RTE_NEXT_ABI
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
-#endif
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
-#ifdef RTE_NEXT_ABI
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset);
-#endif
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
/*
.vlan_tpid_set = eth_igb_vlan_tpid_set,
.vlan_offload_set = eth_igb_vlan_offload_set,
.rx_queue_setup = eth_igb_rx_queue_setup,
-#ifdef RTE_NEXT_ABI
.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
-#endif
.rx_queue_release = eth_igb_rx_queue_release,
.rx_queue_count = eth_igb_rx_queue_count,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int ret, mask;
-#ifdef RTE_NEXT_ABI
uint32_t intr_vector = 0;
-#endif
uint32_t ctrl_ext;
PMD_INIT_FUNC_TRACE();
/* configure PF module if SRIOV enabled */
igb_pf_host_configure(dev);
-#ifdef RTE_NEXT_ABI
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
return -ENOMEM;
}
}
-#endif
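The elided lines above follow a sequence repeated in each PMD touched here; a
sketch of that sequence (names taken from the surrounding hunk, log messages
omitted):

        /* one event fd per Rx queue; 0 leaves the mapping disabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                intr_vector = dev->data->nb_rx_queues;

        if (rte_intr_efd_enable(intr_handle, intr_vector))
                return -1;

        if (rte_intr_dp_is_en(intr_handle)) {
                /* queue-to-vector table used by rx_queue_intr_enable() */
                intr_handle->intr_vec = rte_zmalloc("intr_vec",
                                dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL)
                        return -ENOMEM;
        }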
/* configure msix for rx interrupt */
eth_igb_configure_msix_intr(dev);
" no intr multiplex\n");
}
-#ifdef RTE_NEXT_ABI
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
eth_igb_rxq_interrupt_setup(dev);
-#endif
/* enable uio/vfio intr/eventfd mapping */
rte_intr_enable(intr_handle);
}
filter_info->twotuple_mask = 0;
-#ifdef RTE_NEXT_ABI
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
-#endif
}
static void
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
-#ifdef RTE_NEXT_ABI
struct rte_pci_device *pci_dev;
-#endif
eth_igb_stop(dev);
adapter->stopped = 1;
igb_dev_free_queues(dev);
-#ifdef RTE_NEXT_ABI
pci_dev = dev->pci_dev;
if (pci_dev->intr_handle.intr_vec) {
rte_free(pci_dev->intr_handle.intr_vec);
pci_dev->intr_handle.intr_vec = NULL;
}
-#endif
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
return 0;
}
-#ifdef RTE_NEXT_ABI
/* It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
*
return 0;
}
-#endif
/*
* It reads ICR and gets the interrupt causes, checks them and sets a bit flag
.init = rte_igbvf_pmd_init,
};
-#ifdef RTE_NEXT_ABI
static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
8 * direction);
}
}
-#endif
/* Sets up the hardware to generate MSI-X interrupts properly
* @hw
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
-#ifdef RTE_NEXT_ABI
int queue_id;
uint32_t tmpval, regval, intr_mask;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t vec = 0;
-#endif
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
if (!rte_intr_dp_is_en(intr_handle))
return;
-#ifdef RTE_NEXT_ABI
/* set interrupt vector for other causes */
if (hw->mac.type == e1000_82575) {
tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
}
E1000_WRITE_FLUSH(hw);
-#endif
}
PMD_REGISTER_DRIVER(pmd_igb_drv);
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
-#ifdef RTE_NEXT_ABI
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-#endif
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
void *param);
-#ifdef RTE_NEXT_ABI
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
-#endif
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
-#ifdef RTE_NEXT_ABI
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
-#endif
static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
.tx_queue_start = ixgbe_dev_tx_queue_start,
.tx_queue_stop = ixgbe_dev_tx_queue_stop,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
-#ifdef RTE_NEXT_ABI
.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
-#endif
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
-#ifdef RTE_NEXT_ABI
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
-#endif
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
uint32_t intr_vector = 0;
-#endif
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
/* configure PF module if SRIOV enabled */
ixgbe_pf_host_configure(dev);
-#ifdef RTE_NEXT_ABI
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
return -ENOMEM;
}
}
-#endif
/* configure msix for sleep until rx interrupt */
ixgbe_configure_msix(dev);
" no intr multiplex\n");
}
-#ifdef RTE_NEXT_ABI
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
ixgbe_dev_rxq_interrupt_setup(dev);
-#endif
/* enable uio/vfio intr/eventfd mapping */
rte_intr_enable(intr_handle);
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-#ifdef RTE_NEXT_ABI
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
-#endif
}
/*
* - On success, zero.
* - On failure, a negative value.
*/
-#ifdef RTE_NEXT_ABI
static int
ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
return 0;
}
-#endif
/*
* It reads ICR and sets the flag (IXGBE_EICR_LSC) for the link_update.
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
uint32_t intr_vector = 0;
-#endif
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int err, mask = 0;
ixgbevf_dev_rxtx_start(dev);
-#ifdef RTE_NEXT_ABI
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
return -ENOMEM;
}
}
-#endif
ixgbevf_configure_msix(dev);
if (dev->data->dev_conf.intr_conf.lsc != 0) {
/* disable intr eventfd mapping */
rte_intr_disable(intr_handle);
-#ifdef RTE_NEXT_ABI
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
-#endif
}
static void
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
struct rte_pci_device *pci_dev;
-#endif
PMD_INIT_FUNC_TRACE();
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-#ifdef RTE_NEXT_ABI
pci_dev = dev->pci_dev;
if (pci_dev->intr_handle.intr_vec) {
rte_free(pci_dev->intr_handle.intr_vec);
pci_dev->intr_handle.intr_vec = NULL;
}
-#endif
}
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
return 0;
}
-#ifdef RTE_NEXT_ABI
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
}
}
}
-#endif
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
uint32_t vector_idx = 0;
-#endif
/* won't configure msix register if no mapping is done
* between intr vector and event fd.
if (!rte_intr_dp_is_en(intr_handle))
return;
-#ifdef RTE_NEXT_ABI
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queues to use vector 0,
/* Configure VF Rx queue ivar */
ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
-#endif
}
/**
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t queue_id, vec = 0;
uint32_t mask;
uint32_t gpie;
-#endif
/* won't configure msix register if no mapping is done
* between intr vector and event fd
if (!rte_intr_dp_is_en(intr_handle))
return;
-#ifdef RTE_NEXT_ABI
/* setup GPIE for MSI-x mode */
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
-#endif
}
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
},
.intr_conf = {
.lsc = 1,
-#ifdef RTE_NEXT_ABI
.rxq = 1,
-#endif
},
};
EXPORT_MAP := rte_eal_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) := eal.c
int fd; /**< file descriptor */
int uio_cfg_fd; /**< UIO config file descriptor */
enum rte_intr_handle_type type; /**< handle type */
-#ifdef RTE_NEXT_ABI
int max_intr; /**< max interrupt requested */
uint32_t nb_efd; /**< number of available efds */
int *intr_vec; /**< intr vector number array */
-#endif
};
/**
EXPORT_MAP := rte_eal_version.map
-LIBABIVER := 1
+LIBABIVER := 2
VPATH += $(RTE_SDK)/lib/librte_eal/common
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
-#ifdef RTE_NEXT_ABI
if (!intr_handle->max_intr)
intr_handle->max_intr = 1;
else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;
irq_set->count = intr_handle->max_intr;
-#else
- irq_set->count = 1;
-#endif
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
-#ifdef RTE_NEXT_ABI
memcpy(fd_ptr, intr_handle->efds, sizeof(intr_handle->efds));
fd_ptr[intr_handle->max_intr - 1] = intr_handle->fd;
-#else
- fd_ptr[0] = intr_handle->fd;
-#endif
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
return -ret;
}
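The resulting eventfd array has the layout below; this is a reading of the
code above, with max_intr == N:

        /*
         * fd_ptr[0] .. fd_ptr[N-2] = intr_handle->efds[...]
         *                            one eventfd per Rx queue vector
         * fd_ptr[N-1]              = intr_handle->fd
         *                            miscellaneous interrupt (e.g. LSC)
         *
         * VFIO triggers fd_ptr[i] when MSI-X vector i fires.
         */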
-#ifdef RTE_NEXT_ABI
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
return;
} while (1);
}
-#endif
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
return 0;
}
-#ifdef RTE_NEXT_ABI
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
int op, unsigned int vec, void *data)
{
return !!(intr_handle->max_intr - intr_handle->nb_efd);
}
-
-#else
-int
-rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
- int epfd, int op, unsigned int vec, void *data)
-{
- RTE_SET_USED(intr_handle);
- RTE_SET_USED(epfd);
- RTE_SET_USED(op);
- RTE_SET_USED(vec);
- RTE_SET_USED(data);
- return -ENOTSUP;
-}
-
-int
-rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
-{
- RTE_SET_USED(intr_handle);
- RTE_SET_USED(nb_efd);
- return 0;
-}
-
-void
-rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
-{
- RTE_SET_USED(intr_handle);
-}
-
-int
-rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
-{
- RTE_SET_USED(intr_handle);
- return 0;
-}
-
-int
-rte_intr_allow_others(struct rte_intr_handle *intr_handle)
-{
- RTE_SET_USED(intr_handle);
- return 1;
-}
-#endif
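The rte_intr_allow_others() check above (return !!(max_intr - nb_efd)) relies
on an invariant assumed to be established by rte_intr_efd_enable():

        /*
         * nb_efd   == n      event fds backing the Rx queue vectors
         * max_intr == n + 1  one extra slot reserved for the miscellaneous
         *                    interrupt (e.g. link status change)
         *
         * so max_intr - nb_efd != 0 means non-queue interrupts are allowed.
         */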
};
int fd; /**< interrupt event file descriptor */
enum rte_intr_handle_type type; /**< handle type */
-#ifdef RTE_NEXT_ABI
uint32_t max_intr; /**< max interrupt requested */
uint32_t nb_efd; /**< number of available efd(event fd) */
int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */
struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID];
/**< intr vector epoll event */
int *intr_vec; /**< intr vector number array */
-#endif
};
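How the new fields cooperate can be seen in rte_eth_dev_rx_intr_ctl_q(); a
simplified sketch of that path (error checks trimmed, function name
hypothetical):

static int
rx_intr_ctl_q_sketch(struct rte_eth_dev *dev, uint16_t queue_id,
                     int epfd, int op, void *data)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        uint32_t vec;

        if (intr_handle->intr_vec == NULL)
                return -EPERM;

        vec = intr_handle->intr_vec[queue_id];  /* queue -> MSI-X vector */
        /* add/remove the vector's event fd (efds[]/elist[]) on epfd */
        return rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
}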
#define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */
EXPORT_MAP := rte_ether_version.map
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-y += rte_ethdev.c
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
-#ifdef RTE_NEXT_ABI
int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
{
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
-#else
-int
-rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id)
-{
- RTE_SET_USED(port_id);
- RTE_SET_USED(queue_id);
- return -ENOTSUP;
-}
-
-int
-rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id)
-{
- RTE_SET_USED(port_id);
- RTE_SET_USED(queue_id);
- return -ENOTSUP;
-}
-
-int
-rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
-{
- RTE_SET_USED(port_id);
- RTE_SET_USED(epfd);
- RTE_SET_USED(op);
- RTE_SET_USED(data);
- return -1;
-}
-
-int
-rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
- int epfd, int op, void *data)
-{
- RTE_SET_USED(port_id);
- RTE_SET_USED(queue_id);
- RTE_SET_USED(epfd);
- RTE_SET_USED(op);
- RTE_SET_USED(data);
- return -1;
-}
-#endif
#ifdef RTE_NIC_BYPASS
int rte_eth_dev_bypass_init(uint8_t port_id)
struct rte_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
uint16_t lsc;
-#ifdef RTE_NEXT_ABI
/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
uint16_t rxq;
-#endif
};
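With rxq now an unconditional member, an application requests Rx queue
interrupts at configure time with no build-time guard. A minimal sketch,
assuming one Rx/Tx queue and otherwise default settings:

static int
configure_with_rx_intr(uint8_t port_id)
{
        struct rte_eth_conf conf = {
                .intr_conf = {
                        .lsc = 1,       /* link status change interrupt */
                        .rxq = 1,       /* per-queue Rx interrupt */
                },
        };

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}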
/**
eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */
eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */
-#ifdef RTE_NEXT_ABI
/**< Enable Rx queue interrupt. */
eth_rx_enable_intr_t rx_queue_intr_enable;
/**< Disable Rx queue interrupt.*/
eth_rx_disable_intr_t rx_queue_intr_disable;
-#endif
eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
eth_dev_led_on_t dev_led_on; /**< Turn on LED. */