struct rte_intr_source {
TAILQ_ENTRY(rte_intr_source) next;
- struct rte_intr_handle intr_handle; /**< interrupt handle */
+ struct rte_intr_handle *intr_handle; /**< interrupt handle */
struct rte_intr_cb_list callbacks; /**< user callbacks */
uint32_t active;
};
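
With the handle made opaque, callers no longer embed struct rte_intr_handle by value: they keep a pointer obtained from the allocation API and reach individual fields only through getters and setters. Below is a minimal sketch of that lifecycle, assuming the accessor API introduced alongside this change (rte_intr_instance_alloc(), rte_intr_fd_set(), rte_intr_type_set(), rte_intr_instance_free()); make_handle and efd are illustrative names, not part of the patch.

#include <rte_interrupts.h>

/* Sketch: allocate a per-process handle, fill it through setters,
 * and release it on failure or when no longer needed. */
static struct rte_intr_handle *
make_handle(int efd)
{
	struct rte_intr_handle *ih;

	ih = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
	if (ih == NULL)
		return NULL;

	if (rte_intr_fd_set(ih, efd) != 0 ||
	    rte_intr_type_set(ih, RTE_INTR_HANDLE_VDEV) != 0) {
		rte_intr_instance_free(ih);
		return NULL;
	}
	return ih;
}
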
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
/* alarm callbacks are special case */
- if (ih->type == RTE_INTR_HANDLE_ALARM) {
+ if (rte_intr_type_get(ih) == RTE_INTR_HANDLE_ALARM) {
uint64_t timeout_ns;
/* get soonest alarm timeout */
} else {
ke->filter = EVFILT_READ;
}
- ke->ident = ih->fd;
+ ke->ident = rte_intr_fd_get(ih);
return 0;
}
int ret = 0, add_event = 0;
/* first do parameter checking */
- if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
+ if (rte_intr_fd_get(intr_handle) < 0 || cb == NULL) {
RTE_LOG(ERR, EAL,
"Registering with invalid input parameter\n");
return -EINVAL;
/* find the source for this intr_handle */
TAILQ_FOREACH(src, &intr_sources, next) {
- if (src->intr_handle.fd == intr_handle->fd)
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
}
* thing on the list should be eal_alarm_callback() and we may
* be called just to reset the timer.
*/
- if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
- !TAILQ_EMPTY(&src->callbacks)) {
+ if (src != NULL &&
+ rte_intr_type_get(src->intr_handle) == RTE_INTR_HANDLE_ALARM &&
+ !TAILQ_EMPTY(&src->callbacks)) {
callback = NULL;
} else {
/* allocate a new interrupt callback entity */
ret = -ENOMEM;
goto fail;
} else {
- src->intr_handle = *intr_handle;
+ src->intr_handle = rte_intr_instance_dup(intr_handle);
+ if (src->intr_handle == NULL) {
+ RTE_LOG(ERR, EAL, "Can not create intr instance\n");
+ ret = -ENOMEM;
+ free(src);
+ src = NULL;
+ goto fail;
+ }
TAILQ_INIT(&src->callbacks);
TAILQ_INSERT_TAIL(&intr_sources, src, next);
}
/* add events to the queue. timer events are special as we need to
* re-set the timer.
*/
- if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
+ if (add_event ||
+ rte_intr_type_get(src->intr_handle) == RTE_INTR_HANDLE_ALARM) {
struct kevent ke;
memset(&ke, 0, sizeof(ke));
*/
if (errno == ENODEV)
RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
- src->intr_handle.fd);
+ rte_intr_fd_get(src->intr_handle));
else
- RTE_LOG(ERR, EAL, "Error adding fd %d "
- "kevent, %s\n",
- src->intr_handle.fd,
- strerror(errno));
+ RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
+ rte_intr_fd_get(src->intr_handle),
+ strerror(errno));
ret = -errno;
goto fail;
}
struct rte_intr_callback *cb, *next;
/* do parameter checking first */
- if (intr_handle == NULL || intr_handle->fd < 0) {
+ if (rte_intr_fd_get(intr_handle) < 0) {
RTE_LOG(ERR, EAL,
"Unregistering with invalid input parameter\n");
return -EINVAL;
/* check if the interrupt source for the fd exists */
TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == intr_handle->fd)
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
/* No interrupt source registered for the fd */
struct rte_intr_callback *cb, *next;
/* do parameter checking first */
- if (intr_handle == NULL || intr_handle->fd < 0) {
+ if (rte_intr_fd_get(intr_handle) < 0) {
RTE_LOG(ERR, EAL,
"Unregistering with invalid input parameter\n");
return -EINVAL;
/* check if the interrupt source for the fd exists */
TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == intr_handle->fd)
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
/* No interrupt source registered for the fd */
*/
if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
- src->intr_handle.fd, strerror(errno));
+ rte_intr_fd_get(src->intr_handle),
+ strerror(errno));
/* removing a non-existent event is an expected condition
* in some circumstances (e.g. oneshot events).
*/
if (intr_handle == NULL)
return -1;
- if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
rc = 0;
goto out;
}
- if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ if (rte_intr_fd_get(intr_handle) < 0 ||
+ rte_intr_dev_fd_get(intr_handle) < 0) {
rc = -1;
goto out;
}
- switch (intr_handle->type) {
+ switch (rte_intr_type_get(intr_handle)) {
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
rc = -1;
break;
/* unknown handle type */
default:
- RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
+ rte_intr_fd_get(intr_handle));
rc = -1;
break;
}
if (intr_handle == NULL)
return -1;
- if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
rc = 0;
goto out;
}
- if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ if (rte_intr_fd_get(intr_handle) < 0 ||
+ rte_intr_dev_fd_get(intr_handle) < 0) {
rc = -1;
goto out;
}
- switch (intr_handle->type) {
+ switch (rte_intr_type_get(intr_handle)) {
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
rc = -1;
break;
/* unknown handle type */
default:
- RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
+ rte_intr_fd_get(intr_handle));
rc = -1;
break;
}
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
- if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
return 0;
return -1;
rte_spinlock_lock(&intr_lock);
TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == event_fd)
+ if (rte_intr_fd_get(src->intr_handle) == event_fd)
break;
if (src == NULL) {
rte_spinlock_unlock(&intr_lock);
rte_spinlock_unlock(&intr_lock);
/* set the length to be read for different handle types */
- switch (src->intr_handle.type) {
+ switch (rte_intr_type_get(src->intr_handle)) {
case RTE_INTR_HANDLE_ALARM:
bytes_read = 0;
call = true;
/* mark for deletion from the queue */
ke.flags = EV_DELETE;
- if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
+ if (intr_source_to_kevent(src->intr_handle, &ke) < 0) {
RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
rte_spinlock_unlock(&intr_lock);
return;
* remove intr file descriptor from wait list.
*/
if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
- RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
- "%s\n", src->intr_handle.fd,
+ RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
+ rte_intr_fd_get(src->intr_handle),
strerror(errno));
/* removing a non-existent event is an expected
* condition in some circumstances
TAILQ_REMOVE(&src->callbacks, cb, next);
if (cb->ucb_fn)
- cb->ucb_fn(&src->intr_handle, cb->cb_arg);
+ cb->ucb_fn(src->intr_handle, cb->cb_arg);
free(cb);
}
}
struct rte_intr_source {
TAILQ_ENTRY(rte_intr_source) next;
- struct rte_intr_handle intr_handle; /**< interrupt handle */
+ struct rte_intr_handle *intr_handle; /**< interrupt handle */
struct rte_intr_cb_list callbacks; /**< user callbacks */
uint32_t active;
};
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
- int len, ret;
+ int len, ret, vfio_dev_fd;
int *fd_ptr;
len = sizeof(irq_set_buf);
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
- *fd_ptr = intr_handle->fd;
+ *fd_ptr = rte_intr_fd_get(intr_handle);
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
return 0;
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
- int len, ret;
+ int len, ret, vfio_dev_fd;
len = sizeof(struct vfio_irq_set);
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
- RTE_LOG(ERR, EAL,
- "Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Error disabling INTx interrupts for fd %d\n",
+ rte_intr_fd_get(intr_handle));
return -1;
}
return 0;
vfio_ack_intx(const struct rte_intr_handle *intr_handle)
{
struct vfio_irq_set irq_set;
+ int vfio_dev_fd;
/* unmask INTx */
memset(&irq_set, 0, sizeof(irq_set));
irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set.start = 0;
- if (ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
return 0;
int len, ret;
char irq_set_buf[IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
- int *fd_ptr;
+ int *fd_ptr, vfio_dev_fd;
len = sizeof(irq_set_buf);
irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
- *fd_ptr = intr_handle->fd;
+ *fd_ptr = rte_intr_fd_get(intr_handle);
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
return 0;
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
- int len, ret;
+ int len, ret, vfio_dev_fd;
len = sizeof(struct vfio_irq_set);
irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- RTE_LOG(ERR, EAL,
- "Error disabling MSI interrupts for fd %d\n", intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Error disabling MSI interrupts for fd %d\n",
+ rte_intr_fd_get(intr_handle));
return ret;
}
int len, ret;
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
- int *fd_ptr;
+ int *fd_ptr, vfio_dev_fd, i;
len = sizeof(irq_set_buf);
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
/* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
- irq_set->count = intr_handle->max_intr ?
- (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
- RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
+ irq_set->count = rte_intr_max_intr_get(intr_handle) ?
+ (rte_intr_max_intr_get(intr_handle) >
+ RTE_MAX_RXTX_INTR_VEC_ID + 1 ? RTE_MAX_RXTX_INTR_VEC_ID + 1 :
+ rte_intr_max_intr_get(intr_handle)) : 1;
+
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
/* INTR vector offset 0 reserve for non-efds mapping */
- fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
- memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
- sizeof(*intr_handle->efds) * intr_handle->nb_efd);
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = rte_intr_fd_get(intr_handle);
+ for (i = 0; i < rte_intr_nb_efd_get(intr_handle); i++) {
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] =
+ rte_intr_efds_index_get(intr_handle, i);
+ }
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
- int len, ret;
+ int len, ret, vfio_dev_fd;
len = sizeof(struct vfio_irq_set);
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- RTE_LOG(ERR, EAL,
- "Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Error disabling MSI-X interrupts for fd %d\n",
+ rte_intr_fd_get(intr_handle));
return ret;
}
int len, ret;
char irq_set_buf[IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
- int *fd_ptr;
+ int *fd_ptr, vfio_dev_fd;
len = sizeof(irq_set_buf);
irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
- *fd_ptr = intr_handle->fd;
+ *fd_ptr = rte_intr_fd_get(intr_handle);
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
RTE_LOG(ERR, EAL, "Error enabling req interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
{
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
- int len, ret;
+ int len, ret, vfio_dev_fd;
len = sizeof(struct vfio_irq_set);
irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
irq_set->start = 0;
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
RTE_LOG(ERR, EAL, "Error disabling req interrupts for fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return ret;
}
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
unsigned char command_high;
+ int uio_cfg_fd;
/* use UIO config file descriptor for uio_pci_generic */
- if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
+ if (pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
RTE_LOG(ERR, EAL,
"Error reading interrupts status for fd %d\n",
- intr_handle->uio_cfg_fd);
+ uio_cfg_fd);
return -1;
}
/* disable interrupts */
command_high |= 0x4;
- if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
RTE_LOG(ERR, EAL,
"Error disabling interrupts for fd %d\n",
- intr_handle->uio_cfg_fd);
+ uio_cfg_fd);
return -1;
}
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
unsigned char command_high;
+ int uio_cfg_fd;
/* use UIO config file descriptor for uio_pci_generic */
- if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
+ if (pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
RTE_LOG(ERR, EAL,
"Error reading interrupts status for fd %d\n",
- intr_handle->uio_cfg_fd);
+ uio_cfg_fd);
return -1;
}
/* enable interrupts */
command_high &= ~0x4;
- if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
RTE_LOG(ERR, EAL,
"Error enabling interrupts for fd %d\n",
- intr_handle->uio_cfg_fd);
+ uio_cfg_fd);
return -1;
}
{
const int value = 0;
- if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
- RTE_LOG(ERR, EAL,
- "Error disabling interrupts for fd %d (%s)\n",
- intr_handle->fd, strerror(errno));
+ if (write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
+ RTE_LOG(ERR, EAL, "Error disabling interrupts for fd %d (%s)\n",
+ rte_intr_fd_get(intr_handle), strerror(errno));
return -1;
}
return 0;
{
const int value = 1;
- if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
- RTE_LOG(ERR, EAL,
- "Error enabling interrupts for fd %d (%s)\n",
- intr_handle->fd, strerror(errno));
+ if (write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
+ RTE_LOG(ERR, EAL, "Error enabling interrupts for fd %d (%s)\n",
+ rte_intr_fd_get(intr_handle), strerror(errno));
return -1;
}
return 0;
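
In the UIO hunks above, the PCI config-space descriptor formerly stored in uio_cfg_fd is now read back with rte_intr_dev_fd_get(), so the bus code is expected to have recorded it with the matching setter when the device was mapped. A minimal sketch of that setup side, assuming the same accessor API; ih, uio_fd and uio_cfg_fd are illustrative locals and error handling is trimmed.

/* Sketch: the PCI/UIO probe path records both descriptors in the handle. */
if (rte_intr_fd_set(ih, uio_fd) != 0 ||
    rte_intr_dev_fd_set(ih, uio_cfg_fd) != 0 ||
    rte_intr_type_set(ih, RTE_INTR_HANDLE_UIO) != 0)
	return -rte_errno;
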
wake_thread = 0;
/* first do parameter checking */
- if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
- RTE_LOG(ERR, EAL,
- "Registering with invalid input parameter\n");
+ if (rte_intr_fd_get(intr_handle) < 0 || cb == NULL) {
+ RTE_LOG(ERR, EAL, "Registering with invalid input parameter\n");
return -EINVAL;
}
/* check if there is at least one callback registered for the fd */
TAILQ_FOREACH(src, &intr_sources, next) {
- if (src->intr_handle.fd == intr_handle->fd) {
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle)) {
/* we had no interrupts for this */
if (TAILQ_EMPTY(&src->callbacks))
wake_thread = 1;
src = calloc(1, sizeof(*src));
if (src == NULL) {
RTE_LOG(ERR, EAL, "Can not allocate memory\n");
- free(callback);
ret = -ENOMEM;
+ free(callback);
+ callback = NULL;
} else {
- src->intr_handle = *intr_handle;
- TAILQ_INIT(&src->callbacks);
- TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
- TAILQ_INSERT_TAIL(&intr_sources, src, next);
- wake_thread = 1;
- ret = 0;
+ src->intr_handle = rte_intr_instance_dup(intr_handle);
+ if (src->intr_handle == NULL) {
+ RTE_LOG(ERR, EAL, "Can not create intr instance\n");
+ ret = -ENOMEM;
+ free(callback);
+ callback = NULL;
+ free(src);
+ src = NULL;
+ } else {
+ TAILQ_INIT(&src->callbacks);
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback,
+ next);
+ TAILQ_INSERT_TAIL(&intr_sources, src, next);
+ wake_thread = 1;
+ ret = 0;
+ }
}
}
struct rte_intr_callback *cb, *next;
/* do parameter checking first */
- if (intr_handle == NULL || intr_handle->fd < 0) {
- RTE_LOG(ERR, EAL,
- "Unregistering with invalid input parameter\n");
+ if (rte_intr_fd_get(intr_handle) < 0) {
+ RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
return -EINVAL;
}
rte_spinlock_lock(&intr_lock);
/* check if the interrupt source for the fd exists */
- TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == intr_handle->fd)
+ TAILQ_FOREACH(src, &intr_sources, next) {
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
+ }
/* No interrupt source registered for the fd */
if (src == NULL) {
struct rte_intr_callback *cb, *next;
/* do parameter checking first */
- if (intr_handle == NULL || intr_handle->fd < 0) {
- RTE_LOG(ERR, EAL,
- "Unregistering with invalid input parameter\n");
+ if (rte_intr_fd_get(intr_handle) < 0) {
+ RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
return -EINVAL;
}
/* check if the interrupt source for the fd exists */
TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == intr_handle->fd)
+ if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
/* No interrupt source registered for the fd */
/* all callbacks for that source are removed. */
if (TAILQ_EMPTY(&src->callbacks)) {
TAILQ_REMOVE(&intr_sources, src, next);
+ rte_intr_instance_free(src->intr_handle);
free(src);
}
}
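
From a caller's point of view the registration flow is unchanged apart from passing the opaque pointer; EAL keeps its own duplicate internally and frees it once the last callback is gone, as shown above. A minimal usage sketch with the public callback API; the handler name and cb_arg are illustrative.

/* Sketch: the callback receives only the argument it was registered with. */
static void
my_intr_handler(void *cb_arg)
{
	/* read/ack the device via cb_arg, then kick the datapath */
	(void)cb_arg;
}

	if (rte_intr_callback_register(ih, my_intr_handler, cb_arg) != 0)
		return -1;
	/* ... later, e.g. at device close ... */
	rte_intr_callback_unregister(ih, my_intr_handler, cb_arg);
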
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
- int rc = 0;
+ int rc = 0, uio_cfg_fd;
if (intr_handle == NULL)
return -1;
- if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
rc = 0;
goto out;
}
- if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
+ if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
rc = -1;
goto out;
}
- switch (intr_handle->type){
+ switch (rte_intr_type_get(intr_handle)) {
/* write to the uio fd to enable the interrupt */
case RTE_INTR_HANDLE_UIO:
if (uio_intr_enable(intr_handle))
break;
/* unknown handle type */
default:
- RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
+ rte_intr_fd_get(intr_handle));
rc = -1;
break;
}
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
- if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ int uio_cfg_fd;
+
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
return 0;
- if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
+ if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0)
return -1;
- switch (intr_handle->type) {
+ switch (rte_intr_type_get(intr_handle)) {
/* Both acking and enabling are same for UIO */
case RTE_INTR_HANDLE_UIO:
if (uio_intr_enable(intr_handle))
/* unknown handle type */
default:
RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ rte_intr_fd_get(intr_handle));
return -1;
}
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
- int rc = 0;
+ int rc = 0, uio_cfg_fd;
if (intr_handle == NULL)
return -1;
- if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
rc = 0;
goto out;
}
- if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
+ if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
rc = -1;
goto out;
}
- switch (intr_handle->type){
+ switch (rte_intr_type_get(intr_handle)) {
/* write to the uio fd to disable the interrupt */
case RTE_INTR_HANDLE_UIO:
if (uio_intr_disable(intr_handle))
break;
/* unknown handle type */
default:
- RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
+ rte_intr_fd_get(intr_handle));
rc = -1;
break;
}
}
rte_spinlock_lock(&intr_lock);
TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd ==
- events[n].data.fd)
+ if (rte_intr_fd_get(src->intr_handle) == events[n].data.fd)
break;
if (src == NULL){
rte_spinlock_unlock(&intr_lock);
rte_spinlock_unlock(&intr_lock);
/* set the length to be read for different handle types */
- switch (src->intr_handle.type) {
+ switch (rte_intr_type_get(src->intr_handle)) {
case RTE_INTR_HANDLE_UIO:
case RTE_INTR_HANDLE_UIO_INTX:
bytes_read = sizeof(buf.uio_intr_count);
TAILQ_REMOVE(&src->callbacks, cb, next);
free(cb);
}
+ rte_intr_instance_free(src->intr_handle);
free(src);
return -1;
} else if (bytes_read == 0)
if (cb->pending_delete) {
TAILQ_REMOVE(&src->callbacks, cb, next);
if (cb->ucb_fn)
- cb->ucb_fn(&src->intr_handle, cb->cb_arg);
+ cb->ucb_fn(src->intr_handle, cb->cb_arg);
free(cb);
rv++;
}
/* all callbacks for that source are removed. */
if (TAILQ_EMPTY(&src->callbacks)) {
TAILQ_REMOVE(&intr_sources, src, next);
+ rte_intr_instance_free(src->intr_handle);
free(src);
}
continue; /* skip those with no callbacks */
memset(&ev, 0, sizeof(ev));
ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
- ev.data.fd = src->intr_handle.fd;
+ ev.data.fd = rte_intr_fd_get(src->intr_handle);
/**
* add all the uio device file descriptor
* into wait list.
*/
if (epoll_ctl(pfd, EPOLL_CTL_ADD,
- src->intr_handle.fd, &ev) < 0){
+ rte_intr_fd_get(src->intr_handle), &ev) < 0) {
rte_panic("Error adding fd %d epoll_ctl, %s\n",
- src->intr_handle.fd, strerror(errno));
+ rte_intr_fd_get(src->intr_handle),
+ strerror(errno));
}
else
numfds++;
int bytes_read = 0;
int nbytes;
- switch (intr_handle->type) {
+ switch (rte_intr_type_get(intr_handle)) {
case RTE_INTR_HANDLE_UIO:
case RTE_INTR_HANDLE_UIO_INTX:
bytes_read = sizeof(buf.uio_intr_count);
break;
#endif
case RTE_INTR_HANDLE_VDEV:
- bytes_read = intr_handle->efd_counter_size;
+ bytes_read = rte_intr_efd_counter_size_get(intr_handle);
/* For vdev, number of bytes to read is set by driver */
break;
case RTE_INTR_HANDLE_EXT:
efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
- if (!intr_handle || intr_handle->nb_efd == 0 ||
- efd_idx >= intr_handle->nb_efd) {
+ if (intr_handle == NULL || rte_intr_nb_efd_get(intr_handle) == 0 ||
+ efd_idx >= (unsigned int)rte_intr_nb_efd_get(intr_handle)) {
RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
return -EPERM;
}
switch (op) {
case RTE_INTR_EVENT_ADD:
epfd_op = EPOLL_CTL_ADD;
- rev = &intr_handle->elist[efd_idx];
+ rev = rte_intr_elist_index_get(intr_handle, efd_idx);
if (__atomic_load_n(&rev->status,
__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
RTE_LOG(INFO, EAL, "Event already been added.\n");
epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
epdata->cb_arg = (void *)intr_handle;
rc = rte_epoll_ctl(epfd, epfd_op,
- intr_handle->efds[efd_idx], rev);
+ rte_intr_efds_index_get(intr_handle, efd_idx), rev);
if (!rc)
RTE_LOG(DEBUG, EAL,
"efd %d associated with vec %d added on epfd %d"
break;
case RTE_INTR_EVENT_DEL:
epfd_op = EPOLL_CTL_DEL;
- rev = &intr_handle->elist[efd_idx];
+ rev = rte_intr_elist_index_get(intr_handle, efd_idx);
if (__atomic_load_n(&rev->status,
__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
RTE_LOG(INFO, EAL, "Event does not exist.\n");
uint32_t i;
struct rte_epoll_event *rev;
- for (i = 0; i < intr_handle->nb_efd; i++) {
- rev = &intr_handle->elist[i];
+ for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++) {
+ rev = rte_intr_elist_index_get(intr_handle, i);
if (__atomic_load_n(&rev->status,
__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
continue;
assert(nb_efd != 0);
- if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX) {
for (i = 0; i < n; i++) {
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
errno, strerror(errno));
return -errno;
}
- intr_handle->efds[i] = fd;
+
+ if (rte_intr_efds_index_set(intr_handle, i, fd))
+ return -rte_errno;
}
- intr_handle->nb_efd = n;
- intr_handle->max_intr = NB_OTHER_INTR + n;
- } else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+
+ if (rte_intr_nb_efd_set(intr_handle, n))
+ return -rte_errno;
+
+ if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR + n))
+ return -rte_errno;
+ } else if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
/* only check, initialization would be done in vdev driver.*/
- if (intr_handle->efd_counter_size >
+ if ((uint64_t)rte_intr_efd_counter_size_get(intr_handle) >
sizeof(union rte_intr_read_buffer)) {
RTE_LOG(ERR, EAL, "the efd_counter_size is oversized");
return -EINVAL;
}
} else {
- intr_handle->efds[0] = intr_handle->fd;
- intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
- intr_handle->max_intr = NB_OTHER_INTR;
+ if (rte_intr_efds_index_set(intr_handle, 0, rte_intr_fd_get(intr_handle)))
+ return -rte_errno;
+ if (rte_intr_nb_efd_set(intr_handle, RTE_MIN(nb_efd, 1U)))
+ return -rte_errno;
+ if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR))
+ return -rte_errno;
}
return 0;
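
On the consumer side a PMD now reaches the per-queue event fds only through the getters. A minimal sketch of enabling them and wiring queue 0 into the calling thread's epoll instance, assuming rte_intr_efd_enable() and rte_intr_rx_ctl() as exported by EAL; ih and nb_rx_queues are illustrative.

/* Sketch: one event fd per Rx queue, queue 0 added to this thread's epoll. */
if (rte_intr_efd_enable(ih, nb_rx_queues) != 0)
	return -1;

if (rte_intr_rx_ctl(ih, RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD,
		    RTE_INTR_VEC_RXTX_OFFSET + 0, NULL) != 0)
	return -1;

/* the handle now answers rte_intr_nb_efd_get(ih) vectors, and
 * rte_intr_efds_index_get(ih, i) returns the event fd of queue i */
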
uint32_t i;
rte_intr_free_epoll_fd(intr_handle);
- if (intr_handle->max_intr > intr_handle->nb_efd) {
- for (i = 0; i < intr_handle->nb_efd; i++)
- close(intr_handle->efds[i]);
+ if (rte_intr_max_intr_get(intr_handle) > rte_intr_nb_efd_get(intr_handle)) {
+ for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++)
+ close(rte_intr_efds_index_get(intr_handle, i));
}
- intr_handle->nb_efd = 0;
- intr_handle->max_intr = 0;
+ rte_intr_nb_efd_set(intr_handle, 0);
+ rte_intr_max_intr_set(intr_handle, 0);
}
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
- return !(!intr_handle->nb_efd);
+ return !(!rte_intr_nb_efd_get(intr_handle));
}
int
if (!rte_intr_dp_is_en(intr_handle))
return 1;
else
- return !!(intr_handle->max_intr - intr_handle->nb_efd);
+ return !!(rte_intr_max_intr_get(intr_handle) -
+ rte_intr_nb_efd_get(intr_handle));
}
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
- if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX)
return 1;
- if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
return 1;
return 0;
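
The capability helpers keep their external behaviour; only the field accesses behind them change. A short caller-side sketch of how a PMD typically uses them, assuming the same handle ih and an illustrative nb_rx_queues:

/* Sketch: a PMD decides up front how many vectors to ask for. */
uint32_t nb_efd = rte_intr_cap_multiple(ih) ? nb_rx_queues : 1;

/* once rte_intr_efd_enable(ih, nb_efd) has run (see earlier sketch),
 * rte_intr_dp_is_en(ih) reports whether datapath vectors exist and
 * rte_intr_allow_others(ih) whether one is left for link-status style
 * interrupts. */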