test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
{
if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
- printf("null or invalid intr_handle for %s\n", __FUNCTION__);
+ printf("null or invalid intr_handle for %s\n", __func__);
+ flag = -1;
return;
}
if (rte_intr_callback_unregister(intr_handle,
- test_interrupt_callback, arg) <= 0) {
- printf("fail to unregister callback\n");
+ test_interrupt_callback, arg) >= 0) {
+ printf("%s: unexpectedly able to unregister itself\n",
+ __func__);
+ flag = -1;
return;
}
}
static void
-test_interrupt_callback_1(struct rte_intr_handle *intr_handle, void *arg)
+test_interrupt_callback_1(struct rte_intr_handle *intr_handle,
+ __attribute__((unused)) void *arg)
{
if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
- printf("null or invalid intr_handle for %s\n", __FUNCTION__);
- return;
- }
- if (rte_intr_callback_unregister(intr_handle,
- test_interrupt_callback_1, arg) <= 0) {
- printf("fail to unregister callback\n");
+ printf("null or invalid intr_handle for %s\n", __func__);
+ flag = -1;
return;
}
}
int
test_interrupt(void)
{
- int count = 0, ret = -1;
+ int count, ret;
struct rte_intr_handle test_intr_handle;
if (test_interrupt_init() < 0) {
printf("check if callback registered can be called\n");
+ ret = -1;
+
/* check if callback registered can be called */
flag = 0;
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
goto out;
}
/* check flag in 3 seconds */
- while (flag == 0 && count++ < 3)
+ for (count = 0; flag == 0 && count < 3; count++)
rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
+
+ rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
+
+ if ((ret = rte_intr_callback_unregister(&test_intr_handle,
+ test_interrupt_callback, NULL)) < 0) {
+ printf("rte_intr_callback_unregister() failed with error "
+ "code: %d\n", ret);
+ goto out;
+ }
+
+ ret = -1;
+
if (flag == 0) {
printf("registered callback has not been called\n");
goto out;
+ } else if (flag < 0) {
+ printf("registered callback failed\n");
+ ret = flag;
+ goto out;
}
- rte_delay_ms(1000);
printf("start register/unregister test\n");
"for all\n");
goto out;
}
- rte_delay_ms(1000);
+ rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
printf("start interrupt enable/disable test\n");
/* check interrupt enable/disable functions */
if (test_interrupt_enable() < 0)
goto out;
- rte_delay_ms(1000);
+ rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
if (test_interrupt_disable() < 0)
goto out;
- rte_delay_ms(1000);
+ rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
ret = 0;
rte_intr_callback_unregister(&test_intr_handle,
test_interrupt_callback_1, (void *)-1);
- rte_delay_ms(2000);
+ rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL);
/* deinit */
test_interrupt_deinit();
TAILQ_ENTRY(rte_intr_source) next;
struct rte_intr_handle intr_handle; /**< interrupt handle */
struct rte_intr_cb_list callbacks; /**< user callbacks */
+ uint32_t active;
};
/* global spinlock for interrupt data operation */
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg)
{
- int ret = -1;
+ int ret, wake_thread;
struct rte_intr_source *src;
- int wake_thread = 0;
+ struct rte_intr_callback *callback;
+
+ wake_thread = 0;
/* first do parameter checking */
if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
}
/* allocate a new interrupt callback entity */
- struct rte_intr_callback *callback =
- rte_zmalloc("interrupt callback list",
+ callback = rte_zmalloc("interrupt callback list",
sizeof(*callback), 0);
if (callback == NULL) {
RTE_LOG(ERR, EAL, "Can not allocate memory\n");
rte_spinlock_lock(&intr_lock);
/* check if there is at least one callback registered for the fd */
- TAILQ_FOREACH(src, &intr_sources, next)
- if (src->intr_handle.fd == intr_handle->fd) {
- if (src->callbacks.tqh_first == NULL)
+ TAILQ_FOREACH(src, &intr_sources, next) {
+ if (src->intr_handle.fd == intr_handle->fd) {
/* we had no interrupts for this */
- wake_thread = 1;
+ if (TAILQ_EMPTY(&src->callbacks))
+ wake_thread = 1;
- TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
- break;
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ ret = 0;
+ break;
+ }
}
- /* No callback registered for this fd */
- if (src == NULL){
- /* no existing callbacks for this - add new source */
- src = rte_zmalloc("interrupt source list", sizeof(*src), 0);
- if (src == NULL){
+ /* no existing callbacks for this - add new source */
+ if (src == NULL) {
+ if ((src = rte_zmalloc("interrupt source list",
+ sizeof(*src), 0)) == NULL) {
RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+ rte_free(callback);
ret = -ENOMEM;
- goto error;
+ } else {
+ src->intr_handle = *intr_handle;
+ TAILQ_INIT(&src->callbacks);
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ TAILQ_INSERT_TAIL(&intr_sources, src, next);
+ wake_thread = 1;
+ ret = 0;
}
- src->intr_handle = *intr_handle;
- TAILQ_INIT(&src->callbacks);
-
- TAILQ_INSERT_TAIL(&intr_sources, src, next);
- TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
- wake_thread = 1;
}
rte_spinlock_unlock(&intr_lock);
+
/**
* check if need to notify the pipe fd waited by epoll_wait to
* rebuild the wait list.
if (write(intr_pipe.writefd, "1", 1) < 0)
return -EPIPE;
- return 0;
-
-error:
- rte_spinlock_unlock(&intr_lock);
-
- return ret;
+ return (ret);
}
int
rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg)
{
- int ret = -1;
+ int ret;
struct rte_intr_source *src;
- struct rte_intr_callback *cb;
+ struct rte_intr_callback *cb, *next;
/* do parameter checking first */
if (intr_handle == NULL || intr_handle->fd < 0) {
/* No interrupt source registered for the fd */
if (src == NULL) {
ret = -ENOENT;
- goto error;
- }
- ret = 0;
- TAILQ_FOREACH(cb, &src->callbacks, next) {
- if (cb->cb_fn != cb_fn)
- continue;
- if (cb_arg == (void *)-1 || cb->cb_arg == cb_arg) {
- TAILQ_REMOVE(&src->callbacks, cb, next);
- rte_free(cb);
- ret ++;
+ /* interrupt source has some active callbacks right now. */
+ } else if (src->active != 0) {
+ ret = -EAGAIN;
+
+ /* ok to remove. */
+ } else {
+ ret = 0;
+
+ /* walk through the callbacks and remove all that match. */
+ for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+ cb->cb_arg == cb_arg)) {
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ rte_free(cb);
+ ret++;
+ }
}
- if (src->callbacks.tqh_first == NULL) {
+ /* all callbacks for that source are removed. */
+ if (TAILQ_EMPTY(&src->callbacks)) {
TAILQ_REMOVE(&intr_sources, src, next);
rte_free(src);
}
}
+ rte_spinlock_unlock(&intr_lock);
+
/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
- if (write(intr_pipe.writefd, "1", 1) < 0) {
+ if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
ret = -EPIPE;
- goto error;
}
- rte_spinlock_unlock(&intr_lock);
-
- return ret;
-
-error:
- rte_spinlock_unlock(&intr_lock);
-
- return ret;
+ return (ret);
}
int
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
- int n, i, active_cb, bytes_read;
+ int n, bytes_read;
struct rte_intr_source *src;
struct rte_intr_callback *cb;
union rte_intr_read_buffer buf;
- struct rte_intr_callback active_cbs[32];
+ struct rte_intr_callback active_cb;
for (n = 0; n < nfds; n++) {
+
/**
* if the pipe fd is ready to read, return out to
* rebuild the wait list.
continue;
}
- /* for this source, make a copy of all the callbacks,
- * then unlock the lock, so the callbacks can
- * themselves manipulate the list for future
- * instances.
- */
- active_cb = 0;
- memset(active_cbs, 0, sizeof(active_cbs));
- TAILQ_FOREACH(cb, &src->callbacks, next)
- active_cbs[active_cb++] = *cb;
+ /* mark this interrupt source as active and release the lock. */
+ src->active = 1;
rte_spinlock_unlock(&intr_lock);
/* set the length to be read dor different handle type */
bytes_read = 1;
break;
}
+
/**
* read out to clear the ready-to-be-read flag
* for epoll_wait.
*/
bytes_read = read(events[n].data.fd, &buf, bytes_read);
+
if (bytes_read < 0) {
RTE_LOG(ERR, EAL, "Error reading from file descriptor"
" %d, error: %d\n", events[n].data.fd, errno);
- continue;
}
else if (bytes_read == 0) {
RTE_LOG(ERR, EAL,
"Read nothing from file descriptor %d.\n",
events[n].data.fd);
- continue;
}
- /**
- * Finally, call all callbacks from the copy
- * we made earlier.
- */
- for (i = 0; i < active_cb; i++) {
- if (active_cbs[i].cb_fn == NULL)
- continue;
- active_cbs[i].cb_fn(&src->intr_handle,
- active_cbs[i].cb_arg);
+
+ /* grab a lock, again to call callbacks and update status. */
+ rte_spinlock_lock(&intr_lock);
+
+ if (bytes_read > 0) {
+
+ /* Finally, call all callbacks. */
+ TAILQ_FOREACH(cb, &src->callbacks, next) {
+
+ /* make a copy and unlock. */
+ active_cb = *cb;
+ rte_spinlock_unlock(&intr_lock);
+
+ /* call the actual callback */
+ active_cb.cb_fn(&src->intr_handle,
+ active_cb.cb_arg);
+
+ /* get the lock back. */
+ rte_spinlock_lock(&intr_lock);
+ }
}
+
+ /* we are done with that interrupt source; release it. */
+ src->active = 0;
+ rte_spinlock_unlock(&intr_lock);
}
return 0;
rte_eth_dev_cb_fn cb_fn; /**< Callback address */
void *cb_arg; /**< Parameter for callback */
enum rte_eth_event_type event; /**< Interrupt event type */
+ uint32_t active; /**< Callback is executing */
};
enum {
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
- int ret = -1;
struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *user_cb = NULL;
+ struct rte_eth_dev_callback *user_cb;
if (!cb_fn)
- return -1;
+ return (-EINVAL);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
+ return (-EINVAL);
}
+
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
if (user_cb->cb_fn == cb_fn &&
user_cb->cb_arg == cb_arg &&
user_cb->event == event) {
- ret = 0;
- goto out;
+ break;
}
}
- user_cb = rte_malloc("INTR_USER_CALLBACK",
- sizeof(struct rte_eth_dev_callback), 0);
- if (!user_cb)
- goto out;
- user_cb->cb_fn = cb_fn;
- user_cb->cb_arg = cb_arg;
- user_cb->event = event;
- TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
- ret = 0;
-out:
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ /* create a new callback. */
+ if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
+ }
- return ret;
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return ((user_cb == NULL) ? -ENOMEM : 0);
}
int
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
- int ret = -1;
+ int ret;
struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *cb_lst = NULL;
+ struct rte_eth_dev_callback *cb, *next;
if (!cb_fn)
- return -1;
+ return (-EINVAL);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
+ return (-EINVAL);
}
+
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
- TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
- if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
+
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 &&
+ cb->cb_arg != cb_arg))
continue;
- if (cb_lst->cb_arg == (void *)-1 ||
- cb_lst->cb_arg == cb_arg) {
- TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
- rte_free(cb_lst);
- ret = 0;
+
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->callbacks), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
}
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-
- return ret;
+ return (ret);
}
void
-_rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event)
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event)
{
- struct rte_eth_dev_callback *cb_lst = NULL;
+ struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
rte_spinlock_lock(&rte_eth_dev_cb_lock);
if (cb_lst->cb_fn == NULL || cb_lst->event != event)
continue;
dev_cb = *cb_lst;
+ cb_lst->active = 1;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
dev_cb.cb_arg);
rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ cb_lst->active = 0;
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
-
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
+ uint32_t mask;
};
/* local vfta copy */
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
return 0;
}
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+}
+
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
rte_intr_callback_register(&(pci_dev->intr_handle),
eth_igb_interrupt_handler, (void *)eth_dev);
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ igb_intr_enable(eth_dev);
+
return 0;
err_late:
PMD_INIT_LOG(DEBUG, ">>");
- igb_intr_disable(hw);
-
/* Power up the phy. Needed to make the link go Up */
e1000_power_up_phy(hw);
e1000_setup_link(hw);
/* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- ret = eth_igb_interrupt_setup(dev);
- if (ret) {
- PMD_INIT_LOG(ERR, "Unable to setup interrupts");
- igb_dev_clear_queues(dev);
- return ret;
- }
- }
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ret = eth_igb_lsc_interrupt_setup(dev);
+
+ /* resume enabled intr since hw reset */
+ igb_intr_enable(dev);
PMD_INIT_LOG(DEBUG, "<<");
}
}
-static void
-igb_intr_disable(struct e1000_hw *hw)
-{
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
-}
/**
* It enables the interrupt mask and then enable the interrupt.
* - On failure, a negative value.
*/
static int
-eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
- E1000_WRITE_FLUSH(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ intr->mask |= E1000_ICR_LSC;
return 0;
}
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ igb_intr_disable(hw);
+
/* read-on-clear nic registers here */
icr = E1000_READ_REG(hw, E1000_ICR);
+
+ intr->flags = 0;
if (icr & E1000_ICR_LSC) {
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
}
struct rte_eth_link link;
int ret;
- if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
- return -1;
- intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+ igb_intr_enable(dev);
rte_intr_enable(&(dev->pci_dev->intr_handle));
- /* set get_link_status to check register later */
- hw->mac.get_link_status = 1;
- ret = eth_igb_link_update(dev, 0);
-
- /* check if link has changed */
- if (ret < 0)
- return 0;
-
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- if (link.link_status) {
- PMD_INIT_LOG(INFO,
- " Port %d: Link Up - speed %u Mbps - %s\n",
- dev->data->port_id, (unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
- "full-duplex" : "half-duplex");
- } else {
- PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
- dev->data->port_id);
- }
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- if (link.link_status) {
- /* enable Tx/Rx */
- tctl |= E1000_TCTL_EN;
- rctl |= E1000_RCTL_EN;
- } else {
- /* disable Tx/Rx */
- tctl &= ~E1000_TCTL_EN;
- rctl &= ~E1000_RCTL_EN;
+ if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_igb_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO,
+ " Port %d: Link Up - speed %u Mbps - %s\n",
+ dev->data->port_id, (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+ dev->data->port_id);
+ }
+ PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain,
+ dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid,
+ dev->pci_dev->addr.function);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (link.link_status) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
return 0;
}
eth_igb_interrupt_get_status(dev);
eth_igb_interrupt_action(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
static int
PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");
/* Clear interrupt mask to stop from interrupts being generated */
- E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
E1000_WRITE_FLUSH(hw);
}
eth_igb_infos_get(dev, &dev_info);
/* Clear interrupt mask to stop from interrupts being generated */
- E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+ igbvf_intr_disable(hw);
/* Clear any pending interrupts, flush previous writes */
E1000_READ_REG(hw, E1000_EICR);
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret;
PMD_INIT_LOG(DEBUG, "igbvf_dev_start");
+ hw->mac.ops.reset_hw(hw);
+
/* Set all vfta */
igbvf_set_vfta_all(dev,1);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
}
}
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/*
* This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
*/
}
hw->fc.send_xon = 1;
- ixgbe_disable_intr(hw);
-
/* Make sure we have a good EEPROM before we read from it */
diag = ixgbe_validate_eeprom_checksum(hw, &csum);
if (diag != IXGBE_SUCCESS) {
return -EIO;
}
+ /* disable interrupt */
+ ixgbe_disable_intr(hw);
+
/* pick up the PCI bus settings for reporting later */
ixgbe_get_bus_info(hw);
rte_intr_callback_register(&(pci_dev->intr_handle),
ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ ixgbe_enable_intr(eth_dev);
+
return 0;
}
/* reinitialize adapter
* this calls reset and start */
ixgbe_init_hw(hw);
+ hw->mac.ops.start_hw(hw);
/* initialize transmission unit */
ixgbe_dev_tx_init(dev);
-
+
/* This can fail when allocating mbufs for descriptor rings */
err = ixgbe_dev_rx_init(dev);
if (err) {
goto error;
/* check if lsc interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- err = ixgbe_dev_interrupt_setup(dev);
- if (err)
- goto error;
- }
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ixgbe_dev_lsc_interrupt_setup(dev);
+
+ /* resume enabled intr since hw reset */
+ ixgbe_enable_intr(dev);
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
ixgbe_dev_link_status_print(dev);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ intr->mask |= IXGBE_EICR_LSC;
return 0;
}
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
+ /* clear all cause mask */
+ ixgbe_disable_intr(hw);
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
- PMD_INIT_LOG(INFO, "eicr %x", eicr);
+ PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+ intr->flags = 0;
if (eicr & IXGBE_EICR_LSC) {
/* set flag for async link update */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int64_t timeout;
+ struct rte_eth_link link;
+ int intr_enable_delay = false;
- if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
- return -1;
+ PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ /* get the link status before link update, for predicting later */
+ memset(&link, 0, sizeof(link));
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+ ixgbe_dev_link_update(dev, 0);
+
+ /* likely to up */
+ if (!link.link_status)
+ /* handle it 1 sec later, waiting for it to stabilize */
+ timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+ /* likely to down */
+ else
+ /* handle it 4 sec later, waiting for it to stabilize */
+ timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ ixgbe_dev_link_status_print(dev);
+
+ intr_enable_delay = true;
+ }
+
+ if (intr_enable_delay) {
+ if (rte_eal_alarm_set(timeout * 1000,
+ ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ } else {
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
}
- ixgbe_dev_link_update(dev, 0);
+
return 0;
}
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- IXGBE_READ_REG(hw, IXGBE_EICR);
- ixgbe_dev_interrupt_action(dev);
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- rte_intr_enable(&(dev->pci_dev->intr_handle));
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
- IXGBE_WRITE_FLUSH(hw);
ixgbe_dev_link_status_print(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler\n");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/**
ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
void *param)
{
- int64_t timeout;
- struct rte_eth_link link;
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct ixgbe_interrupt *intr =
- IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- /* get the link status before link update, for predicting later */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
ixgbe_dev_interrupt_get_status(dev);
ixgbe_dev_interrupt_action(dev);
-
- if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
- return;
-
- /* likely to up */
- if (!link.link_status)
- /* handle it 1 sec later, wait it being stable */
- timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
- /* likely to down */
- else
- /* handle it 4 sec later, wait it being stable */
- timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
- ixgbe_dev_link_status_print(dev);
- if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, param) < 0)
- PMD_INIT_LOG(ERR, "Error setting alarm");
}
static int
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int err, mask = 0;
PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+ hw->mac.ops.reset_hw(hw);
+
ixgbevf_dev_tx_init(dev);
/* This can fail when allocating mbufs for descriptor rings */
/* structure for interrupt relative data */
struct ixgbe_interrupt {
uint32_t flags;
+ uint32_t mask;
};
struct ixgbe_stat_mapping_registers {