#include <linux/sockios.h>
#include <linux/version.h>
#include <fcntl.h>
+#include <stdalign.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
-#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- __extension__ struct {
- struct ethtool_link_settings edata;
- uint32_t link_mode_data[3 *
- ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
- } ecmd;
-
+ struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- memset(&ecmd, 0, sizeof(ecmd));
- ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
- ifr.ifr_data = (void *)&ecmd;
+ ifr.ifr_data = (void *)&gcmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+ gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+ alignas(struct ethtool_link_settings)
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+ struct ethtool_link_settings *ecmd = (void *)data;
+
+ *ecmd = gcmd;
+ ifr.ifr_data = (void *)ecmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = ecmd.edata.speed;
- sc = ecmd.edata.link_mode_masks[0] |
- ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
+ dev_link.link_speed = ecmd->speed;
+ sc = ecmd->link_mode_masks[0] |
+ ((uint64_t)ecmd->link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
}
/**
 * Update the link status.
 *
 * Refreshes dev->data->dev_link via mlx5_link_update() (no-wait) and
 * checks the result for consistency: a link reported "up" must have a
 * non-zero speed and a link reported "down" a zero speed. On an
 * inconsistent reading, an alarm is scheduled to retry later and the
 * caller is told to defer its LSC callback.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Zero if the callback process can be called immediately, nonzero if
 *   the status is inconsistent and an alarm has been scheduled to
 *   re-evaluate it.
 */
static int
priv_link_status_update(struct priv *priv)
{
	struct rte_eth_link *link = &priv->dev->data->dev_link;

	mlx5_link_update(priv->dev, 0);
	if (((link->link_speed == 0) && link->link_status) ||
	    ((link->link_speed != 0) && !link->link_status)) {
		/*
		 * Inconsistent status. Event likely occurred before the
		 * kernel netdevice exposes the new status.
		 */
		if (!priv->pending_alarm) {
			/* Only one retry alarm may be outstanding at a time. */
			priv->pending_alarm = 1;
			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
					  mlx5_dev_link_status_handler,
					  priv->dev);
		}
		return 1;
	} else if (unlikely(priv->pending_alarm)) {
		/* Link interrupt occurred while alarm is already scheduled. */
		priv->pending_alarm = 0;
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
	}
	return 0;
}
+
/**
 * Device status handler.
 *
 * Drains and acknowledges all pending Verbs asynchronous events,
 * translating port-state events into an LSC notification and fatal
 * device events into an RMV notification, but only when the
 * corresponding interrupt mode (intr_conf.lsc / intr_conf.rmv) was
 * requested by the application.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Bitmap of RTE_ETH_EVENT_INTR_* events whose callback process can be
 *   called immediately. The LSC bit is cleared again when
 *   priv_link_status_update() defers the notification to an alarm.
 */
static uint32_t
priv_dev_status_handler(struct priv *priv)
{
	struct ibv_async_event event;
	uint32_t ret = 0;

	/* Read all messages and acknowledge them. */
	for (;;) {
		if (ibv_get_async_event(priv->ctx, &event))
			break;
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    (priv->dev->data->dev_conf.intr_conf.lsc == 1))
			ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
		else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
			 priv->dev->data->dev_conf.intr_conf.rmv == 1)
			ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
		else
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		/* Every retrieved event must be acknowledged. */
		ibv_ack_async_event(&event);
	}
	/* Suppress LSC when the link status update was deferred to an alarm. */
	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
		if (priv_link_status_update(priv))
			ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
	return ret;
}
priv_lock(priv);
assert(priv->pending_alarm == 1);
priv->pending_alarm = 0;
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_link_status_update(priv);
priv_unlock(priv);
- if (ret)
+ if (!ret)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
}
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
- int ret;
+ uint32_t events;
priv_lock(priv);
- ret = priv_dev_link_status_handler(priv, dev);
+ events = priv_dev_status_handler(priv);
priv_unlock(priv);
- if (ret)
+ if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
+ if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
+ NULL);
}
/**
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
- if (!dev->data->dev_conf.intr_conf.lsc)
+ if (!dev->data->dev_conf.intr_conf.lsc &&
+ !dev->data->dev_conf.intr_conf.rmv)
return;
rte_intr_callback_unregister(&priv->intr_handle,
mlx5_dev_interrupt_handler,
{
int rc, flags;
- if (!dev->data->dev_conf.intr_conf.lsc)
+ if (!dev->data->dev_conf.intr_conf.lsc &&
+ !dev->data->dev_conf.intr_conf.rmv)
return;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
if (rc < 0) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
} else {
priv->intr_handle.fd = priv->ctx->async_fd;
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;