unsigned int dev_port_prev = ~0u;
char match[IF_NAMESIZE] = "";
+ assert(priv);
+ assert(priv->sh);
{
- MKSTR(path, "%s/device/net", priv->ibdev_path);
+ MKSTR(path, "%s/device/net", priv->sh->ibdev_path);
dir = opendir(path);
if (dir == NULL) {
continue;
MKSTR(path, "%s/device/net/%s/%s",
- priv->ibdev_path, name,
+ priv->sh->ibdev_path, name,
(dev_type ? "dev_id" : "dev_port"));
file = fopen(path, "rb");
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int ifindex =
priv->nl_socket_rdma >= 0 ?
- mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
+ mlx5_nl_ifindex(priv->nl_socket_rdma,
+ priv->sh->ibdev_name,
+ priv->ibv_port) : 0;
if (!ifindex) {
if (!priv->representor)
* Since we need one CQ per QP, the limit is the minimum number
* between the two values.
*/
- max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
- priv->device_attr.orig_attr.max_qp);
+ max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
+ priv->sh->device_attr.orig_attr.max_qp);
/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
if (max >= 65535)
max = 65535;
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_device_attr *attr = &priv->device_attr.orig_attr;
+ struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
if (fw_size < size)
}
/**
- * Device status handler.
+ * Handle shared asynchronous events of the NIC (removal event
+ * and link status change). Supports multiport IB devices.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param events
- * Pointer to event flags holder.
- *
- * @return
- * Events bitmap of callback process which can be called immediately.
+ * @param cb_arg
+ * Callback argument.
*/
-static uint32_t
-mlx5_dev_status_handler(struct rte_eth_dev *dev)
+void
+mlx5_dev_interrupt_handler(void *cb_arg)
{
- struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = cb_arg;
struct ibv_async_event event;
- uint32_t ret = 0;
- if (mlx5_link_update(dev, 0) == -EAGAIN) {
- usleep(0);
- return 0;
- }
- /* Read all message and acknowledge them. */
+ /* Read all messages from the IB device and acknowledge them. */
for (;;) {
- if (mlx5_glue->get_async_event(priv->ctx, &event))
+ struct rte_eth_dev *dev;
+ uint32_t tmp;
+
+ if (mlx5_glue->get_async_event(sh->ctx, &event))
break;
+ /* Retrieve and check IB port index. */
+ tmp = (uint32_t)event.element.port_num;
+ assert(tmp && (tmp <= sh->max_port));
+ if (!tmp ||
+ tmp > sh->max_port ||
+ sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
+ /*
+ * Invalid IB port index or no handler
+ * installed for this port.
+ */
+ mlx5_glue->ack_async_event(&event);
+ continue;
+ }
+ /* Retrieve Ethernet device descriptor. */
+ tmp = sh->port[tmp - 1].ih_port_id;
+ dev = &rte_eth_devices[tmp];
+ tmp = 0;
+ assert(dev);
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
- event.event_type == IBV_EVENT_PORT_ERR) &&
- (dev->data->dev_conf.intr_conf.lsc == 1))
- ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
- else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
- dev->data->dev_conf.intr_conf.rmv == 1)
- ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
- else
- DRV_LOG(DEBUG,
- "port %u event type %d on not handled",
- dev->data->port_id, event.event_type);
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ dev->data->dev_conf.intr_conf.lsc) {
+ mlx5_glue->ack_async_event(&event);
+ if (mlx5_link_update(dev, 0) == -EAGAIN) {
+ usleep(0);
+ continue;
+ }
+ _rte_eth_dev_callback_process
+ (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ continue;
+ }
+ if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ dev->data->dev_conf.intr_conf.rmv) {
+ mlx5_glue->ack_async_event(&event);
+ _rte_eth_dev_callback_process
+ (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+ continue;
+ }
+ DRV_LOG(DEBUG,
+ "port %u event type %d on not handled",
+ dev->data->port_id, event.event_type);
mlx5_glue->ack_async_event(&event);
}
- return ret;
}
/**
- * Handle interrupts from the NIC.
+ * Uninstall shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
*
- * @param[in] intr_handle
- * Interrupt handler.
- * @param cb_arg
- * Callback argument.
+ * @param dev
+ * Pointer to Ethernet device.
*/
-void
-mlx5_dev_interrupt_handler(void *cb_arg)
+static void
+mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
{
- struct rte_eth_dev *dev = cb_arg;
- uint32_t events;
-
- events = mlx5_dev_status_handler(dev);
- if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
- if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+ pthread_mutex_lock(&sh->intr_mutex);
+ assert(priv->ibv_port);
+ assert(priv->ibv_port <= sh->max_port);
+ assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
+ goto exit;
+ assert(sh->port[priv->ibv_port - 1].ih_port_id ==
+ (uint32_t)dev->data->port_id);
+ assert(sh->intr_cnt);
+ sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+ if (!sh->intr_cnt || --sh->intr_cnt)
+ goto exit;
+ rte_intr_callback_unregister(&sh->intr_handle,
+ mlx5_dev_interrupt_handler, sh);
+ sh->intr_handle.fd = 0;
+ sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+exit:
+ pthread_mutex_unlock(&sh->intr_mutex);
}
/**
- * Handle interrupts from the socket.
+ * Install shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
*
- * @param cb_arg
- * Callback argument.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-mlx5_dev_handler_socket(void *cb_arg)
+mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
{
- struct rte_eth_dev *dev = cb_arg;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ int ret;
+ int flags;
- mlx5_socket_handle(dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+ pthread_mutex_lock(&sh->intr_mutex);
+ assert(priv->ibv_port);
+ assert(priv->ibv_port <= sh->max_port);
+ assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
+ /* The handler is already installed for this port. */
+ assert(sh->intr_cnt);
+ goto exit;
+ }
+ sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
+ if (sh->intr_cnt) {
+ sh->intr_cnt++;
+ goto exit;
+ }
+ /* No shared handler installed. */
+ assert(sh->ctx->async_fd > 0);
+ flags = fcntl(sh->ctx->async_fd, F_GETFL);
+ ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
+ DRV_LOG(INFO, "failed to change file descriptor"
+ " async event queue");
+ /* Indicate there will be no interrupts. */
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+ goto exit;
+ }
+ sh->intr_handle.fd = sh->ctx->async_fd;
+ sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&sh->intr_handle,
+ mlx5_dev_interrupt_handler, sh);
+ sh->intr_cnt++;
+exit:
+ pthread_mutex_unlock(&sh->intr_mutex);
}
/**
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (dev->data->dev_conf.intr_conf.lsc ||
- dev->data->dev_conf.intr_conf.rmv)
- rte_intr_callback_unregister(&priv->intr_handle,
- mlx5_dev_interrupt_handler, dev);
- if (priv->primary_socket)
- rte_intr_callback_unregister(&priv->intr_handle_socket,
- mlx5_dev_handler_socket, dev);
- priv->intr_handle.fd = 0;
- priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
- priv->intr_handle_socket.fd = 0;
- priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
+ mlx5_dev_shared_handler_uninstall(dev);
}
/**
void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- int ret;
- int flags;
-
- assert(priv->ctx->async_fd > 0);
- flags = fcntl(priv->ctx->async_fd, F_GETFL);
- ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (ret) {
- DRV_LOG(INFO,
- "port %u failed to change file descriptor async event"
- " queue",
- dev->data->port_id);
- dev->data->dev_conf.intr_conf.lsc = 0;
- dev->data->dev_conf.intr_conf.rmv = 0;
- }
- if (dev->data->dev_conf.intr_conf.lsc ||
- dev->data->dev_conf.intr_conf.rmv) {
- priv->intr_handle.fd = priv->ctx->async_fd;
- priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
- rte_intr_callback_register(&priv->intr_handle,
- mlx5_dev_interrupt_handler, dev);
- }
- ret = mlx5_socket_init(dev);
- if (ret)
- DRV_LOG(ERR, "port %u cannot initialise socket: %s",
- dev->data->port_id, strerror(rte_errno));
- else if (priv->primary_socket) {
- priv->intr_handle_socket.fd = priv->primary_socket;
- priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
- rte_intr_callback_register(&priv->intr_handle_socket,
- mlx5_dev_handler_socket, dev);
- }
+ mlx5_dev_shared_handler_install(dev);
}
/**
struct ibv_device_attr device_attr;
struct mlx5_priv *priv = dev->data->dev_private;
- if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+ if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
return 1;
return 0;
}
uint16_t id;
unsigned int n = 0;
- RTE_ETH_FOREACH_DEV(id) {
- struct rte_eth_dev *ldev = &rte_eth_devices[id];
-
- if (ldev->device != dev)
- continue;
+ RTE_ETH_FOREACH_DEV_OF(id, dev) {
if (n < port_list_n)
port_list[n] = id;
n++;
.port_name = 0,
.switch_id = 0,
};
+ DIR *dir;
bool port_name_set = false;
bool port_switch_id_set = false;
+ bool device_dir = false;
char c;
int ret;
ifname);
MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
ifname);
+ MKSTR(pci_device, "/sys/class/net/%s/device",
+ ifname);
file = fopen(phys_port_name, "rb");
if (file != NULL) {
fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
c == '\n';
fclose(file);
- data.master = port_switch_id_set && !port_name_set;
- data.representor = port_switch_id_set && port_name_set;
+ dir = opendir(pci_device);
+ if (dir != NULL) {
+ closedir(dir);
+ device_dir = true;
+ }
+ data.master = port_switch_id_set && (!port_name_set || device_dir);
+ data.representor = port_switch_id_set && port_name_set && !device_dir;
*info = data;
+ assert(!(data.master && data.representor));
+ if (data.master && data.representor) {
+ DRV_LOG(ERR, "ifindex %u device is recognized as master"
+ " and as representor", ifindex);
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
return 0;
}