X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_ethdev.c;h=eddf888e109adf666a142cc44822c52c57c2843a;hb=7b4f1e6bd367;hp=d1a70fca3618899ec2d7d9157e3ee6e791da827e;hpb=0333b2f584d95577681ea88e4238be6cb4369569;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index d1a70fca36..eddf888e10 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -33,9 +34,12 @@ #include #include #include +#include + +#include +#include #include "mlx5.h" -#include "mlx5_glue.h" #include "mlx5_rxtx.h" #include "mlx5_utils.h" @@ -223,10 +227,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) assert(priv); assert(priv->sh); - ifindex = priv->nl_socket_rdma >= 0 ? - mlx5_nl_ifindex(priv->nl_socket_rdma, - priv->sh->ibdev_name, - priv->ibv_port) : 0; + ifindex = mlx5_ifindex(dev); if (!ifindex) { if (!priv->representor) return mlx5_get_master_ifname(priv->sh->ibdev_path, @@ -252,14 +253,14 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) unsigned int mlx5_ifindex(const struct rte_eth_dev *dev) { - char ifname[IF_NAMESIZE]; + struct mlx5_priv *priv = dev->data->dev_private; unsigned int ifindex; - if (mlx5_get_ifname(dev, &ifname)) - return 0; - ifindex = if_nametoindex(ifname); + assert(priv); + assert(priv->if_index); + ifindex = priv->if_index; if (!ifindex) - rte_errno = errno; + rte_errno = ENXIO; return ifindex; } @@ -384,9 +385,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; unsigned int txqs_n = dev->data->nb_tx_queues; - unsigned int i; - unsigned int j; - unsigned int reta_idx_n; const uint8_t use_app_rss_key = !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; int ret = 0; @@ -408,6 +406,10 @@ mlx5_dev_configure(struct rte_eth_dev *dev) rte_errno = ENOMEM; return -rte_errno; } + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + memcpy(priv->rss_conf.rss_key, use_app_rss_key ? dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : @@ -428,32 +430,93 @@ mlx5_dev_configure(struct rte_eth_dev *dev) rte_errno = EINVAL; return -rte_errno; } - if (rxqs_n == priv->rxqs_n) - return 0; + if (rxqs_n != priv->rxqs_n) { + DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", + dev->data->port_id, priv->rxqs_n, rxqs_n); + priv->rxqs_n = rxqs_n; + } + priv->skip_default_rss_reta = 0; + ret = mlx5_proc_priv_init(dev); + if (ret) + return ret; + return 0; +} + +/** + * Configure default RSS reta. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
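+ *
+ * Only Rx queues of type MLX5_RXQ_TYPE_STANDARD are spread over the
+ * table (hairpin queues are skipped). For example, with six standard
+ * Rx queues and a maximal indirection table size of, e.g., 512 the
+ * whole table is filled cyclically: 0, 1, 2, 3, 4, 5, 0, 1, ...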
+ */ +int +mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int rxqs_n = dev->data->nb_rx_queues; + unsigned int i; + unsigned int j; + unsigned int reta_idx_n; + int ret = 0; + unsigned int *rss_queue_arr = NULL; + unsigned int rss_queue_n = 0; + + if (priv->skip_default_rss_reta) + return ret; + rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0); + if (!rss_queue_arr) { + DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)", + dev->data->port_id, rxqs_n); + rte_errno = ENOMEM; + return -rte_errno; + } + for (i = 0, j = 0; i < rxqs_n; i++) { + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + + rxq_data = (*priv->rxqs)[i]; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) + rss_queue_arr[j++] = i; + } + rss_queue_n = j; + if (rss_queue_n > priv->config.ind_table_max_size) { + DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", + dev->data->port_id, rss_queue_n); + rte_errno = EINVAL; + rte_free(rss_queue_arr); + return -rte_errno; + } DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", dev->data->port_id, priv->rxqs_n, rxqs_n); priv->rxqs_n = rxqs_n; - /* If the requested number of RX queues is not a power of two, use the - * maximum indirection table size for better balancing. - * The result is always rounded to the next power of two. */ - reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? - priv->config.ind_table_max_size : - rxqs_n)); + /* + * If the requested number of RX queues is not a power of two, + * use the maximum indirection table size for better balancing. + * The result is always rounded to the next power of two. + */ + reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ? + priv->config.ind_table_max_size : + rss_queue_n)); ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); - if (ret) + if (ret) { + rte_free(rss_queue_arr); return ret; - /* When the number of RX queues is not a power of two, the remaining - * table entries are padded with reused WQs and hashes are not spread - * uniformly. */ + } + /* + * When the number of RX queues is not a power of two, + * the remaining table entries are padded with reused WQs + * and hashes are not spread uniformly. + */ for (i = 0, j = 0; (i != reta_idx_n); ++i) { - (*priv->reta_idx)[i] = j; - if (++j == rxqs_n) + (*priv->reta_idx)[i] = rss_queue_arr[j]; + if (++j == rss_queue_n) j = 0; } - ret = mlx5_proc_priv_init(dev); - if (ret) - return ret; - return 0; + rte_free(rss_queue_arr); + return ret; } /** @@ -472,8 +535,8 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) /* Minimum CPU utilization. */ info->default_rxportconf.ring_size = 256; info->default_txportconf.ring_size = 256; - info->default_rxportconf.burst_size = 64; - info->default_txportconf.burst_size = 64; + info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST; + info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST; if (priv->link_speed_capa & ETH_LINK_SPEED_100G) { info->default_rxportconf.nb_queues = 16; info->default_txportconf.nb_queues = 16; @@ -495,6 +558,42 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) } } +/** + * Sets tx mbuf limiting parameters. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] info + * Info structure output buffer. 
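+ *
+ * For illustration, an application could check a multi-segment packet
+ * against the advertised limit before transmission (port_id and mbuf
+ * are application variables):
+ * @code
+ *	struct rte_eth_dev_info dev_info;
+ *
+ *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
+ *	    mbuf->nb_segs > dev_info.tx_desc_lim.nb_seg_max)
+ *		rte_pktmbuf_linearize(mbuf);
+ * @endcode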
+ */ +static void +mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + unsigned int inlen; + uint16_t nb_max; + + inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ? + MLX5_SEND_DEF_INLINE_LEN : + (unsigned int)config->txq_inline_max; + assert(config->txq_inline_min >= 0); + inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min); + inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE * 2); + nb_max = (MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE - + inlen) / MLX5_WSEG_SIZE; + info->tx_desc_lim.nb_seg_max = nb_max; + info->tx_desc_lim.nb_mtu_seg_max = nb_max; +} + /** * DPDK callback to get information about the device. * @@ -503,17 +602,17 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) * @param[out] info * Info structure output buffer. */ -void +int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; unsigned int max; - char ifname[IF_NAMESIZE]; /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; + info->max_lro_pkt_size = MLX5_MAX_LRO_SIZE; /* * Since we need one CQ per QP, the limit is the minimum number * between the two values. @@ -530,28 +629,46 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->rx_offload_capa = (mlx5_get_rx_port_offloads() | info->rx_queue_offload_capa); info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); - if (mlx5_get_ifname(dev, &ifname) == 0) - info->if_index = if_nametoindex(ifname); + info->if_index = mlx5_ifindex(dev); info->reta_size = priv->reta_idx_n ? priv->reta_idx_n : config->ind_table_max_size; info->hash_key_size = MLX5_RSS_HASH_KEY_LEN; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; mlx5_set_default_params(dev, info); + mlx5_set_txlimit_params(dev, info); info->switch_info.name = dev->data->name; info->switch_info.domain_id = priv->domain_id; info->switch_info.port_id = priv->representor_id; if (priv->representor) { - unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); - uint16_t port_id[i]; + uint16_t port_id; - i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); - while (i--) { + if (priv->pf_bond >= 0) { + /* + * Switch port ID is opaque value with driver defined + * format. Push the PF index in bonding configurations + * in upper four bits of port ID. If we get too many + * representors (more than 4K) or PFs (more than 15) + * this approach must be reconsidered. 
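+			 * For example, with the four-bit PF field
+			 * implied above (a shift of 12 bits), PF #2
+			 * with representor port 5 is reported as
+			 * (2 << 12) | 5 = 0x2005.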
+			 */
+			if ((info->switch_info.port_id >>
+			     MLX5_PORT_ID_BONDING_PF_SHIFT) ||
+			    priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
+				DRV_LOG(ERR, "can't update switch port ID"
+					     " for bonding device");
+				assert(false);
+				return -ENODEV;
+			}
+			info->switch_info.port_id |=
+				priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT;
+		}
+		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
 			struct mlx5_priv *opriv =
-				rte_eth_devices[port_id[i]].data->dev_private;
+				rte_eth_devices[port_id].data->dev_private;
 
 			if (!opriv ||
 			    opriv->representor ||
+			    opriv->sh != priv->sh ||
 			    opriv->domain_id != priv->domain_id)
 				continue;
 			/*
@@ -562,6 +679,37 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 			break;
 		}
 	}
+	return 0;
+}
+
+/**
+ * Get the device current raw clock counter.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] clock
+ *   Current raw clock counter of the device.
+ *
+ * @return
+ *   0 if the clock was read correctly, the value of errno otherwise.
+ */
+int
+mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct ibv_context *ctx = priv->sh->ctx;
+	struct ibv_values_ex values;
+	int err = 0;
+
+	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
+	err = mlx5_glue->query_rt_values_ex(ctx, &values);
+	if (err != 0) {
+		DRV_LOG(WARNING, "Could not query the clock!");
+		return err;
+	}
+	*clock = values.raw_clock.tv_nsec;
+	return 0;
+}
 
 /**
@@ -647,11 +795,13 @@ mlx5_find_master_dev(struct rte_eth_dev *dev)
 	priv = dev->data->dev_private;
 	domain_id = priv->domain_id;
 	assert(priv->representor);
-	RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
-		priv = rte_eth_devices[port_id].data->dev_private;
-		if (priv &&
-		    priv->master &&
-		    priv->domain_id == domain_id)
+	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+		struct mlx5_priv *opriv =
+			rte_eth_devices[port_id].data->dev_private;
+		if (opriv &&
+		    opriv->master &&
+		    opriv->domain_id == domain_id &&
+		    opriv->sh == priv->sh)
 			return &rte_eth_devices[port_id];
 	}
 	return NULL;
@@ -840,7 +990,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
 			dev->data->port_id, strerror(rte_errno));
 		return ret;
 	}
-	dev_link.link_speed = ecmd->speed;
+	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ? ETH_SPEED_NUM_NONE :
+							    ecmd->speed;
 	sc = ecmd->link_mode_masks[0] |
 		((uint64_t)ecmd->link_mode_masks[1] << 32);
 	priv->link_speed_capa = 0;
@@ -909,15 +1060,16 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	int ret;
 	struct rte_eth_link dev_link;
 	time_t start_time = time(NULL);
+	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;
 
 	do {
 		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
-		if (ret)
+		if (ret == -ENOTSUP)
 			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
 		if (ret == 0)
 			break;
 		/* Handle wait to complete situation. */
-		if (wait_to_complete && ret == -EAGAIN) {
+		if ((wait_to_complete || retry) && ret == -EAGAIN) {
 			if (abs((int)difftime(time(NULL), start_time)) <
 			    MLX5_LINK_STATUS_TIMEOUT) {
 				usleep(0);
@@ -929,7 +1081,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		} else if (ret < 0) {
 			return ret;
 		}
-	} while (wait_to_complete);
+	} while (wait_to_complete || retry-- > 0);
 	ret = !!memcmp(&dev->data->dev_link, &dev_link,
 		       sizeof(struct rte_eth_link));
 	dev->data->dev_link = dev_link;
@@ -1060,10 +1212,10 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 }
 
 /**
- * Get PCI information from struct ibv_device.
+ * Get PCI information by sysfs device path.
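+ * For example, for the IB device sysfs path
+ * "/sys/class/infiniband/mlx5_0" the PCI address is taken from the
+ * PCI_SLOT_NAME line of "/sys/class/infiniband/mlx5_0/device/uevent".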
 *
- * @param device
- *   Pointer to Ethernet device structure.
+ * @param dev_path
+ *   Pointer to device sysfs folder name.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
@@ -1071,12 +1223,12 @@
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
-mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
-			    struct rte_pci_addr *pci_addr)
+mlx5_dev_to_pci_addr(const char *dev_path,
+		     struct rte_pci_addr *pci_addr)
 {
 	FILE *file;
 	char line[32];
-	MKSTR(path, "%s/device/uevent", device->ibdev_path);
+	MKSTR(path, "%s/device/uevent", dev_path);
 
 	file = fopen(path, "rb");
 	if (file == NULL) {
@@ -1111,6 +1263,35 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
 	return 0;
 }
 
+/**
+ * Handle asynchronous removal event for entire multiport device.
+ *
+ * @param sh
+ *   Infiniband device shared context.
+ */
+static void
+mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
+{
+	uint32_t i;
+
+	for (i = 0; i < sh->max_port; ++i) {
+		struct rte_eth_dev *dev;
+
+		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
+			/*
+			 * The port does not exist, or there is no
+			 * handler installed for this port.
+			 */
+			continue;
+		}
+		dev = &rte_eth_devices[sh->port[i].ih_port_id];
+		assert(dev);
+		if (dev->data->dev_conf.intr_conf.rmv)
+			_rte_eth_dev_callback_process
+				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+	}
+}
+
 /**
  * Handle shared asynchronous events the NIC (removal event
  * and link status change). Supports multiport IB device.
@@ -1133,21 +1314,46 @@ mlx5_dev_interrupt_handler(void *cb_arg)
 			break;
 		/* Retrieve and check IB port index. */
 		tmp = (uint32_t)event.element.port_num;
-		assert(tmp && (tmp <= sh->max_port));
-		if (!tmp ||
-		    tmp > sh->max_port ||
-		    sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
+		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
 			/*
-			 * Invalid IB port index or no handler
-			 * installed for this port.
+			 * The DEVICE_FATAL event is delivered once for
+			 * the entire device, without specifying a port.
+			 * Notify all existing ports.
 			 */
 			mlx5_glue->ack_async_event(&event);
+			mlx5_dev_interrupt_device_fatal(sh);
+			continue;
+		}
+		assert(tmp && (tmp <= sh->max_port));
+		if (!tmp) {
+			/* Unsupported device level event. */
+			mlx5_glue->ack_async_event(&event);
+			DRV_LOG(DEBUG,
+				"unsupported common event (type %d)",
+				event.event_type);
+			continue;
+		}
+		if (tmp > sh->max_port) {
+			/* Invalid IB port index. */
+			mlx5_glue->ack_async_event(&event);
+			DRV_LOG(DEBUG,
+				"cannot handle an event (type %d) "
+				"due to invalid IB port index (%u)",
+				event.event_type, tmp);
+			continue;
+		}
+		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
+			mlx5_glue->ack_async_event(&event);
+			DRV_LOG(DEBUG,
+				"cannot handle an event (type %d) "
+				"due to no handler installed for port %u",
+				event.event_type, tmp);
 			continue;
 		}
 		/* Retrieve ethernet device descriptor. */
 		tmp = sh->port[tmp - 1].ih_port_id;
 		dev = &rte_eth_devices[tmp];
-		tmp = 0;
 		assert(dev);
 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
 		     event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1161,23 +1367,119 @@ mlx5_dev_interrupt_handler(void *cb_arg)
 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 			continue;
 		}
-		if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
-		    dev->data->dev_conf.intr_conf.rmv) {
-			mlx5_glue->ack_async_event(&event);
-			_rte_eth_dev_callback_process
-				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
-			continue;
-		}
 		DRV_LOG(DEBUG,
-			"port %u event type %d on not handled",
+			"port %u cannot handle an unknown event (type %d)",
 			dev->data->port_id, event.event_type);
 		mlx5_glue->ack_async_event(&event);
 	}
 }
 
+/**
+ * Unregister callback handler safely. The handler may be active
+ * while we are trying to unregister it; in this case
+ * rte_intr_callback_unregister() returns -EAGAIN. This routine checks
+ * the return code and retries until the handler is unregistered.
+ *
+ * @param handle
+ *   Interrupt handle.
+ * @param cb_fn
+ *   Pointer to callback routine.
+ * @param cb_arg
+ *   Opaque callback parameter.
+ */
+void
+mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
+			      rte_intr_callback_fn cb_fn, void *cb_arg)
+{
+	/*
+	 * Try to reduce timeout management overhead by not calling
+	 * the timer related routines on the first iteration. If the
+	 * unregistering succeeds on first call there will be no
+	 * timer calls at all.
+	 */
+	uint64_t twait = 0;
+	uint64_t start = 0;
+
+	do {
+		int ret;
+
+		ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
+		if (ret >= 0)
+			return;
+		if (ret != -EAGAIN) {
+			DRV_LOG(INFO, "failed to unregister interrupt"
+				      " handler (error: %d)", ret);
+			assert(false);
+			return;
+		}
+		if (twait) {
+			struct timespec onems;
+
+			/* Wait one millisecond and try again. */
+			onems.tv_sec = 0;
+			onems.tv_nsec = NS_PER_S / MS_PER_S;
+			nanosleep(&onems, 0);
+			/* Check whether one second elapsed. */
+			if ((rte_get_timer_cycles() - start) <= twait)
+				continue;
+		} else {
+			/*
+			 * We get the amount of timer ticks for one second.
+			 * If this amount elapsed it means we spent one
+			 * second in waiting. This branch is executed once
+			 * on first iteration.
+			 */
+			twait = rte_get_timer_hz();
+			assert(twait);
+		}
+		/*
+		 * Timeout elapsed, show the message (once a second) and
+		 * retry. We have no other acceptable option here: if the
+		 * unregistering return code were ignored, the handler
+		 * would not be unregistered, its fd would be closed and
+		 * we might get a crash. Hanging in the loop with a
+		 * periodic message is the least harmful choice.
+		 */
+		DRV_LOG(INFO, "Retrying to unregister interrupt handler");
+		start = rte_get_timer_cycles();
+	} while (true);
+}
+
+/**
+ * Handle DEVX interrupts from the NIC.
+ * This function is invoked from the EAL interrupt thread.
+ *
+ * @param cb_arg
+ *   Callback argument.
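+ *
+ * Drains all pending completions from sh->devx_comp and forwards each
+ * asynchronous flow counter query result to
+ * mlx5_flow_async_pool_query_handle().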
+ */ +void +mlx5_dev_interrupt_handler_devx(void *cb_arg) +{ +#ifndef HAVE_IBV_DEVX_ASYNC + (void)cb_arg; + return; +#else + struct mlx5_ibv_shared *sh = cb_arg; + union { + struct mlx5dv_devx_async_cmd_hdr cmd_resp; + uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) + + sizeof(struct mlx5dv_devx_async_cmd_hdr)]; + } out; + uint8_t *buf = out.buf + sizeof(out.cmd_resp); + + while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp, + &out.cmd_resp, + sizeof(out.buf))) + mlx5_flow_async_pool_query_handle + (sh, (uint64_t)out.cmd_resp.wr_id, + mlx5_devx_get_out_command_status(buf)); +#endif /* HAVE_IBV_DEVX_ASYNC */ +} + /** * Uninstall shared asynchronous device events handler. - * This function is implemeted to support event sharing + * This function is implemented to support event sharing * between multiple ports of single IB device. * * @param dev @@ -1203,7 +1505,7 @@ mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev) sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; if (!sh->intr_cnt || --sh->intr_cnt) goto exit; - rte_intr_callback_unregister(&sh->intr_handle, + mlx5_intr_callback_unregister(&sh->intr_handle, mlx5_dev_interrupt_handler, sh); sh->intr_handle.fd = 0; sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; @@ -1212,7 +1514,7 @@ exit: } /** - * Install shared asyncronous device events handler. + * Uninstall devx shared asynchronous device events handler. * This function is implemeted to support event sharing * between multiple ports of single IB device. * @@ -1220,6 +1522,48 @@ exit: * Pointer to Ethernet device. */ static void +mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + pthread_mutex_lock(&sh->intr_mutex); + assert(priv->ibv_port); + assert(priv->ibv_port <= sh->max_port); + assert(dev->data->port_id < RTE_MAX_ETHPORTS); + if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS) + goto exit; + assert(sh->port[priv->ibv_port - 1].devx_ih_port_id == + (uint32_t)dev->data->port_id); + sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; + if (!sh->devx_intr_cnt || --sh->devx_intr_cnt) + goto exit; + if (sh->intr_handle_devx.fd) { + rte_intr_callback_unregister(&sh->intr_handle_devx, + mlx5_dev_interrupt_handler_devx, + sh); + sh->intr_handle_devx.fd = 0; + sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN; + } + if (sh->devx_comp) { + mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); + sh->devx_comp = NULL; + } +exit: + pthread_mutex_unlock(&sh->intr_mutex); +} + +/** + * Install shared asynchronous device events handler. + * This function is implemented to support event sharing + * between multiple ports of single IB device. + * + * @param dev + * Pointer to Ethernet device. 
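+ *
+ * The handler is reference counted: only the first port of the IB
+ * device registers the callback on sh->ctx->async_fd and makes the
+ * file descriptor non-blocking, subsequent ports only increment
+ * sh->intr_cnt and record their port id in sh->port[].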
+ */
+static void
 mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -1238,8 +1582,9 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 		assert(sh->intr_cnt);
 		goto exit;
 	}
-	sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
 	if (sh->intr_cnt) {
+		sh->port[priv->ibv_port - 1].ih_port_id =
+			(uint32_t)dev->data->port_id;
 		sh->intr_cnt++;
 		goto exit;
 	}
@@ -1248,19 +1593,81 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 	flags = fcntl(sh->ctx->async_fd, F_GETFL);
 	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 	if (ret) {
-		DRV_LOG(INFO, "failed to change file descriptor"
-			      " async event queue");
+		DRV_LOG(INFO, "failed to change file descriptor async event"
+			" queue");
 		/* Indicate there will be no interrupts. */
 		dev->data->dev_conf.intr_conf.lsc = 0;
 		dev->data->dev_conf.intr_conf.rmv = 0;
-		sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+	} else {
+		sh->intr_handle.fd = sh->ctx->async_fd;
+		sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
+		rte_intr_callback_register(&sh->intr_handle,
+					   mlx5_dev_interrupt_handler, sh);
+		sh->intr_cnt++;
+		sh->port[priv->ibv_port - 1].ih_port_id =
+			(uint32_t)dev->data->port_id;
+	}
+exit:
+	pthread_mutex_unlock(&sh->intr_mutex);
+}
+
+/**
+ * Install devx shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of single IB device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+	pthread_mutex_lock(&sh->intr_mutex);
+	assert(priv->ibv_port);
+	assert(priv->ibv_port <= sh->max_port);
+	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
+		/* The handler is already installed for this port. */
+		assert(sh->devx_intr_cnt);
+		goto exit;
+	}
+	if (sh->devx_intr_cnt) {
+		sh->devx_intr_cnt++;
+		sh->port[priv->ibv_port - 1].devx_ih_port_id =
+			(uint32_t)dev->data->port_id;
 		goto exit;
 	}
-	sh->intr_handle.fd = sh->ctx->async_fd;
-	sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
-	rte_intr_callback_register(&sh->intr_handle,
-				   mlx5_dev_interrupt_handler, sh);
-	sh->intr_cnt++;
+	if (priv->config.devx) {
+#ifndef HAVE_IBV_DEVX_ASYNC
+		goto exit;
+#else
+		sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx);
+		if (sh->devx_comp) {
+			int flags = fcntl(sh->devx_comp->fd, F_GETFL);
+			int ret = fcntl(sh->devx_comp->fd, F_SETFL,
+					flags | O_NONBLOCK);
+
+			if (ret) {
+				DRV_LOG(INFO, "failed to change file descriptor"
+					" devx async event queue");
+			} else {
+				sh->intr_handle_devx.fd = sh->devx_comp->fd;
+				sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
+				rte_intr_callback_register
+					(&sh->intr_handle_devx,
+					 mlx5_dev_interrupt_handler_devx, sh);
+				sh->devx_intr_cnt++;
+				sh->port[priv->ibv_port - 1].devx_ih_port_id =
+					(uint32_t)dev->data->port_id;
			}
		}
+#endif /* HAVE_IBV_DEVX_ASYNC */
+	}
 exit:
 	pthread_mutex_unlock(&sh->intr_mutex);
 }
@@ -1289,6 +1696,30 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 	mlx5_dev_shared_handler_install(dev);
 }
 
+/**
+ * Uninstall devx interrupt handler.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
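+ *
+ * Exported wrapper around the static
+ * mlx5_dev_shared_handler_devx_uninstall(), keeping the shared-context
+ * bookkeeping local to this file.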
+ */
+void
+mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev)
+{
+	mlx5_dev_shared_handler_devx_uninstall(dev);
+}
+
+/**
+ * Install devx interrupt handler.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev)
+{
+	mlx5_dev_shared_handler_devx_install(dev);
+}
+
 /**
  * DPDK callback to bring the link DOWN.
  *
@@ -1319,64 +1750,6 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
 	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
 }
 
-/**
- * Configure the TX function to use.
- *
- * @param dev
- *   Pointer to private data structure.
- *
- * @return
- *   Pointer to selected Tx burst function.
- */
-eth_tx_burst_t
-mlx5_select_tx_function(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
-	struct mlx5_dev_config *config = &priv->config;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-				    DEV_TX_OFFLOAD_IP_TNL_TSO |
-				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
-	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
-				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
-	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
-
-	assert(priv != NULL);
-	/* Select appropriate TX function. */
-	if (vlan_insert || tso || swp)
-		return tx_pkt_burst;
-	if (config->mps == MLX5_MPW_ENHANCED) {
-		if (mlx5_check_vec_tx_support(dev) > 0) {
-			if (mlx5_check_raw_vec_tx_support(dev) > 0)
-				tx_pkt_burst = mlx5_tx_burst_raw_vec;
-			else
-				tx_pkt_burst = mlx5_tx_burst_vec;
-			DRV_LOG(DEBUG,
-				"port %u selected enhanced MPW Tx vectorized"
-				" function",
-				dev->data->port_id);
-		} else {
-			tx_pkt_burst = mlx5_tx_burst_empw;
-			DRV_LOG(DEBUG,
-				"port %u selected enhanced MPW Tx function",
-				dev->data->port_id);
-		}
-	} else if (config->mps && (config->txq_inline > 0)) {
-		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
-		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
-			dev->data->port_id);
-	} else if (config->mps) {
-		tx_pkt_burst = mlx5_tx_burst_mpw;
-		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
-			dev->data->port_id);
-	}
-	return tx_pkt_burst;
-}
-
 /**
  * Configure the RX function to use.
  *
@@ -1423,37 +1796,48 @@ mlx5_is_removed(struct rte_eth_dev *dev)
 }
 
 /**
- * Get port ID list of mlx5 instances sharing a common device.
+ * Get the E-Switch parameters by port id.
 *
- * @param[in] dev
- *   Device to look for.
- * @param[out] port_list
- *   Result buffer for collected port IDs.
- * @param port_list_n
- *   Maximum number of entries in result buffer. If 0, @p port_list can be
- *   NULL.
+ * @param[in] port
+ *   Device port id.
+ * @param[in] valid
+ *   Device port id is known to be valid, skip the check. This flag is
+ *   useful when trials are performed from probing and the device is
+ *   not flagged as valid yet (in the attaching process).
 *
 * @return
- *   Number of matching instances regardless of the @p port_list_n
- *   parameter, 0 if none were found.
+ *   Pointer to the device private data structure containing the needed
+ *   E-Switch data on success, NULL otherwise and rte_errno is set.
*/ -unsigned int -mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list, - unsigned int port_list_n) +struct mlx5_priv * +mlx5_port_to_eswitch_info(uint16_t port, bool valid) { - uint16_t id; - unsigned int n = 0; + struct rte_eth_dev *dev; + struct mlx5_priv *priv; - RTE_ETH_FOREACH_DEV_OF(id, dev) { - if (n < port_list_n) - port_list[n] = id; - n++; + if (port >= RTE_MAX_ETHPORTS) { + rte_errno = EINVAL; + return NULL; + } + if (!valid && !rte_eth_dev_is_valid_port(port)) { + rte_errno = ENODEV; + return NULL; + } + dev = &rte_eth_devices[port]; + priv = dev->data->dev_private; + if (!(priv->representor || priv->master)) { + rte_errno = EINVAL; + return NULL; } - return n; + return priv; } /** - * Get the E-Switch domain id this port belongs to. + * Get the E-Switch parameters by device instance. * * @param[in] port * Device port id. @@ -1463,34 +1847,20 @@ mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list, * The port id of the port in the E-Switch. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * pointer to device private data structure containing data needed + * on success, NULL otherwise and rte_errno is set. */ -int -mlx5_port_to_eswitch_info(uint16_t port, - uint16_t *es_domain_id, uint16_t *es_port_id) +struct mlx5_priv * +mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev) { - struct rte_eth_dev *dev; struct mlx5_priv *priv; - if (port >= RTE_MAX_ETHPORTS) { - rte_errno = EINVAL; - return -rte_errno; - } - if (!rte_eth_dev_is_valid_port(port)) { - rte_errno = ENODEV; - return -rte_errno; - } - dev = &rte_eth_devices[port]; priv = dev->data->dev_private; if (!(priv->representor || priv->master)) { rte_errno = EINVAL; - return -rte_errno; + return NULL; } - if (es_domain_id) - *es_domain_id = priv->domain_id; - if (es_port_id) - *es_port_id = priv->vport_id; - return 0; + return priv; } /** @@ -1723,3 +2093,116 @@ mlx5_translate_port_name(const char *port_name_in, port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN; return; } + +/** + * DPDK callback to retrieve plug-in module EEPROM information (type and size). + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] modinfo + * Storage for plug-in module EEPROM information. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ethtool_modinfo info = { + .cmd = ETHTOOL_GMODULEINFO, + }; + struct ifreq ifr = (struct ifreq) { + .ifr_data = (void *)&info, + }; + int ret = 0; + + if (!dev || !modinfo) { + DRV_LOG(WARNING, "missing argument, cannot get module info"); + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + modinfo->type = info.type; + modinfo->eeprom_len = info.eeprom_len; + return ret; +} + +/** + * DPDK callback to retrieve plug-in module EEPROM data. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Storage for plug-in module EEPROM data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ethtool_eeprom *eeprom; + struct ifreq ifr; + int ret = 0; + + if (!dev || !info) { + DRV_LOG(WARNING, "missing argument, cannot get module eeprom"); + rte_errno = EINVAL; + return -rte_errno; + } + eeprom = rte_calloc(__func__, 1, + (sizeof(struct ethtool_eeprom) + info->length), 0); + if (!eeprom) { + DRV_LOG(WARNING, "port %u cannot allocate memory for " + "eeprom data", dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } + eeprom->cmd = ETHTOOL_GMODULEEEPROM; + eeprom->offset = info->offset; + eeprom->len = info->length; + ifr = (struct ifreq) { + .ifr_data = (void *)eeprom, + }; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) + DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", + dev->data->port_id, strerror(rte_errno)); + else + rte_memcpy(info->data, eeprom->data, info->length); + rte_free(eeprom); + return ret; +} + +/** + * DPDK callback to retrieve hairpin capabilities. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] cap + * Storage for hairpin capability data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int mlx5_hairpin_cap_get(struct rte_eth_dev *dev, + struct rte_eth_hairpin_cap *cap) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->devx == 0) { + rte_errno = ENOTSUP; + return -rte_errno; + } + cap->max_nb_queues = UINT16_MAX; + cap->max_rx_2_tx = 1; + cap->max_tx_2_rx = 1; + cap->max_nb_desc = 8192; + return 0; +}
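
For reference, the new hairpin capability callback is reached through the
generic ethdev API added in the same release; a minimal application-side
check could look like this (illustrative sketch, port_id is an application
variable):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	print_hairpin_caps(uint16_t port_id)
	{
		struct rte_eth_hairpin_cap cap;

		/* Invokes mlx5_hairpin_cap_get() for mlx5 ports. */
		if (rte_eth_dev_hairpin_capability_get(port_id, &cap) == 0)
			printf("port %u: %u hairpin queues, %u Rx->Tx bound\n",
			       port_id, cap.max_nb_queues, cap.max_rx_2_tx);
	}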