X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_ethdev.c;h=6704382b821aedf5b539df6a30f35b79c532dd01;hb=0497ddaac511ea86a4d7f5239a723512fc176a98;hp=5df5fa10291a674eb666ada43834e8fc167bf8f7;hpb=02d754304391cced3d0701610ce6b7bfdcd15b39;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 5df5fa1029..6704382b82 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -47,6 +47,7 @@ #include #include #include +#include /* DPDK headers don't like -pedantic. */ #ifdef PEDANTIC @@ -56,6 +57,8 @@ #include #include #include +#include +#include #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -394,7 +397,6 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) * Ethernet device configuration. * * Prepare the driver for a given number of TX and RX queues. - * Allocate parent RSS queue when several RX queues are requested. * * @param dev * Pointer to Ethernet device structure. @@ -408,8 +410,9 @@ dev_configure(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; unsigned int txqs_n = dev->data->nb_tx_queues; - unsigned int tmp; - int ret; + unsigned int i; + unsigned int j; + unsigned int reta_idx_n; priv->rxqs = (void *)dev->data->rx_queues; priv->txqs = (void *)dev->data->tx_queues; @@ -418,51 +421,32 @@ dev_configure(struct rte_eth_dev *dev) (void *)dev, priv->txqs_n, txqs_n); priv->txqs_n = txqs_n; } + if (rxqs_n > priv->ind_table_max_size) { + ERROR("cannot handle this many RX queues (%u)", rxqs_n); + return EINVAL; + } if (rxqs_n == priv->rxqs_n) return 0; INFO("%p: RX queues number update: %u -> %u", (void *)dev, priv->rxqs_n, rxqs_n); - /* If RSS is enabled, disable it first. */ - if (priv->rss) { - unsigned int i; - - /* Only if there are no remaining child RX queues. */ - for (i = 0; (i != priv->rxqs_n); ++i) - if ((*priv->rxqs)[i] != NULL) - return EINVAL; - rxq_cleanup(&priv->rxq_parent); - priv->rss = 0; - priv->rxqs_n = 0; - } - if (rxqs_n <= 1) { - /* Nothing else to do. */ - priv->rxqs_n = rxqs_n; - return 0; - } - /* Allocate a new RSS parent queue if supported by hardware. */ - if (!priv->hw_rss) { - ERROR("%p: only a single RX queue can be configured when" - " hardware doesn't support RSS", - (void *)dev); - return EINVAL; - } - /* Fail if hardware doesn't support that many RSS queues. */ - if (rxqs_n >= priv->max_rss_tbl_sz) { - ERROR("%p: only %u RX queues can be configured for RSS", - (void *)dev, priv->max_rss_tbl_sz); - return EINVAL; - } - priv->rss = 1; - tmp = priv->rxqs_n; priv->rxqs_n = rxqs_n; - ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, NULL, NULL); - if (!ret) - return 0; - /* Failure, rollback. */ - priv->rss = 0; - priv->rxqs_n = tmp; - assert(ret > 0); - return ret; + /* If the requested number of RX queues is not a power of two, use the + * maximum indirection table size for better balancing. + * The result is always rounded to the next power of two. */ + reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? + priv->ind_table_max_size : + rxqs_n)); + if (priv_rss_reta_index_resize(priv, reta_idx_n)) + return ENOMEM; + /* When the number of RX queues is not a power of two, the remaining + * table entries are padded with reused WQs and hashes are not spread + * uniformly. 
*/ + for (i = 0, j = 0; (i != reta_idx_n); ++i) { + (*priv->reta_idx)[i] = j; + if (++j == rxqs_n) + j = 0; + } + return 0; } /** @@ -517,8 +501,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) max = 65535; info->max_rx_queues = max; info->max_tx_queues = max; - /* Last array entry is reserved for broadcast. */ - info->max_mac_addrs = (RTE_DIM(priv->mac) - 1); + info->max_mac_addrs = RTE_DIM(priv->mac); info->rx_offload_capa = (priv->hw_csum ? (DEV_RX_OFFLOAD_IPV4_CKSUM | @@ -533,6 +516,12 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) 0); if (priv_get_ifname(priv, &ifname) == 0) info->if_index = if_nametoindex(ifname); + /* FIXME: RETA update/query API expects the callee to know the size of + * the indirection table, for this PMD the size varies depending on + * the number of RX queues, it becomes impossible to find the correct + * size if it is not fixed. + * The API should be updated to solve this problem. */ + info->reta_size = priv->ind_table_max_size; priv_unlock(priv); } @@ -671,16 +660,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) rx_func = mlx5_rx_burst_sp; break; } - /* Reenable non-RSS queue attributes. No need to check - * for errors at this stage. */ - if (!priv->rss) { - if (priv->started) - rxq_mac_addrs_add(rxq); - if (priv->started && priv->promisc_req) - rxq_promiscuous_enable(rxq); - if (priv->started && priv->allmulti_req) - rxq_allmulticast_enable(rxq); - } /* Scattered burst function takes priority. */ if (rxq->sp) rx_func = mlx5_rx_burst_sp; @@ -842,3 +821,150 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, fclose(file); return 0; } + +/** + * Link status handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + * + * @return + * Nonzero if the callback process can be called immediately. + */ +static int +priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev) +{ + struct ibv_async_event event; + int port_change = 0; + int ret = 0; + + /* Read all message and acknowledge them. */ + for (;;) { + if (ibv_get_async_event(priv->ctx, &event)) + break; + + if (event.event_type == IBV_EVENT_PORT_ACTIVE || + event.event_type == IBV_EVENT_PORT_ERR) + port_change = 1; + else + DEBUG("event type %d on port %d not handled", + event.event_type, event.element.port_num); + ibv_ack_async_event(&event); + } + + if (port_change ^ priv->pending_alarm) { + struct rte_eth_link *link = &dev->data->dev_link; + + priv->pending_alarm = 0; + mlx5_link_update_unlocked(dev, 0); + if (((link->link_speed == 0) && link->link_status) || + ((link->link_speed != 0) && !link->link_status)) { + /* Inconsistent status, check again later. */ + priv->pending_alarm = 1; + rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US, + mlx5_dev_link_status_handler, + dev); + } else + ret = 1; + } + return ret; +} + +/** + * Handle delayed link status event. + * + * @param arg + * Registered argument. + */ +void +mlx5_dev_link_status_handler(void *arg) +{ + struct rte_eth_dev *dev = arg; + struct priv *priv = dev->data->dev_private; + int ret; + + priv_lock(priv); + assert(priv->pending_alarm == 1); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Handle interrupts from the NIC. + * + * @param[in] intr_handle + * Interrupt handler. + * @param cb_arg + * Callback argument. 
+ */ +void +mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg) +{ + struct rte_eth_dev *dev = cb_arg; + struct priv *priv = dev->data->dev_private; + int ret; + + (void)intr_handle; + priv_lock(priv); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Uninstall interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + */ +void +priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +{ + if (!dev->data->dev_conf.intr_conf.lsc) + return; + rte_intr_callback_unregister(&priv->intr_handle, + mlx5_dev_interrupt_handler, + dev); + if (priv->pending_alarm) + rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev); + priv->pending_alarm = 0; + priv->intr_handle.fd = 0; + priv->intr_handle.type = 0; +} + +/** + * Install interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + */ +void +priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +{ + int rc, flags; + + if (!dev->data->dev_conf.intr_conf.lsc) + return; + assert(priv->ctx->async_fd > 0); + flags = fcntl(priv->ctx->async_fd, F_GETFL); + rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + INFO("failed to change file descriptor async event queue"); + dev->data->dev_conf.intr_conf.lsc = 0; + } else { + priv->intr_handle.fd = priv->ctx->async_fd; + priv->intr_handle.type = RTE_INTR_HANDLE_EXT; + rte_intr_callback_register(&priv->intr_handle, + mlx5_dev_interrupt_handler, + dev); + } +}
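
Note on the dev_configure() hunk above: when the requested number of RX queues is a power of two the RSS indirection table is sized to exactly that number, otherwise the maximum table size is used so the padding entries still spread hashes reasonably, and the table is then filled round-robin so every queue appears at least once. The standalone sketch below reproduces that sizing and fill rule outside the driver; next_pow2(), the 512-entry limit and the queue count are local stand-ins for the driver's log2above(), priv->ind_table_max_size and priv->reta_idx, not the PMD's actual API.

#include <stdio.h>
#include <stdint.h>

/* Smallest power of two >= v (local stand-in for the driver's log2above()). */
static unsigned int
next_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int
main(void)
{
	const unsigned int ind_table_max_size = 512; /* assumed device limit */
	const unsigned int rxqs_n = 6; /* requested RX queues, not a power of two */
	uint16_t reta[512];
	unsigned int reta_idx_n;
	unsigned int i;
	unsigned int j;

	/* Power of two: size the table exactly; otherwise use the maximum
	 * size for better balancing, rounded up to a power of two. */
	if (rxqs_n & (rxqs_n - 1))
		reta_idx_n = next_pow2(ind_table_max_size);
	else
		reta_idx_n = next_pow2(rxqs_n);
	/* Fill round-robin: entry i points to queue i % rxqs_n, so the
	 * remaining entries reuse existing queues when rxqs_n does not
	 * divide the table size evenly. */
	for (i = 0, j = 0; i != reta_idx_n; ++i) {
		reta[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	printf("reta size %u, first entries: %u %u %u %u %u %u %u %u\n",
	       reta_idx_n, reta[0], reta[1], reta[2], reta[3],
	       reta[4], reta[5], reta[6], reta[7]);
	return 0;
}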
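
The link status handler added above treats a port that reports "up" with no speed, or "down" with a speed, as being in a transient state and arms an alarm to re-read the link later instead of notifying applications immediately. The sketch below isolates that consistency check; struct link_info only mirrors the two rte_eth_link fields the handler looks at and is not the DPDK definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal mirror of the two rte_eth_link fields used by the handler. */
struct link_info {
	uint32_t link_speed; /* 0 when no speed is reported */
	int link_status;     /* nonzero when the port reports "up" */
};

/* "Up" with no speed or "down" with a speed is inconsistent: the caller
 * should re-check after a delay (the driver uses rte_eal_alarm_set() for
 * this) rather than fire the LSC callback right away. */
static bool
link_info_is_consistent(const struct link_info *link)
{
	if (link->link_speed == 0 && link->link_status)
		return false;
	if (link->link_speed != 0 && !link->link_status)
		return false;
	return true;
}

int
main(void)
{
	struct link_info settled = { .link_speed = 40000, .link_status = 1 };
	struct link_info transient = { .link_speed = 0, .link_status = 1 };

	printf("settled: %s\n",
	       link_info_is_consistent(&settled) ? "notify now" : "recheck later");
	printf("transient: %s\n",
	       link_info_is_consistent(&transient) ? "notify now" : "recheck later");
	return 0;
}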
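
priv_dev_interrupt_handler_install() switches the verbs asynchronous event descriptor to non-blocking mode before registering it with the EAL, so the handler's ibv_get_async_event() loop drains pending events and returns instead of blocking. The short example below shows the same F_GETFL/F_SETFL pattern on an ordinary descriptor; a pipe stands in for priv->ctx->async_fd, which in the driver comes from libibverbs.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Put an already-open descriptor into non-blocking mode while preserving
 * its other status flags, as the install routine does for async_fd. */
static int
set_nonblocking(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

int
main(void)
{
	int fds[2];

	/* A pipe read end stands in for the verbs async event descriptor. */
	if (pipe(fds) != 0) {
		perror("pipe");
		return 1;
	}
	if (set_nonblocking(fds[0]) < 0) {
		perror("fcntl");
		return 1;
	}
	printf("read end %d is now non-blocking\n", fds[0]);
	close(fds[0]);
	close(fds[1]);
	return 0;
}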