diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index b6c7d7a4dc..1159fa3d45 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -32,6 +32,7 @@
  */
 
 #include <stddef.h>
+#include <assert.h>
 #include <unistd.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -44,6 +45,9 @@
 #include <sys/ioctl.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <fcntl.h>
 
 /* DPDK headers don't like -pedantic. */
 #ifdef PEDANTIC
@@ -53,11 +57,14 @@
 #include <rte_atomic.h>
 #include <rte_ethdev.h>
 #include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-pedantic"
 #endif
 
 #include "mlx5.h"
+#include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 
 /**
@@ -344,6 +351,23 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
 	return 0;
 }
 
+/**
+ * Set device MTU.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param mtu
+ *   MTU value to set.
+ *
+ * @return
+ *   0 on success, -1 on failure and errno is set.
+ */
+static int
+priv_set_mtu(struct priv *priv, uint16_t mtu)
+{
+	return priv_set_sysfs_ulong(priv, "mtu", mtu);
+}
+
 /**
  * Set device flags.
  *
@@ -369,6 +393,392 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
 	return priv_set_sysfs_ulong(priv, "flags", tmp);
 }
 
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for a given number of TX and RX queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static int
+dev_configure(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	unsigned int rxqs_n = dev->data->nb_rx_queues;
+	unsigned int txqs_n = dev->data->nb_tx_queues;
+	unsigned int i;
+	unsigned int j;
+	unsigned int reta_idx_n;
+
+	priv->rxqs = (void *)dev->data->rx_queues;
+	priv->txqs = (void *)dev->data->tx_queues;
+	if (txqs_n != priv->txqs_n) {
+		INFO("%p: TX queues number update: %u -> %u",
+		     (void *)dev, priv->txqs_n, txqs_n);
+		priv->txqs_n = txqs_n;
+	}
+	if (rxqs_n > priv->ind_table_max_size) {
+		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
+		return EINVAL;
+	}
+	if (rxqs_n == priv->rxqs_n)
+		return 0;
+	INFO("%p: RX queues number update: %u -> %u",
+	     (void *)dev, priv->rxqs_n, rxqs_n);
+	priv->rxqs_n = rxqs_n;
+	/* If the requested number of RX queues is not a power of two, use the
+	 * maximum indirection table size for better balancing.
+	 * The result is always rounded to the next power of two. */
+	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
+				     priv->ind_table_max_size :
+				     rxqs_n));
+	if (priv_rss_reta_index_resize(priv, reta_idx_n))
+		return ENOMEM;
+	/* When the number of RX queues is not a power of two, the remaining
+	 * table entries are padded with reused WQs and hashes are not spread
+	 * uniformly. */
+	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
+		(*priv->reta_idx)[i] = j;
+		if (++j == rxqs_n)
+			j = 0;
+	}
+	return 0;
+}
+
+/**
+ * DPDK callback for Ethernet device configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = dev_configure(dev);
+	assert(ret >= 0);
+	priv_unlock(priv);
+	return -ret;
+}
+
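To illustrate the indirection table sizing and fill logic in dev_configure() above, here is a standalone sketch. log2above() is copied from the PMD's mlx5_utils.h of this era for self-containment, and the indirection table cap of 256 is an assumed value for the example:

#include <stdio.h>

/* Next-power-of-two exponent, as in the PMD's mlx5_utils.h. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	const unsigned int ind_table_max_size = 256; /* assumed cap */
	unsigned int rxqs_n = 6; /* not a power of two */
	unsigned int reta_idx_n;
	unsigned int i;
	unsigned int j;

	/* Same sizing rule as dev_configure(): a non-power-of-two queue
	 * count selects the maximum table size for better balancing. */
	reta_idx_n = 1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				    ind_table_max_size : rxqs_n);
	printf("reta_idx_n = %u\n", reta_idx_n); /* prints 256 */
	/* Entries wrap around the queue set: 0 1 2 3 4 5 0 1 ... */
	for (i = 0, j = 0; i != reta_idx_n && i < 8; ++i) {
		printf("reta[%u] = %u\n", i, j);
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}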
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] info
+ *   Info structure output buffer.
+ */
+void
+mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+	struct priv *priv = dev->data->dev_private;
+	unsigned int max;
+	char ifname[IF_NAMESIZE];
+
+	priv_lock(priv);
+	/* FIXME: we should ask the device for these values. */
+	info->min_rx_bufsize = 32;
+	info->max_rx_pktlen = 65536;
+	/*
+	 * Since we need one CQ per QP, the limit is the minimum number
+	 * between the two values.
+	 */
+	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
+	       priv->device_attr.max_qp : priv->device_attr.max_cq);
+	/* max_rx_queues is uint16_t; cap at 65535 so it does not wrap to 0. */
+	if (max >= 65535)
+		max = 65535;
+	info->max_rx_queues = max;
+	info->max_tx_queues = max;
+	/* Last array entry is reserved for broadcast. */
+	info->max_mac_addrs = (RTE_DIM(priv->mac) - 1);
+	info->rx_offload_capa =
+		(priv->hw_csum ?
+		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
+		  DEV_RX_OFFLOAD_UDP_CKSUM |
+		  DEV_RX_OFFLOAD_TCP_CKSUM) :
+		 0);
+	info->tx_offload_capa =
+		(priv->hw_csum ?
+		 (DEV_TX_OFFLOAD_IPV4_CKSUM |
+		  DEV_TX_OFFLOAD_UDP_CKSUM |
+		  DEV_TX_OFFLOAD_TCP_CKSUM) :
+		 0);
+	if (priv_get_ifname(priv, &ifname) == 0)
+		info->if_index = if_nametoindex(ifname);
+	/* FIXME: RETA update/query API expects the caller to know the size of
+	 * the indirection table. For this PMD the size varies with the
+	 * number of RX queues, which makes it impossible to determine the
+	 * correct size while it is not fixed.
+	 * The API should be updated to solve this problem. */
+	info->reta_size = priv->ind_table_max_size;
+	priv_unlock(priv);
+}
+
+/**
+ * DPDK callback to retrieve physical link information (unlocked version).
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ *   Wait for request completion (ignored).
+ *
+ * @return
+ *   0 if the link status changed, -1 otherwise (no change or error).
+ */
+static int
+mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct ethtool_cmd edata = {
+		.cmd = ETHTOOL_GSET
+	};
+	struct ifreq ifr;
+	struct rte_eth_link dev_link;
+	int link_speed = 0;
+
+	(void)wait_to_complete;
+	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
+		return -1;
+	}
+	memset(&dev_link, 0, sizeof(dev_link));
+	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+				(ifr.ifr_flags & IFF_RUNNING));
+	ifr.ifr_data = &edata;
+	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+		     strerror(errno));
+		return -1;
+	}
+	link_speed = ethtool_cmd_speed(&edata);
+	if (link_speed == -1)
+		dev_link.link_speed = 0;
+	else
+		dev_link.link_speed = link_speed;
+	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
+		/* Link status changed. */
+		dev->data->dev_link = dev_link;
+		return 0;
+	}
+	/* Link status is still the same. */
+	return -1;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ *   Wait for request completion (ignored).
+ *
+ * @return
+ *   0 if the link status changed, -1 otherwise (no change or error).
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = mlx5_link_update_unlocked(dev, wait_to_complete);
+	priv_unlock(priv);
+	return ret;
+}
+
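As background for the ETHTOOL_GSET exchange performed by mlx5_link_update_unlocked() above, this is a minimal standalone sketch of querying link speed through the SIOCETHTOOL ioctl; the interface name "eth0" is a placeholder:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int
main(void)
{
	struct ethtool_cmd edata = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1); /* placeholder */
	ifr.ifr_data = (void *)&edata;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == -1) {
		fprintf(stderr, "SIOCETHTOOL: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	/* ethtool_cmd_speed() combines the speed and speed_hi fields. */
	printf("link speed: %u Mb/s\n", ethtool_cmd_speed(&edata));
	close(fd);
	return 0;
}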
+/**
+ * DPDK callback to change the MTU.
+ *
+ * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
+ * received). Use this as a hint to enable/disable scattered packets support
+ * and improve performance when not needed.
+ * Since failure is not an option, reconfiguring queues on the fly is not
+ * recommended.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param mtu
+ *   New MTU.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct priv *priv = dev->data->dev_private;
+	int ret = 0;
+	unsigned int i;
+	uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
+		mlx5_rx_burst;
+
+	priv_lock(priv);
+	/* Set kernel interface MTU first. */
+	if (priv_set_mtu(priv, mtu)) {
+		ret = errno;
+		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+		     strerror(ret));
+		goto out;
+	} else
+		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
+	priv->mtu = mtu;
+	/* Temporarily replace RX handler with a fake one, assuming it has not
+	 * been copied elsewhere. */
+	dev->rx_pkt_burst = removed_rx_burst;
+	/* Make sure everyone has left mlx5_rx_burst() and uses
+	 * removed_rx_burst() instead. */
+	rte_wmb();
+	usleep(1000);
+	/* Reconfigure each RX queue. */
+	for (i = 0; (i != priv->rxqs_n); ++i) {
+		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int max_frame_len;
+		int sp;
+
+		if (rxq == NULL)
+			continue;
+		/* Calculate new maximum frame length according to MTU and
+		 * toggle scattered support (sp) if necessary. */
+		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
+				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
+		sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+		/* Provide new values to rxq_setup(). */
+		dev->data->dev_conf.rxmode.jumbo_frame = sp;
+		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
+		ret = rxq_rehash(dev, rxq);
+		if (ret) {
+			/* Force SP RX if that queue requires it and abort. */
+			if (rxq->sp)
+				rx_func = mlx5_rx_burst_sp;
+			break;
+		}
+		/* Scattered burst function takes priority. */
+		if (rxq->sp)
+			rx_func = mlx5_rx_burst_sp;
+	}
+	/* Burst functions can now be called again. */
+	rte_wmb();
+	dev->rx_pkt_burst = rx_func;
+out:
+	priv_unlock(priv);
+	assert(ret >= 0);
+	return -ret;
+}
+
+/**
+ * DPDK callback to get flow control status.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] fc_conf
+ *   Flow control output buffer.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct ifreq ifr;
+	struct ethtool_pauseparam ethpause = {
+		.cmd = ETHTOOL_GPAUSEPARAM
+	};
+	int ret;
+
+	ifr.ifr_data = &ethpause;
+	priv_lock(priv);
+	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+		ret = errno;
+		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
+		     " failed: %s",
+		     strerror(ret));
+		goto out;
+	}
+
+	fc_conf->autoneg = ethpause.autoneg;
+	if (ethpause.rx_pause && ethpause.tx_pause)
+		fc_conf->mode = RTE_FC_FULL;
+	else if (ethpause.rx_pause)
+		fc_conf->mode = RTE_FC_RX_PAUSE;
+	else if (ethpause.tx_pause)
+		fc_conf->mode = RTE_FC_TX_PAUSE;
+	else
+		fc_conf->mode = RTE_FC_NONE;
+	ret = 0;
+
+out:
+	priv_unlock(priv);
+	assert(ret >= 0);
+	return -ret;
+}
+
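To make the frame-length arithmetic in mlx5_dev_set_mtu() above concrete, a small standalone check follows. The Ethernet constants match rte_ether.h; the 2048-byte mbuf data room and 128-byte headroom are assumed typical values for the example:

#include <stdio.h>

#define ETHER_HDR_LEN 14		/* from rte_ether.h */
#define ETHER_MAX_LEN 1518
#define ETHER_MAX_VLAN_FRAME_LEN 1522
#define RTE_PKTMBUF_HEADROOM 128	/* typical default */

int
main(void)
{
	const unsigned int mb_len = 2048;	/* assumed mbuf data room */
	const unsigned int mtus[] = { 1500, 9000 };
	unsigned int k;

	for (k = 0; k != 2; ++k) {
		/* Same formula as mlx5_dev_set_mtu(): MTU + L2 header
		 * + VLAN tag allowance (1522 - 1518 = 4 bytes). */
		unsigned int max_frame_len = mtus[k] + ETHER_HDR_LEN +
			(ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN);
		int sp = max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM);

		/* 1518 <= 1920 keeps scattered RX off; 9018 > 1920 turns
		 * it on. */
		printf("MTU %u: max_frame_len=%u scattered=%d\n",
		       mtus[k], max_frame_len, sp);
	}
	return 0;
}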
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ *   Flow control parameters.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct ifreq ifr;
+	struct ethtool_pauseparam ethpause = {
+		.cmd = ETHTOOL_SPAUSEPARAM
+	};
+	int ret;
+
+	ifr.ifr_data = &ethpause;
+	ethpause.autoneg = fc_conf->autoneg;
+	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+	    (fc_conf->mode & RTE_FC_RX_PAUSE))
+		ethpause.rx_pause = 1;
+	else
+		ethpause.rx_pause = 0;
+
+	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+	    (fc_conf->mode & RTE_FC_TX_PAUSE))
+		ethpause.tx_pause = 1;
+	else
+		ethpause.tx_pause = 0;
+
+	priv_lock(priv);
+	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+		ret = errno;
+		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+		     " failed: %s",
+		     strerror(ret));
+		goto out;
+	}
+	ret = 0;
+
+out:
+	priv_unlock(priv);
+	assert(ret >= 0);
+	return -ret;
+}
+
 /**
  * Get PCI information from struct ibv_device.
  *
@@ -418,3 +828,150 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
 	fclose(file);
 	return 0;
 }
+
+/**
+ * Link status handler.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param dev
+ *   Pointer to the rte_eth_dev structure.
+ *
+ * @return
+ *   Nonzero if the callback process can be called immediately.
+ */
+static int
+priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
+{
+	struct ibv_async_event event;
+	int port_change = 0;
+	int ret = 0;
+
+	/* Read all messages and acknowledge them. */
+	for (;;) {
+		if (ibv_get_async_event(priv->ctx, &event))
+			break;
+
+		if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
+		    event.event_type == IBV_EVENT_PORT_ERR)
+			port_change = 1;
+		else
+			DEBUG("event type %d on port %d not handled",
+			      event.event_type, event.element.port_num);
+		ibv_ack_async_event(&event);
+	}
+
+	if (port_change ^ priv->pending_alarm) {
+		struct rte_eth_link *link = &dev->data->dev_link;
+
+		priv->pending_alarm = 0;
+		mlx5_link_update_unlocked(dev, 0);
+		if (((link->link_speed == 0) && link->link_status) ||
+		    ((link->link_speed != 0) && !link->link_status)) {
+			/* Inconsistent status, check again later. */
+			priv->pending_alarm = 1;
+			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
+					  mlx5_dev_link_status_handler,
+					  dev);
+		} else
+			ret = 1;
+	}
+	return ret;
+}
+
+/**
+ * Handle delayed link status event.
+ *
+ * @param arg
+ *   Registered argument.
+ */
+void
+mlx5_dev_link_status_handler(void *arg)
+{
+	struct rte_eth_dev *dev = arg;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	assert(priv->pending_alarm == 1);
+	ret = priv_dev_link_status_handler(priv, dev);
+	priv_unlock(priv);
+	if (ret)
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param[in] intr_handle
+ *   Interrupt handler.
+ * @param cb_arg
+ *   Callback argument.
+ */
+void
+mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+{
+	struct rte_eth_dev *dev = cb_arg;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	(void)intr_handle;
+	priv_lock(priv);
+	ret = priv_dev_link_status_handler(priv, dev);
+	priv_unlock(priv);
+	if (ret)
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
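For context, the handlers above funnel into _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC), which fires callbacks that an application registers through the public ethdev API. A sketch against the API of this era follows (uint8_t port ids and void-returning callbacks; later DPDK releases changed both, and port 0 is assumed here):

#include <stdio.h>
#include <rte_ethdev.h>

/* Application-side link status change callback. */
static void
lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
{
	struct rte_eth_link link;

	(void)type;
	(void)arg;
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link %s, %u Mb/s\n", port_id,
	       link.link_status ? "up" : "down", link.link_speed);
}

/* Call once after rte_eth_dev_configure(); intr_conf.lsc must be 1
 * for the PMD to install its interrupt handler at all. */
static void
register_lsc(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_cb, NULL);
}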
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+void
+priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+{
+	if (!dev->data->dev_conf.intr_conf.lsc)
+		return;
+	rte_intr_callback_unregister(&priv->intr_handle,
+				     mlx5_dev_interrupt_handler,
+				     dev);
+	if (priv->pending_alarm)
+		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
+	priv->pending_alarm = 0;
+	priv->intr_handle.fd = 0;
+	priv->intr_handle.type = 0;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+void
+priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+{
+	int rc, flags;
+
+	if (!dev->data->dev_conf.intr_conf.lsc)
+		return;
+	assert(priv->ctx->async_fd > 0);
+	flags = fcntl(priv->ctx->async_fd, F_GETFL);
+	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+	if (rc < 0) {
+		INFO("failed to set async event queue fd to non-blocking mode");
+		dev->data->dev_conf.intr_conf.lsc = 0;
+	} else {
+		priv->intr_handle.fd = priv->ctx->async_fd;
+		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
+		rte_intr_callback_register(&priv->intr_handle,
+					   mlx5_dev_interrupt_handler,
+					   dev);
+	}
+}
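The pending_alarm machinery above relies on the EAL alarm API for its deferred recheck. As a standalone illustration of that one-shot pattern (the 100 ms timeout, recheck_cb and schedule_recheck are made up for the example, and an initialized EAL is assumed so the alarm thread can run):

#include <stdio.h>
#include <rte_alarm.h>

#define RECHECK_TIMEOUT_US (100 * 1000) /* arbitrary for this example */

static int pending;

/* Runs once in the EAL interrupt thread after the timeout expires. */
static void
recheck_cb(void *arg)
{
	int *flag = arg;

	*flag = 0;
	printf("deferred recheck fired\n");
}

static void
schedule_recheck(void)
{
	if (pending)
		return;
	pending = 1;
	/* One-shot alarm; a pending one can be dropped early with
	 * rte_eal_alarm_cancel(recheck_cb, &pending), as the uninstall
	 * path above does. Error checking is omitted in this sketch. */
	rte_eal_alarm_set(RECHECK_TIMEOUT_US, recheck_cb, &pending);
}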