X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_ethdev.c;h=666507691c027cea314c0b90497fa109ad913460;hb=8fd92a66c60a7310cf5ab91996b9b09447512a61;hp=28183534aae2e0c78ac8513b45bf6e4a55d3053c;hpb=1cfa649ba614693165fc4659ace525d8b45bf110;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 28183534aa..666507691c 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2015 6WIND S.A.
- *   Copyright 2015 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
 */
 
 #define _GNU_SOURCE
@@ -55,7 +27,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -64,6 +36,7 @@
 #include
 
 #include "mlx5.h"
+#include "mlx5_glue.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
@@ -551,7 +524,25 @@ dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-
+	uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t supp_rx_offloads =
+		(mlx5_priv_get_rx_port_offloads(priv) |
+		 mlx5_priv_get_rx_queue_offloads(priv));
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+		ERROR("Some Tx offloads are not supported "
+		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+		      tx_offloads, supp_tx_offloads);
+		return ENOTSUP;
+	}
+	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+		ERROR("Some Rx offloads are not supported "
+		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+		      rx_offloads, supp_rx_offloads);
+		return ENOTSUP;
+	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
@@ -579,7 +570,7 @@ dev_configure(struct rte_eth_dev *dev)
 		      (void *)dev, priv->txqs_n, txqs_n);
 		priv->txqs_n = txqs_n;
 	}
-	if (rxqs_n > priv->ind_table_max_size) {
+	if (rxqs_n > priv->config.ind_table_max_size) {
 		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
 		return EINVAL;
 	}
@@ -592,7 +583,7 @@ dev_configure(struct rte_eth_dev *dev)
 	 * maximum indirection table size for better balancing.
 	 * The result is always rounded to the next power of two. */
 	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
-				     priv->ind_table_max_size :
+				     priv->config.ind_table_max_size :
 				     rxqs_n));
 	if (priv_rss_reta_index_resize(priv, reta_idx_n))
 		return ENOMEM;
@@ -641,6 +632,7 @@ void
 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
 	struct priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
 	unsigned int max;
 	char ifname[IF_NAMESIZE];
 
@@ -662,34 +654,18 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->max_rx_queues = max;
 	info->max_tx_queues = max;
 	info->max_mac_addrs = RTE_DIM(priv->mac);
-	info->rx_offload_capa =
-		(priv->hw_csum ?
-		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
-		  DEV_RX_OFFLOAD_UDP_CKSUM |
-		  DEV_RX_OFFLOAD_TCP_CKSUM) :
-		 0) |
-		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
-		DEV_RX_OFFLOAD_TIMESTAMP;
-
-	if (!priv->mps)
-		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
-	if (priv->hw_csum)
-		info->tx_offload_capa |=
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM);
-	if (priv->tso)
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-	if (priv->tunnel_en)
-		info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+	info->rx_queue_offload_capa =
+		mlx5_priv_get_rx_queue_offloads(priv);
+	info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+				 info->rx_queue_offload_capa);
+	info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
 
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	info->reta_size = priv->reta_idx_n ?
-			priv->reta_idx_n : priv->ind_table_max_size;
+			priv->reta_idx_n : config->ind_table_max_size;
 	info->hash_key_size = priv->rss_conf.rss_key_len;
 	info->speed_capa = priv->link_speed_capa;
+	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
 	priv_unlock(priv);
 }
@@ -883,25 +859,131 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 }
 
 /**
- * DPDK callback to retrieve physical link information.
+ * Enable receiving and transmitting traffic.
  *
- * @param dev
- *   Pointer to Ethernet device structure.
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_link_start(struct priv *priv)
+{
+	struct rte_eth_dev *dev = priv->dev;
+	int err;
+
+	dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+	dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
+	err = priv_dev_traffic_enable(priv, dev);
+	if (err)
+		ERROR("%p: error occurred while configuring control flows: %s",
+		      (void *)priv, strerror(err));
+	err = priv_flow_start(priv, &priv->flows);
+	if (err)
+		ERROR("%p: error occurred while configuring flows: %s",
+		      (void *)priv, strerror(err));
+}
+
+/**
+ * Disable receiving and transmitting traffic.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_link_stop(struct priv *priv)
+{
+	struct rte_eth_dev *dev = priv->dev;
+
+	priv_flow_stop(priv, &priv->flows);
+	priv_dev_traffic_disable(priv, dev);
+	dev->rx_pkt_burst = removed_rx_burst;
+	dev->tx_pkt_burst = removed_tx_burst;
+}
+
+/**
+ * Retrieve physical link information and update rx/tx_pkt_burst callbacks
+ * accordingly.
+ *
+ * @param priv
+ *   Pointer to private structure.
  * @param wait_to_complete
  *   Wait for request completion (ignored).
  */
 int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete)
 {
+	struct rte_eth_dev *dev = priv->dev;
 	struct utsname utsname;
 	int ver[3];
+	int ret;
+	struct rte_eth_link dev_link = dev->data->dev_link;
 
 	if (uname(&utsname) == -1 ||
 	    sscanf(utsname.release, "%d.%d.%d",
 		   &ver[0], &ver[1], &ver[2]) != 3 ||
 	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
-		return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
-	return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+		ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+	else
+		ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+	/* If lsc interrupt is disabled, should always be ready for traffic. */
+	if (!dev->data->dev_conf.intr_conf.lsc) {
+		priv_link_start(priv);
+		return ret;
+	}
+	/* Re-select burst callbacks only if link status has been changed. */
+	if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
+		if (dev->data->dev_link.link_status == ETH_LINK_UP)
+			priv_link_start(priv);
+		else
+			priv_link_stop(priv);
+	}
+	return ret;
+}
+
+/**
+ * Querying the link status till it changes to the desired state.
+ * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param status
+ *   Link desired status.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+priv_force_link_status_change(struct priv *priv, int status)
+{
+	int try = 0;
+
+	while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
+		priv_link_update(priv, 0);
+		if (priv->dev->data->dev_link.link_status == status)
+			return 0;
+		try++;
+		sleep(1);
+	}
+	return -EAGAIN;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ *   Wait for request completion (ignored).
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = priv_link_update(priv, wait_to_complete);
+	priv_unlock(priv);
+	return ret;
 }
 
 /**
@@ -1111,7 +1193,7 @@ priv_link_status_update(struct priv *priv)
 {
 	struct rte_eth_link *link = &priv->dev->data->dev_link;
 
-	mlx5_link_update(priv->dev, 0);
+	priv_link_update(priv, 0);
 	if (((link->link_speed == 0) && link->link_status) ||
 		((link->link_speed != 0) && !link->link_status)) {
 		/*
@@ -1152,7 +1234,7 @@ priv_dev_status_handler(struct priv *priv)
 
 	/* Read all message and acknowledge them. */
 	for (;;) {
-		if (ibv_get_async_event(priv->ctx, &event))
+		if (mlx5_glue->get_async_event(priv->ctx, &event))
 			break;
 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
 		      event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1164,7 +1246,7 @@ priv_dev_status_handler(struct priv *priv)
 		else
 			DEBUG("event type %d on port %d not handled",
 			      event.event_type, event.element.port_num);
-		ibv_ack_async_event(&event);
+		mlx5_glue->ack_async_event(&event);
 	}
 	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
 		if (priv_link_status_update(priv))
@@ -1185,14 +1267,17 @@ mlx5_dev_link_status_handler(void *arg)
 	struct priv *priv = dev->data->dev_private;
 	int ret;
 
-	priv_lock(priv);
-	assert(priv->pending_alarm == 1);
+	while (!priv_trylock(priv)) {
+		/* Alarm is being canceled. */
+		if (priv->pending_alarm == 0)
+			return;
+		rte_pause();
+	}
 	priv->pending_alarm = 0;
 	ret = priv_link_status_update(priv);
 	priv_unlock(priv);
 	if (!ret)
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
-					      NULL);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 /**
@@ -1214,11 +1299,9 @@ mlx5_dev_interrupt_handler(void *cb_arg)
 	events = priv_dev_status_handler(priv);
 	priv_unlock(priv);
 	if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
-					      NULL);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
-					      NULL);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
 }
 
 /**
@@ -1256,9 +1339,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
 	if (priv->primary_socket)
 		rte_intr_callback_unregister(&priv->intr_handle_socket,
 					     mlx5_dev_handler_socket, dev);
-	if (priv->pending_alarm)
+	if (priv->pending_alarm) {
+		priv->pending_alarm = 0;
 		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
-	priv->pending_alarm = 0;
+	}
 	priv->intr_handle.fd = 0;
 	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 	priv->intr_handle_socket.fd = 0;
@@ -1308,8 +1392,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
  *
  * @param priv
  *   Pointer to private data structure.
- * @param dev
- *   Pointer to rte_eth_dev structure.
  * @param up
  *   Nonzero for link up, otherwise link down.
  *
 * @return
 *   0 on success, errno value on failure.
 */
 static int
-priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
+priv_dev_set_link(struct priv *priv, int up)
 {
-	int err;
-
-	if (up) {
-		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
-		if (err)
-			return err;
-		dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
-		dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
-	} else {
-		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
-		if (err)
-			return err;
-		dev->rx_pkt_burst = removed_rx_burst;
-		dev->tx_pkt_burst = removed_tx_burst;
-	}
-	return 0;
+	return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
 }
 
 /**
@@ -1353,7 +1420,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
 	int err;
 
 	priv_lock(priv);
-	err = priv_dev_set_link(priv, dev, 0);
+	err = priv_dev_set_link(priv, 0);
 	priv_unlock(priv);
 	return err;
 }
@@ -1374,7 +1441,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
 	int err;
 
 	priv_lock(priv);
-	err = priv_dev_set_link(priv, dev, 1);
+	err = priv_dev_set_link(priv, 1);
 	priv_unlock(priv);
 	return err;
 }
@@ -1391,15 +1458,23 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
 *   Pointer to selected Tx burst function.
 */
 eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
 {
 	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+	struct mlx5_dev_config *config = &priv->config;
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
+	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
 
 	assert(priv != NULL);
 	/* Select appropriate TX function. */
-	if (priv->mps == MLX5_MPW_ENHANCED) {
-		if (priv_check_vec_tx_support(priv) > 0) {
-			if (priv_check_raw_vec_tx_support(priv) > 0)
+	if (vlan_insert || tso)
+		return tx_pkt_burst;
+	if (config->mps == MLX5_MPW_ENHANCED) {
+		if (priv_check_vec_tx_support(priv, dev) > 0) {
+			if (priv_check_raw_vec_tx_support(priv, dev) > 0)
 				tx_pkt_burst = mlx5_tx_burst_raw_vec;
 			else
 				tx_pkt_burst = mlx5_tx_burst_vec;
@@ -1408,10 +1483,10 @@ priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
 			tx_pkt_burst = mlx5_tx_burst_empw;
 			DEBUG("selected Enhanced MPW TX function");
 		}
-	} else if (priv->mps && priv->txq_inline) {
+	} else if (config->mps && (config->txq_inline > 0)) {
 		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
 		DEBUG("selected MPW inline TX function");
-	} else if (priv->mps) {
+	} else if (config->mps) {
 		tx_pkt_burst = mlx5_tx_burst_mpw;
 		DEBUG("selected MPW TX function");
 	}
@@ -1441,3 +1516,23 @@ priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
 	}
 	return rx_pkt_burst;
 }
+
+/**
+ * Check if mlx5 device was removed.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   1 when device is removed, otherwise 0.
+ */
+int
+mlx5_is_removed(struct rte_eth_dev *dev)
+{
+	struct ibv_device_attr device_attr;
+	struct priv *priv = dev->data->dev_private;
+
+	if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+		return 1;
+	return 0;
+}
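
Usage note: with this change dev_configure() rejects any Rx/Tx offload bit the port does not advertise and returns ENOTSUP, so applications using the per-port offloads API are expected to intersect the offloads they request with the capabilities reported by rte_eth_dev_info_get(). The following is a minimal, hypothetical application-side sketch (not part of the patch; port_id, queue counts, the helper name and the chosen offload bits are illustrative only):

#include <stdint.h>
#include <rte_ethdev.h>

/* Hypothetical helper: request only the offloads the port advertises, so a
 * PMD that validates dev_conf offloads (as mlx5 does after this change)
 * accepts the configuration instead of failing with ENOTSUP. */
static int
configure_port_with_supported_offloads(uint16_t port_id,
				       uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	uint64_t rx_wanted = DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_VLAN_STRIP;
	uint64_t tx_wanted = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_TSO;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Keep only the bits reported in rx/tx_offload_capa. */
	conf.rxmode.offloads = rx_wanted & dev_info.rx_offload_capa;
	conf.txmode.offloads = tx_wanted & dev_info.tx_offload_capa;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}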