X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx4%2Fmlx4.c;h=50a55ee5272f91d6cc4377e4d28c25055d98eb54;hb=27cea11686ff;hp=e8f704812b525c2a0114afa6ea957341d128487b;hpb=655588afc8475fe7a51bd09c319b677be49842e8;p=dpdk.git

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index e8f704812b..50a55ee527 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -31,30 +31,46 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/* System headers. */
+/**
+ * @file
+ * mlx4 driver initialization.
+ */
+
+#include
+#include
+#include
+#include
 #include
+#include
 #include
 #include
-#include
-#include
 #include
-#include
 #include
-#include
-#include
-#include
-#include
 
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include
+#include
 #include
-#include
 #include
-#include
-#include
+#include
+#include
+#include
+#include
 #include
-#include
+#include
+#include
+#include
 
-/* PMD headers. */
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_flow.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
@@ -73,13 +89,9 @@ const char *pmd_mlx4_init_params[] = {
 	NULL,
 };
 
-/* Device configuration. */
-
 /**
  * DPDK callback for Ethernet device configuration.
  *
- * Prepare the driver for a given number of TX and RX queues.
- *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
@@ -90,28 +102,31 @@ static int
 mlx4_dev_configure(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
-	unsigned int rxqs_n = dev->data->nb_rx_queues;
-	unsigned int txqs_n = dev->data->nb_tx_queues;
-
-	priv->rxqs = (void *)dev->data->rx_queues;
-	priv->txqs = (void *)dev->data->tx_queues;
-	if (txqs_n != priv->txqs_n) {
-		INFO("%p: TX queues number update: %u -> %u",
-		     (void *)dev, priv->txqs_n, txqs_n);
-		priv->txqs_n = txqs_n;
-	}
-	if (rxqs_n != priv->rxqs_n) {
-		INFO("%p: Rx queues number update: %u -> %u",
-		     (void *)dev, priv->rxqs_n, rxqs_n);
-		priv->rxqs_n = rxqs_n;
+	struct rte_flow_error error;
+	int ret;
+
+	/* Prepare internal flow rules. */
+	ret = mlx4_flow_sync(priv, &error);
+	if (ret) {
+		ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
+		      " flow error type %d, cause %p, message: %s",
+		      -ret, strerror(-ret), error.type, error.cause,
+		      error.message ? error.message : "(unspecified)");
+		goto exit;
 	}
-	return 0;
+	ret = mlx4_intr_install(priv);
+	if (ret)
+		ERROR("%p: interrupt handler installation failed",
+		      (void *)dev);
+exit:
+	return ret;
 }
 
 /**
  * DPDK callback to start the device.
  *
- * Simulate device start by attaching all configured flows.
+ * Simulate device start by initializing common RSS resources and attaching
+ * all configured flows.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -123,31 +138,40 @@ static int
 mlx4_dev_start(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
+	struct rte_flow_error error;
 	int ret;
 
 	if (priv->started)
 		return 0;
 	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
 	priv->started = 1;
-	ret = mlx4_mac_addr_add(priv);
-	if (ret)
+	ret = mlx4_rss_init(priv);
+	if (ret) {
+		ERROR("%p: cannot initialize RSS resources: %s",
+		      (void *)dev, strerror(-ret));
 		goto err;
-	ret = mlx4_intr_install(priv);
+	}
+	ret = mlx4_rxq_intr_enable(priv);
 	if (ret) {
 		ERROR("%p: interrupt handler installation failed",
 		      (void *)dev);
 		goto err;
 	}
-	ret = mlx4_flow_start(priv);
+	ret = mlx4_flow_sync(priv, &error);
 	if (ret) {
-		ERROR("%p: flow start failed: %s",
-		      (void *)dev, strerror(ret));
+		ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
+		      " flow error type %d, cause %p, message: %s",
+		      (void *)dev,
+		      -ret, strerror(-ret), error.type, error.cause,
+		      error.message ? error.message : "(unspecified)");
 		goto err;
 	}
+	rte_wmb();
+	dev->tx_pkt_burst = mlx4_tx_burst;
+	dev->rx_pkt_burst = mlx4_rx_burst;
 	return 0;
 err:
 	/* Rollback. */
-	mlx4_mac_addr_del(priv);
 	priv->started = 0;
 	return ret;
 }
@@ -169,9 +193,12 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
 		return;
 	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
 	priv->started = 0;
-	mlx4_flow_stop(priv);
-	mlx4_intr_uninstall(priv);
-	mlx4_mac_addr_del(priv);
+	dev->tx_pkt_burst = mlx4_tx_burst_removed;
+	dev->rx_pkt_burst = mlx4_rx_burst_removed;
+	rte_wmb();
+	mlx4_flow_sync(priv, NULL);
+	mlx4_rxq_intr_disable(priv);
+	mlx4_rss_deinit(priv);
 }
 
 /**
@@ -186,54 +213,23 @@ static void
 mlx4_dev_close(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
-	void *tmp;
 	unsigned int i;
 
-	if (priv == NULL)
-		return;
 	DEBUG("%p: closing device \"%s\"",
 	      (void *)dev,
 	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
-	mlx4_mac_addr_del(priv);
-	/*
-	 * Prevent crashes when queues are still in use. This is unfortunately
-	 * still required for DPDK 1.3 because some programs (such as testpmd)
-	 * never release them before closing the device.
-	 */
 	dev->rx_pkt_burst = mlx4_rx_burst_removed;
 	dev->tx_pkt_burst = mlx4_tx_burst_removed;
-	if (priv->rxqs != NULL) {
-		/* XXX race condition if mlx4_rx_burst() is still running. */
-		usleep(1000);
-		for (i = 0; (i != priv->rxqs_n); ++i) {
-			tmp = (*priv->rxqs)[i];
-			if (tmp == NULL)
-				continue;
-			(*priv->rxqs)[i] = NULL;
-			mlx4_rxq_cleanup(tmp);
-			rte_free(tmp);
-		}
-		priv->rxqs_n = 0;
-		priv->rxqs = NULL;
-	}
-	if (priv->txqs != NULL) {
-		/* XXX race condition if mlx4_tx_burst() is still running. */
-		usleep(1000);
-		for (i = 0; (i != priv->txqs_n); ++i) {
-			tmp = (*priv->txqs)[i];
-			if (tmp == NULL)
-				continue;
-			(*priv->txqs)[i] = NULL;
-			mlx4_txq_cleanup(tmp);
-			rte_free(tmp);
-		}
-		priv->txqs_n = 0;
-		priv->txqs = NULL;
-	}
+	rte_wmb();
+	mlx4_flow_clean(priv);
+	for (i = 0; i != dev->data->nb_rx_queues; ++i)
+		mlx4_rx_queue_release(dev->data->rx_queues[i]);
+	for (i = 0; i != dev->data->nb_tx_queues; ++i)
+		mlx4_tx_queue_release(dev->data->tx_queues[i]);
 	if (priv->pd != NULL) {
 		assert(priv->ctx != NULL);
-		claim_zero(ibv_dealloc_pd(priv->pd));
-		claim_zero(ibv_close_device(priv->ctx));
+		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
+		claim_zero(mlx4_glue->close_device(priv->ctx));
 	} else
 		assert(priv->ctx == NULL);
 	mlx4_intr_uninstall(priv);
@@ -248,9 +244,18 @@ static const struct eth_dev_ops mlx4_dev_ops = {
 	.dev_set_link_up = mlx4_dev_set_link_up,
 	.dev_close = mlx4_dev_close,
 	.link_update = mlx4_link_update,
+	.promiscuous_enable = mlx4_promiscuous_enable,
+	.promiscuous_disable = mlx4_promiscuous_disable,
+	.allmulticast_enable = mlx4_allmulticast_enable,
+	.allmulticast_disable = mlx4_allmulticast_disable,
+	.mac_addr_remove = mlx4_mac_addr_remove,
+	.mac_addr_add = mlx4_mac_addr_add,
+	.mac_addr_set = mlx4_mac_addr_set,
 	.stats_get = mlx4_stats_get,
 	.stats_reset = mlx4_stats_reset,
 	.dev_infos_get = mlx4_dev_infos_get,
+	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
+	.vlan_filter_set = mlx4_vlan_filter_set,
 	.rx_queue_setup = mlx4_rx_queue_setup,
 	.tx_queue_setup = mlx4_tx_queue_setup,
 	.rx_queue_release = mlx4_rx_queue_release,
@@ -261,6 +266,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
 	.filter_ctrl = mlx4_filter_ctrl,
 	.rx_queue_intr_enable = mlx4_rx_intr_enable,
 	.rx_queue_intr_disable = mlx4_rx_intr_disable,
+	.is_removed = mlx4_is_removed,
 };
 
 /**
@@ -341,7 +347,7 @@ mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
 		return -rte_errno;
 	}
 	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
-		uint32_t ports = rte_log2_u32(conf->ports.present);
+		uint32_t ports = rte_log2_u32(conf->ports.present + 1);
 
 		if (tmp >= ports) {
 			ERROR("port index %lu outside range [0,%" PRIu32 ")",
@@ -431,6 +437,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	int err = 0;
 	struct ibv_context *attr_ctx = NULL;
 	struct ibv_device_attr device_attr;
+	struct ibv_device_attr_ex device_attr_ex;
 	struct mlx4_conf conf = {
 		.ports.present = 0,
 	};
@@ -439,7 +446,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
 	(void)pci_drv;
 	assert(pci_drv == &mlx4_driver);
-	list = ibv_get_device_list(&i);
+	list = mlx4_glue->get_device_list(&i);
 	if (list == NULL) {
 		rte_errno = errno;
 		assert(rte_errno);
@@ -468,12 +475,12 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
 		INFO("PCI information matches, using device \"%s\" (VF: %s)",
 		     list[i]->name, (vf ? "true" : "false"));
-		attr_ctx = ibv_open_device(list[i]);
+		attr_ctx = mlx4_glue->open_device(list[i]);
 		err = errno;
 		break;
 	}
 	if (attr_ctx == NULL) {
-		ibv_free_device_list(list);
+		mlx4_glue->free_device_list(list);
 		switch (err) {
 		case 0:
 			rte_errno = ENODEV;
@@ -490,7 +497,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	}
 	ibv_dev = list[i];
 	DEBUG("device opened");
-	if (ibv_query_device(attr_ctx, &device_attr)) {
+	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
 		rte_errno = ENODEV;
 		goto error;
 	}
@@ -504,6 +511,12 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	/* Use all ports when none are defined */
 	if (!conf.ports.enabled)
 		conf.ports.enabled = conf.ports.present;
+	/* Retrieve extended device attributes. */
+	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+		rte_errno = ENODEV;
+		goto error;
+	}
+	assert(device_attr.max_sge >= MLX4_MAX_SGE);
 	for (i = 0; i < device_attr.phys_port_cnt; i++) {
 		uint32_t port = i + 1; /* ports are indexed from one */
 		struct ibv_context *ctx = NULL;
@@ -517,13 +530,13 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		if (!(conf.ports.enabled & (1 << i)))
 			continue;
 		DEBUG("using port %u", port);
-		ctx = ibv_open_device(ibv_dev);
+		ctx = mlx4_glue->open_device(ibv_dev);
 		if (ctx == NULL) {
 			rte_errno = ENODEV;
 			goto port_error;
 		}
 		/* Check port status. */
-		err = ibv_query_port(ctx, port, &port_attr);
+		err = mlx4_glue->query_port(ctx, port, &port_attr);
 		if (err) {
 			rte_errno = err;
 			ERROR("port query failed: %s", strerror(rte_errno));
@@ -537,7 +550,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		}
 		if (port_attr.state != IBV_PORT_ACTIVE)
 			DEBUG("port %d is not active: \"%s\" (%d)",
-			      port, ibv_port_state_str(port_attr.state),
+			      port, mlx4_glue->port_state_str(port_attr.state),
 			      port_attr.state);
 		/* Make asynchronous FD non-blocking to handle interrupts. */
 		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
@@ -546,7 +559,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			goto port_error;
 		}
 		/* Allocate protection domain. */
-		pd = ibv_alloc_pd(ctx);
+		pd = mlx4_glue->alloc_pd(ctx);
 		if (pd == NULL) {
 			rte_errno = ENOMEM;
 			ERROR("PD allocation failure");
@@ -567,6 +580,32 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		priv->pd = pd;
 		priv->mtu = ETHER_MTU;
 		priv->vf = vf;
+		priv->hw_csum = !!(device_attr.device_cap_flags &
+				   IBV_DEVICE_RAW_IP_CSUM);
+		DEBUG("checksum offloading is %ssupported",
+		      (priv->hw_csum ? "" : "not "));
+		/* Only ConnectX-3 Pro supports tunneling. */
+		priv->hw_csum_l2tun =
+			priv->hw_csum &&
+			(device_attr.vendor_part_id ==
+			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
+		DEBUG("L2 tunnel checksum offloads are %ssupported",
+		      (priv->hw_csum_l2tun ? "" : "not "));
+		priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask;
+		if (!priv->hw_rss_sup) {
+			WARN("no RSS capabilities reported; disabling support"
+			     " for UDP RSS and inner VXLAN RSS");
+			/* Fake support for all possible RSS hash fields. */
+			priv->hw_rss_sup = ~UINT64_C(0);
+			priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
+			/* Filter out known unsupported fields. */
+			priv->hw_rss_sup &=
+				~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |
+					    IBV_RX_HASH_DST_PORT_UDP |
+					    IBV_RX_HASH_INNER);
+		}
+		DEBUG("supported RSS hash fields mask: %016" PRIx64,
+		      priv->hw_rss_sup);
 		/* Configure the first MAC address by default. */
 		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
 			ERROR("cannot get MAC address, is mlx4_en loaded?"
@@ -579,9 +618,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		      mac.addr_bytes[2], mac.addr_bytes[3],
 		      mac.addr_bytes[4], mac.addr_bytes[5]);
 		/* Register MAC address. */
-		priv->mac = mac;
-		if (mlx4_mac_addr_add(priv))
-			goto port_error;
+		priv->mac[0] = mac;
 #ifndef NDEBUG
 		{
 			char ifname[IF_NAMESIZE];
@@ -601,7 +638,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			char name[RTE_ETH_NAME_MAX_LEN];
 
 			snprintf(name, sizeof(name), "%s port %u",
-				 ibv_get_device_name(ibv_dev), port);
+				 mlx4_glue->get_device_name(ibv_dev), port);
 			eth_dev = rte_eth_dev_allocate(name);
 		}
 		if (eth_dev == NULL) {
@@ -610,7 +647,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			goto port_error;
 		}
 		eth_dev->data->dev_private = priv;
-		eth_dev->data->mac_addrs = &priv->mac;
+		eth_dev->data->mac_addrs = priv->mac;
 		eth_dev->device = &pci_dev->device;
 		rte_eth_copy_pci_info(eth_dev, pci_dev);
 		eth_dev->device->driver = &mlx4_driver.driver;
@@ -634,7 +671,6 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		eth_dev->intr_handle = &priv->intr_handle;
 		priv->dev = eth_dev;
 		eth_dev->dev_ops = &mlx4_dev_ops;
-		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 		/* Bring Ethernet device up. */
 		DEBUG("forcing Ethernet interface up");
 		mlx4_dev_set_link_up(priv->dev);
@@ -645,9 +681,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 port_error:
 		rte_free(priv);
 		if (pd)
-			claim_zero(ibv_dealloc_pd(pd));
+			claim_zero(mlx4_glue->dealloc_pd(pd));
 		if (ctx)
-			claim_zero(ibv_close_device(ctx));
+			claim_zero(mlx4_glue->close_device(ctx));
 		if (eth_dev)
 			rte_eth_dev_release_port(eth_dev);
 		break;
@@ -662,9 +698,9 @@ port_error:
 	 */
 error:
 	if (attr_ctx)
-		claim_zero(ibv_close_device(attr_ctx));
+		claim_zero(mlx4_glue->close_device(attr_ctx));
 	if (list)
-		ibv_free_device_list(list);
+		mlx4_glue->free_device_list(list);
 	assert(rte_errno >= 0);
 	return -rte_errno;
 }
@@ -697,6 +733,47 @@ static struct rte_pci_driver mlx4_driver = {
 		     RTE_PCI_DRV_INTR_RMV,
 };
 
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+static int
+mlx4_glue_init(void)
+{
+	void *handle = NULL;
+	void **sym;
+	const char *dlmsg;
+
+	handle = dlopen(MLX4_GLUE, RTLD_LAZY);
+	if (!handle) {
+		rte_errno = EINVAL;
+		dlmsg = dlerror();
+		if (dlmsg)
+			WARN("cannot load glue library: %s", dlmsg);
+		goto glue_error;
+	}
+	sym = dlsym(handle, "mlx4_glue");
+	if (!sym || !*sym) {
+		rte_errno = EINVAL;
+		dlmsg = dlerror();
+		if (dlmsg)
+			ERROR("cannot resolve glue symbol: %s", dlmsg);
+		goto glue_error;
+	}
+	mlx4_glue = *sym;
+	return 0;
+glue_error:
+	if (handle)
+		dlclose(handle);
+	WARN("cannot initialize PMD due to missing run-time"
+	     " dependency on rdma-core libraries (libibverbs,"
+	     " libmlx4)");
+	return -rte_errno;
+}
+
+#endif
+
 /**
  * Driver initialization routine.
  */
@@ -704,6 +781,12 @@ RTE_INIT(rte_mlx4_pmd_init);
 static void
 rte_mlx4_pmd_init(void)
 {
+	/*
+	 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
+	 * want to get success errno value in case of calling them
+	 * when the device was removed.
+	 */
+	setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
 	/*
 	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
 	 * huge pages. Calling ibv_fork_init() during init allows
@@ -711,7 +794,12 @@ rte_mlx4_pmd_init(void)
 	 * using this PMD, which is not supported in forked processes.
 	 */
 	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
-	ibv_fork_init();
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+	if (mlx4_glue_init())
+		return;
+	assert(mlx4_glue);
+#endif
+	mlx4_glue->fork_init();
 	rte_pci_register(&mlx4_driver);
 }