diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 9f8ecd0729..5ef2e7f41e 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -81,7 +81,7 @@ static void mlx4_dev_stop(struct rte_eth_dev *dev);
 static int
 mlx4_dev_configure(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
 	int ret;
 
@@ -98,20 +98,6 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		ERROR("%p: interrupt handler installation failed",
 		      (void *)dev);
-	/*
-	 * Once the device is added to the list of memory event callback, its
-	 * global MR cache table cannot be expanded on the fly because of
-	 * deadlock. If it overflows, lookup should be done by searching MR list
-	 * linearly, which is slow.
-	 */
-	if (mlx4_mr_btree_init(&priv->mr.cache, MLX4_MR_BTREE_CACHE_N * 2,
-			       dev->device->numa_node)) {
-		/* rte_errno is already set. */
-		return -rte_errno;
-	}
-	rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
-	LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
-	rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
 exit:
 	return ret;
 }
@@ -131,7 +117,7 @@ exit:
 static int
 mlx4_dev_start(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
 	int ret;
 
@@ -183,7 +169,7 @@ err:
 static void
 mlx4_dev_stop(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 
 	if (!priv->started)
 		return;
@@ -208,7 +194,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
 static void
 mlx4_dev_close(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
 	DEBUG("%p: closing device \"%s\"",
@@ -251,6 +237,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
 	.mac_addr_set = mlx4_mac_addr_set,
 	.stats_get = mlx4_stats_get,
 	.stats_reset = mlx4_stats_reset,
+	.fw_version_get = mlx4_fw_version_get,
 	.dev_infos_get = mlx4_dev_infos_get,
 	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
 	.vlan_filter_set = mlx4_vlan_filter_set,
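The new .fw_version_get callback registered above is reached through the generic ethdev API rather than called directly. As a rough illustration (the port id and buffer size are arbitrary), a caller would use rte_eth_dev_fw_version_get(), which returns 0 on success, a negative errno on failure, and a positive byte count when the supplied buffer is too small:

#include <stdio.h>
#include <rte_ethdev.h>

/* Query and print the firmware version of a port; retried sizing is left
 * to the caller when the driver reports truncation. */
static void
print_fw_version(uint16_t port_id)
{
	char fw[64];
	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, fw);
	else if (ret > 0)
		printf("port %u: need %d bytes for firmware string\n",
		       port_id, ret);
	else
		printf("port %u: fw_version_get failed (%d)\n",
		       port_id, ret);
}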
@@ -589,14 +576,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	ibv_dev = list[i];
 	DEBUG("device opened");
 	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
-		rte_errno = ENODEV;
+		err = ENODEV;
 		goto error;
 	}
 	INFO("%u port(s) detected", device_attr.phys_port_cnt);
 	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
 	if (mlx4_args(pci_dev->device.devargs, &conf)) {
 		ERROR("failed to process device arguments");
-		rte_errno = EINVAL;
+		err = EINVAL;
 		goto error;
 	}
 	/* Use all ports when none are defined */
@@ -604,7 +591,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		conf.ports.enabled = conf.ports.present;
 	/* Retrieve extended device attributes. */
 	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
-		rte_errno = ENODEV;
+		err = ENODEV;
 		goto error;
 	}
 	assert(device_attr.max_sge >= MLX4_MAX_SGE);
@@ -613,7 +600,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		struct ibv_context *ctx = NULL;
 		struct ibv_port_attr port_attr;
 		struct ibv_pd *pd = NULL;
-		struct priv *priv = NULL;
+		struct mlx4_priv *priv = NULL;
 		struct rte_eth_dev *eth_dev = NULL;
 		struct ether_addr mac;
 
@@ -623,18 +610,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		DEBUG("using port %u", port);
 		ctx = mlx4_glue->open_device(ibv_dev);
 		if (ctx == NULL) {
-			rte_errno = ENODEV;
+			err = ENODEV;
 			goto port_error;
 		}
 		/* Check port status. */
 		err = mlx4_glue->query_port(ctx, port, &port_attr);
 		if (err) {
-			rte_errno = err;
-			ERROR("port query failed: %s", strerror(rte_errno));
+			err = ENODEV;
+			ERROR("port query failed: %s", strerror(err));
 			goto port_error;
 		}
 		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
-			rte_errno = ENOTSUP;
+			err = ENOTSUP;
 			ERROR("port %d is not configured in Ethernet mode",
 			      port);
 			goto port_error;
@@ -644,15 +631,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		      port, mlx4_glue->port_state_str(port_attr.state),
 		      port_attr.state);
 		/* Make asynchronous FD non-blocking to handle interrupts. */
-		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+		err = mlx4_fd_set_non_blocking(ctx->async_fd);
+		if (err) {
 			ERROR("cannot make asynchronous FD non-blocking: %s",
-			      strerror(rte_errno));
+			      strerror(err));
 			goto port_error;
 		}
 		/* Allocate protection domain. */
 		pd = mlx4_glue->alloc_pd(ctx);
 		if (pd == NULL) {
-			rte_errno = ENOMEM;
+			err = ENOMEM;
 			ERROR("PD allocation failure");
 			goto port_error;
 		}
@@ -661,7 +649,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 				   sizeof(*priv),
 				   RTE_CACHE_LINE_SIZE);
 		if (priv == NULL) {
-			rte_errno = ENOMEM;
+			err = ENOMEM;
 			ERROR("priv allocation failure");
 			goto port_error;
 		}
@@ -686,14 +674,27 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 						&device_attr_ex);
 		DEBUG("supported RSS hash fields mask: %016" PRIx64,
 		      priv->hw_rss_sup);
+		priv->hw_rss_max_qps =
+			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
 		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
 					IBV_RAW_PACKET_CAP_SCATTER_FCS);
 		DEBUG("FCS stripping toggling is %ssupported",
 		      priv->hw_fcs_strip ? "" : "not ");
+		priv->tso =
+			((device_attr_ex.tso_caps.max_tso > 0) &&
+			 (device_attr_ex.tso_caps.supported_qpts &
+			  (1 << IBV_QPT_RAW_PACKET)));
+		if (priv->tso)
+			priv->tso_max_payload_sz =
+					device_attr_ex.tso_caps.max_tso;
+		DEBUG("TSO is %ssupported",
+		      priv->tso ? "" : "not ");
 		/* Configure the first MAC address by default. */
-		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
+		err = mlx4_get_mac(priv, &mac.addr_bytes);
+		if (err) {
 			ERROR("cannot get MAC address, is mlx4_en loaded?"
-			      " (rte_errno: %s)", strerror(rte_errno));
+			      " (error: %s)", strerror(err));
 			goto port_error;
 		}
 		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
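The TSO probing added in the hunk above relies on the extended device attributes reported by libibverbs: TSO is only usable when the device advertises a non-zero maximum payload and lists the raw packet QP type among the supported ones. A standalone sketch of the same check, assuming an already-open ibv_context (check_raw_packet_tso() is an illustrative helper, not part of the driver):

#include <infiniband/verbs.h>

/* Return non-zero if the device supports TSO on raw packet QPs. */
static int
check_raw_packet_tso(struct ibv_context *ctx)
{
	struct ibv_device_attr_ex attr_ex;

	if (ibv_query_device_ex(ctx, NULL, &attr_ex))
		return 0; /* query failed: conservatively report no TSO */
	return (attr_ex.tso_caps.max_tso > 0) &&
	       (attr_ex.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET));
}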
- " (rte_errno: %s)", strerror(rte_errno)); + " (error: %s)", strerror(err)); goto port_error; } INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", @@ -726,15 +727,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) eth_dev = rte_eth_dev_allocate(name); } if (eth_dev == NULL) { + err = ENOMEM; ERROR("can not allocate rte ethdev"); - rte_errno = ENOMEM; goto port_error; } eth_dev->data->dev_private = priv; eth_dev->data->mac_addrs = priv->mac; eth_dev->device = &pci_dev->device; rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx4_driver.driver; /* Initialize local interrupt handle for current port. */ priv->intr_handle = (struct rte_intr_handle){ .fd = -1, @@ -761,20 +761,40 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) /* Update link status once if waiting for LSC. */ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) mlx4_link_update(eth_dev, 0); + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx4_mr_btree_init(&priv->mr.cache, + MLX4_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + /* rte_errno is already set. */ + goto port_error; + } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); rte_eth_dev_probing_finish(eth_dev); continue; port_error: rte_free(priv); + if (eth_dev != NULL) + eth_dev->data->dev_private = NULL; if (pd) claim_zero(mlx4_glue->dealloc_pd(pd)); if (ctx) claim_zero(mlx4_glue->close_device(ctx)); - if (eth_dev) + if (eth_dev != NULL) { + /* mac_addrs must not be freed because part of dev_private */ + eth_dev->data->mac_addrs = NULL; rte_eth_dev_release_port(eth_dev); + } break; } - if (i == device_attr.phys_port_cnt) - return 0; /* * XXX if something went wrong in the loop above, there is a resource * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as @@ -786,8 +806,9 @@ error: claim_zero(mlx4_glue->close_device(attr_ctx)); if (list) mlx4_glue->free_device_list(list); - assert(rte_errno >= 0); - return -rte_errno; + if (err) + rte_errno = err; + return -err; } static const struct rte_pci_id mlx4_pci_id_map[] = { @@ -818,7 +839,7 @@ static struct rte_pci_driver mlx4_driver = { RTE_PCI_DRV_INTR_RMV, }; -#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS +#ifdef RTE_IBVERBS_LINK_DLOPEN /** * Suffix RTE_EAL_PMD_PATH with "-glue". @@ -951,9 +972,7 @@ glue_error: /** * Driver initialization routine. */ -RTE_INIT(rte_mlx4_pmd_init); -static void -rte_mlx4_pmd_init(void) +RTE_INIT(rte_mlx4_pmd_init) { /* * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we @@ -968,7 +987,7 @@ rte_mlx4_pmd_init(void) * using this PMD, which is not supported in forked processes. */ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); -#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS +#ifdef RTE_IBVERBS_LINK_DLOPEN if (mlx4_glue_init()) return; assert(mlx4_glue);