/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"
/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
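
/*
 * Usage sketch (not part of the original sources): these keys are consumed
 * from the EAL device arguments appended to a PCI white-list entry, e.g. a
 * testpmd invocation such as (PCI address hypothetical):
 *
 *   testpmd -w 0000:05:00.0,rxq_cqe_comp_en=0,txq_inline=128 -- -i
 *
 * Unknown keys are rejected by mlx5_args_check() below.
 */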
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
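
/*
 * Example (FOO is a hypothetical variable): a process started with FOO=42 in
 * its environment gets 42 back from mlx5_getenv_int("FOO"); when FOO is unset
 * the function returns 0, as it does for any value atoi() cannot parse.
 */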
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_special_flow_disable_all(priv);
	priv_mac_addrs_disable(priv);
	priv_destroy_hash_rxqs(priv);

	/* Remove flow director elements. */
	priv_fdir_disable(priv);
	priv_fdir_delete_filters_list(priv);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			struct rxq *rxq = (*priv->rxqs)[i];
			struct rxq_ctrl *rxq_ctrl;

			if (rxq == NULL)
				continue;
			rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(rxq_ctrl);
			rte_free(rxq_ctrl);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i) {
			struct txq *txq = (*priv->txqs)[i];
			struct txq_ctrl *txq_ctrl;

			if (txq == NULL)
				continue;
			txq_ctrl = container_of(txq, struct txq_ctrl, txq);
			(*priv->txqs)[i] = NULL;
			txq_cleanup(txq_ctrl);
			rte_free(txq_ctrl);
		}
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf != NULL) {
		for (i = 0; (i != hash_rxq_init_n); ++i)
			rte_free((*priv->rss_conf)[i]);
		rte_free(priv->rss_conf);
	}
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	priv_unlock(priv);
	memset(priv, 0, sizeof(*priv));
}
static const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
};
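
/*
 * Note (generic DPDK behavior, not specific to this file): the table above
 * backs the public ethdev API, e.g. rte_eth_dev_configure() on a port owned
 * by this PMD dispatches to mlx5_dev_configure() through .dev_configure.
 */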
static struct {
	struct rte_pci_addr pci_addr; /* Associated PCI address. */
	uint32_t ports; /* Physical ports bitfield. */
} mlx5_dev[32];
/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}
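
/*
 * Note: for an address that is not yet in mlx5_dev[], the index of the first
 * slot with an empty ports bitfield is returned instead, so the same lookup
 * doubles as free-slot allocation for mlx5_pci_devinit() below.
 */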
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct priv *priv = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		priv->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		priv->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		priv->txqs_inline = tmp;
	} else {
		WARN("%s: unknown parameter", key);
		return EINVAL;
	}
	return 0;
}
/**
 * Parse device parameters.
 *
 * @param priv
 *   Pointer to private structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args(struct priv *priv, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args,
				  (const char *const *)params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, priv);
			if (ret != 0) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
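
/*
 * Parsing sketch: for devargs of "rxq_cqe_comp_en=0,txq_inline=256",
 * rte_kvargs_parse() splits the string into key/value pairs,
 * rte_kvargs_count() reports one occurrence of each known key and
 * mlx5_args_check() then stores priv->cqe_comp = 0 and
 * priv->txq_inline = 256.
 */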
static struct eth_driver mlx5_driver;
/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	unsigned int sriov;
	unsigned int mps;
	int idx;
	int i;

	(void)pci_drv;
	assert(pci_drv == &mlx5_driver.pci_drv);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		return -ENOMEM;
	}
	DEBUG("using driver device index %d", idx);

	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		if (errno == ENOSYS) {
			WARN("cannot list devices, is ib_uverbs loaded?");
			return 0;
		}
		return -errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		sriov = ((pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
			 (pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
		/* Multi-packet send is only supported by ConnectX-4 Lx PF. */
		mps = (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
		INFO("PCI information matches, using device \"%s\""
		     " (SR-IOV: %s, MPS: %s)",
		     list[i]->name,
		     sriov ? "true" : "false",
		     mps ? "true" : "false");
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			WARN("cannot access device, is mlx5_ib loaded?");
			return 0;
		case EINVAL:
			WARN("cannot use device, are drivers up to date?");
			return 0;
		}
		assert(err > 0);
		return -err;
	}
	ibv_dev = list[i];

	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr))
		goto error;
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev;
		struct ibv_exp_device_attr exp_device_attr;
		struct ether_addr mac;
		uint16_t num_vfs = 0;

		exp_device_attr.comp_mask =
			IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
			IBV_EXP_DEVICE_ATTR_RX_HASH |
			IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
			IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
			0;
		DEBUG("using port %u (%08" PRIx32 ")", port, test);

		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL)
			goto port_error;

		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}

		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);
		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			ERROR("PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->cqe_comp = 1; /* Enable compression by default. */
		err = mlx5_args(priv, pci_dev->devargs);
		if (err) {
			ERROR("failed to process device arguments: %s",
			      strerror(err));
			goto port_error;
		}
		if (ibv_exp_query_device(ctx, &exp_device_attr)) {
			ERROR("ibv_exp_query_device() failed");
			goto port_error;
		}
		priv->hw_csum =
			((exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
			 (exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));

		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_EXP_DEVICE_VXLAN_SUPPORT);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      (priv->hw_csum_l2tun ? "" : "not "));
		priv->ind_table_max_size =
			exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (priv->ind_table_max_size >
		    (unsigned int)RSS_INDIRECTION_TABLE_SIZE)
			priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
		DEBUG("maximum RX indirection table size is %u",
		      priv->ind_table_max_size);
		priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
					 IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
		DEBUG("VLAN stripping is %ssupported",
		      (priv->hw_vlan_strip ? "" : "not "));

		priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_EXP_DEVICE_SCATTER_FCS);
		DEBUG("FCS stripping configuration is %ssupported",
		      (priv->hw_fcs_strip ? "" : "not "));

		priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
		DEBUG("hardware RX end alignment padding is %ssupported",
		      (priv->hw_padding ? "" : "not "));
		priv_get_num_vfs(priv, &num_vfs);
		priv->sriov = (num_vfs || sriov);
		priv->mps = mps;
		/* Allocate and register default RSS hash keys. */
		priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
					    sizeof((*priv->rss_conf)[0]), 0);
		if (priv->rss_conf == NULL) {
			err = ENOMEM;
			goto port_error;
		}
		err = rss_hash_rss_conf_new_key(priv,
						rss_hash_default_key,
						rss_hash_default_key_len,
						ETH_RSS_PROTO_MASK);
		if (err)
			goto port_error;
		/* Configure the first MAC address by default. */
		if (priv_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx5_en loaded?"
			      " (errno: %s)", strerror(errno));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		claim_zero(priv_mac_addr_add(priv, 0,
					     (const uint8_t (*)[ETHER_ADDR_LEN])
					     mac.addr_bytes));
		/* Initialize FD filters list. */
		err = fdir_init_filters_list(priv);
		if (err)
			goto port_error;
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (priv_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		priv_get_mtu(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
		}
		if (eth_dev == NULL) {
			ERROR("can not allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		/* Secondary processes have to use local storage for their
		 * private data as well as a copy of eth_dev->data, but this
		 * pointer must not be modified before burst functions are
		 * actually called. */
		if (mlx5_is_secondary()) {
			struct mlx5_secondary_data *sd =
				&mlx5_secondary_data[eth_dev->data->port_id];

			sd->primary_priv = eth_dev->data->dev_private;
			if (sd->primary_priv == NULL) {
				ERROR("no private data for port %u",
				      eth_dev->data->port_id);
				err = EINVAL;
				goto port_error;
			}
			sd->shared_dev_data = eth_dev->data;
			rte_spinlock_init(&sd->lock);
			memcpy(sd->data.name, sd->shared_dev_data->name,
			       sizeof(sd->data.name));
			sd->data.dev_private = priv;
			sd->data.rx_mbuf_alloc_failed = 0;
			sd->data.mtu = ETHER_MTU;
			sd->data.port_id = sd->shared_dev_data->port_id;
			sd->data.mac_addrs = priv->mac;
			eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup;
			eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
		} else {
			eth_dev->data->dev_private = priv;
			eth_dev->data->rx_mbuf_alloc_failed = 0;
			eth_dev->data->mtu = ETHER_MTU;
			eth_dev->data->mac_addrs = priv->mac;
		}
		eth_dev->pci_dev = pci_dev;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->driver = &mlx5_driver;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx5_dev_ops;
		TAILQ_INIT(&eth_dev->link_intr_cbs);
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		priv_set_flags(priv, ~IFF_UP, IFF_UP);
		continue;

port_error:
		if (priv) {
			rte_free(priv->rss_conf);
			rte_free(priv);
		}
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate an ethdev and
	 * a way to enumerate the registered ethdevs to free the previous ones.
	 */

	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		err = ENODEV;
		goto error;
	}

error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(err >= 0);
	return -err;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		.vendor_id = 0
	}
};
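
/*
 * Note (standard PCI probe flow): the EAL matches probed PCI devices against
 * the vendor/device ID pairs above and only then invokes mlx5_pci_devinit()
 * through the .devinit callback registered below.
 */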
static struct eth_driver mlx5_driver = {
	.pci_drv = {
		.name = MLX5_DRIVER_NAME,
		.id_table = mlx5_pci_id_map,
		.devinit = mlx5_pci_devinit,
		.drv_flags = RTE_PCI_DRV_INTR_LSC,
	},
	.dev_private_size = sizeof(struct priv)
};
/**
 * Driver initialization routine.
 */
static int
rte_mlx5_pmd_init(const char *name, const char *args)
{
	(void)name;
	(void)args;
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	ibv_fork_init();
	rte_eal_pci_register(&mlx5_driver.pci_drv);
	return 0;
}
static struct rte_driver rte_mlx5_driver = {
	.type = PMD_PDEV,
	.name = MLX5_DRIVER_NAME,
	.init = rte_mlx5_pmd_init,
};

PMD_REGISTER_DRIVER(rte_mlx5_driver)