/*-
 *   BSD LICENSE
 *
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of an inlined packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
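
/*
 * The keys above are consumed as EAL device arguments. A minimal usage
 * sketch (hypothetical PCI address and values, for illustration only):
 *
 *   testpmd -w 0000:05:00.0,txq_inline=128,txqs_min_inline=4,txq_mpw_en=1
 *
 * Each key=value pair is validated and stored by mlx5_args_check() below.
 */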
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
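
/*
 * Usage sketch (the variable name is hypothetical, for illustration only):
 *
 *	if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB"))
 *		DEBUG("example knob is enabled");
 */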
/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   A pointer to the allocated space.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	DEBUG("Extern alloc size: %zu, align: %zu: %p", size, alignment, ret);
	return ret;
}
/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	DEBUG("Extern free request: %p", ptr);
	rte_free(ptr);
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_dev_traffic_disable(priv, dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_priv_rxq_release(priv, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_priv_txq_release(priv, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	priv_socket_uninit(priv);
	ret = mlx5_priv_hrxq_ibv_verify(priv);
	if (ret)
		WARN("%p: some hash Rx queues still remain", (void *)priv);
	ret = mlx5_priv_ind_table_ibv_verify(priv);
	if (ret)
		WARN("%p: some indirection tables still remain", (void *)priv);
	ret = mlx5_priv_rxq_ibv_verify(priv);
	if (ret)
		WARN("%p: some Verbs Rx queues still remain", (void *)priv);
	ret = mlx5_priv_rxq_verify(priv);
	if (ret)
		WARN("%p: some Rx queues still remain", (void *)priv);
	ret = mlx5_priv_txq_ibv_verify(priv);
	if (ret)
		WARN("%p: some Verbs Tx queues still remain", (void *)priv);
	ret = mlx5_priv_txq_verify(priv);
	if (ret)
		WARN("%p: some Tx queues still remain", (void *)priv);
	ret = priv_flow_verify(priv);
	if (ret)
		WARN("%p: some flows still remain", (void *)priv);
	ret = priv_mr_verify(priv);
	if (ret)
		WARN("%p: some memory regions still remain", (void *)priv);
	priv_unlock(priv);
	memset(priv, 0, sizeof(*priv));
}
const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};
/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
static struct {
	struct rte_pci_addr pci_addr; /* Associated PCI address. */
	uint32_t ports; /* Physical ports bitfield. */
} mlx5_dev[32];
/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? config->mps : 0;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else {
		WARN("%s: unknown parameter", key);
		return -EINVAL;
	}
	return 0;
}
/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret != 0) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
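
/*
 * Standalone sketch of the rte_kvargs flow used by mlx5_args(), with a
 * hypothetical handler shown for illustration only (not part of this
 * driver):
 *
 *	static int
 *	dump_kvarg(const char *key, const char *val, void *opaque)
 *	{
 *		(void)opaque;
 *		printf("%s=%s\n", key, val);
 *		return 0;
 *	}
 *
 *	const char *keys[] = { MLX5_TXQ_INLINE, NULL };
 *	struct rte_kvargs *kv = rte_kvargs_parse("txq_inline=128", keys);
 *
 *	if (kv != NULL) {
 *		rte_kvargs_process(kv, MLX5_TXQ_INLINE, dump_kvarg, NULL);
 *		rte_kvargs_free(kv);
 *	}
 */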
static struct rte_pci_driver mlx5_driver;
/*
 * Reserved UAR address space for TXQ UAR (hardware doorbell) mapping;
 * a process-local resource shared by the primary and secondary processes
 * to avoid duplicate reservation. The space has to be available in both
 * processes since TXQ UAR maps to this area using a fixed mmap without
 * double-checking.
 */
static void *uar_base;
/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_uar_init_primary(struct priv *priv)
{
	void *addr = (void *)0;
	int i;
	const struct rte_mem_config *mcfg;
	int ret;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	mcfg = rte_eal_get_configuration()->mem_config;
	for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
		if (addr)
			addr = RTE_MIN(addr, mcfg->memseg[i].addr);
		else
			addr = mcfg->memseg[i].addr;
	}
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		ERROR("Failed to reserve UAR address space, please adjust "
		      "MLX5_UAR_SIZE or try --base-virtaddr");
		ret = ENOMEM;
		return ret;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	INFO("Reserved UAR address space: %p", addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}
/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_uar_init_secondary(struct priv *priv)
{
	void *addr;
	int ret;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		ERROR("UAR mmap failed: %p size: %llu",
		      priv->uar_base, MLX5_UAR_SIZE);
		ret = ENXIO;
		return ret;
	}
	if (priv->uar_base != addr) {
		ERROR("UAR address %p size %llu occupied, please adjust "
		      "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
		      priv->uar_base, MLX5_UAR_SIZE);
		ret = ENXIO;
		return ret;
	}
	uar_base = addr; /* process local, don't reserve again */
	INFO("Reserved UAR address space: %p", addr);
	return 0;
}
/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr_ex device_attr;
	unsigned int sriov;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	int idx;
	int i;
	struct mlx5dv_context attrs_out;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc;
#endif

	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		return -ENOMEM;
	}
	DEBUG("using driver device index %d", idx);
	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = mlx5_glue->get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		if (errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		sriov = ((pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
			 (pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
			 (pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
			 (pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
		switch (pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
			tunnel_en = 1;
			break;
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
			tunnel_en = 1;
			break;
		default:
			break;
		}
		INFO("PCI information matches, using device \"%s\""
		     " (SR-IOV: %s)",
		     list[i]->name,
		     sriov ? "true" : "false");
		attr_ctx = mlx5_glue->open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx5_glue->free_device_list(list);
		switch (err) {
		case 0:
			ERROR("cannot access device, is mlx5_ib loaded?");
			return -ENODEV;
		case EINVAL:
			ERROR("cannot use device, are drivers up to date?");
			return -EINVAL;
		}
		assert(err > 0);
		return -err;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
	mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DEBUG("Enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DEBUG("MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DEBUG("MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
		goto error;
	INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
	for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
		char name[RTE_ETH_NAME_MAX_LEN];
		int len;
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev;
		struct ibv_device_attr_ex device_attr_ex;
		struct ether_addr mac;
		uint16_t num_vfs = 0;
		struct ibv_device_attr_ex device_attr;
		struct mlx5_dev_config config = {
			.cqe_comp = cqe_comp,
			.mps = mps,
			.tunnel_en = tunnel_en,
			.tx_vec_en = 1,
			.rx_vec_en = 1,
			.mpw_hdr_dseg = 0,
			.txq_inline = MLX5_ARG_UNSET,
			.txqs_inline = MLX5_ARG_UNSET,
			.inline_max_packet_sz = MLX5_ARG_UNSET,
		};

		len = snprintf(name, sizeof(name), PCI_PRI_FMT,
			       pci_dev->addr.domain, pci_dev->addr.bus,
			       pci_dev->addr.devid, pci_dev->addr.function);
		if (device_attr.orig_attr.phys_port_cnt > 1)
			snprintf(name + len, sizeof(name), " port %u", i);
		mlx5_dev[idx].ports |= test;
		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				ERROR("cannot attach rte ethdev");
				err = ENOMEM;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx5_dev_sec_ops;
			priv = eth_dev->data->dev_private;
			err = priv_uar_init_secondary(priv);
			if (err < 0) {
				err = -err;
				goto error;
			}
			/* Receive command fd from primary process */
			err = priv_socket_connect(priv);
			if (err < 0) {
				err = -err;
				goto error;
			}
			/* Remap UAR for Tx queues. */
			err = priv_tx_uar_remap(priv, err);
			if (err)
				goto error;
			/*
			 * Ethdev pointer is still required as input since
			 * the primary device is not accessible from the
			 * secondary process.
			 */
			eth_dev->rx_pkt_burst =
				priv_select_rx_function(priv, eth_dev);
			eth_dev->tx_pkt_burst =
				priv_select_tx_function(priv, eth_dev);
			continue;
		}
798 DEBUG("using port %u (%08" PRIx32 ")", port, test);
800 ctx = mlx5_glue->open_device(ibv_dev);
806 mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
807 /* Check port status. */
808 err = mlx5_glue->query_port(ctx, port, &port_attr);
810 ERROR("port query failed: %s", strerror(err));
814 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
815 ERROR("port %d is not configured in Ethernet mode",
821 if (port_attr.state != IBV_PORT_ACTIVE)
822 DEBUG("port %d is not active: \"%s\" (%d)",
823 port, mlx5_glue->port_state_str(port_attr.state),
826 /* Allocate protection domain. */
827 pd = mlx5_glue->alloc_pd(ctx);
829 ERROR("PD allocation failure");
834 mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		err = mlx5_args(&config, pci_dev->device.devargs);
		if (err) {
			ERROR("failed to process device arguments: %s",
			      strerror(err));
			goto port_error;
		}
		if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
			ERROR("ibv_query_device_ex() failed");
			goto port_error;
		}
		config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
				    IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
		config.hw_csum_l2tun =
				!!(exp_device_attr.exp_device_cap_flags &
				   IBV_DEVICE_VXLAN_SUPPORT);
#endif
		DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
		      (config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		config.flow_counter_en = !!(device_attr.max_counter_sets);
		mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
		DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
		      cs_desc.counter_type, cs_desc.num_of_cs,
		      cs_desc.attributes);
#endif
		config.ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/*
		 * Remove this check once DPDK supports larger/variable
		 * indirection tables.
		 */
		if (config.ind_table_max_size >
				(unsigned int)ETH_RSS_RETA_SIZE_512)
			config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DEBUG("maximum RX indirection table size is %u",
		      config.ind_table_max_size);
		config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DEBUG("VLAN stripping is %ssupported",
		      (config.hw_vlan_strip ? "" : "not "));
		config.hw_fcs_strip =
				!!(device_attr_ex.orig_attr.device_cap_flags &
				   IBV_WQ_FLAGS_SCATTER_FCS);
		DEBUG("FCS stripping configuration is %ssupported",
		      (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DEBUG("hardware RX end alignment padding is %ssupported",
		      (config.hw_padding ? "" : "not "));
		priv_get_num_vfs(priv, &num_vfs);
		config.sriov = (num_vfs || sriov);
		config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
			      (device_attr_ex.tso_caps.supported_qpts &
			       (1 << IBV_QPT_RAW_PACKET)));
		if (config.tso)
			config.tso_max_payload_sz =
					device_attr_ex.tso_caps.max_tso;
		if (config.mps && !mps) {
			ERROR("multi-packet send not supported on this device"
			      " (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		}
		INFO("%sMPS is %s",
		     config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
		     config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
		if (config.cqe_comp && !cqe_comp) {
			WARN("Rx CQE compression isn't supported");
			config.cqe_comp = 0;
		}
		err = priv_uar_init_primary(priv);
		if (err)
			goto port_error;
		/* Configure the first MAC address by default. */
		if (priv_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx5_en loaded?"
			      " (errno: %s)", strerror(errno));
			err = ENODEV;
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (priv_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		priv_get_mtu(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			ERROR("cannot allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx5_driver.driver;
		/*
		 * Initialize burst functions to prevent crashes before link-up.
		 */
		eth_dev->rx_pkt_burst = removed_rx_burst;
		eth_dev->tx_pkt_burst = removed_tx_burst;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx5_dev_ops;
		/* Register MAC address. */
		claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
		TAILQ_INIT(&priv->flows);
		TAILQ_INIT(&priv->ctrl_flows);
		/* Hint libmlx5 to use PMD allocator for data plane resources */
		struct mlx5dv_ctx_allocators alctr = {
			.alloc = &mlx5_alloc_verbs_buf,
			.free = &mlx5_free_verbs_buf,
			.data = priv,
		};
		mlx5_glue->dv_set_context_attr(ctx,
					       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
					       (void *)((uintptr_t)&alctr));
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		priv_set_flags(priv, ~IFF_UP, IFF_UP);
		/* Store device configuration on private structure. */
		priv->config = config;
		continue;
port_error:
		if (priv)
			rte_free(priv);
		if (pd)
			claim_zero(mlx5_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx5_glue->close_device(ctx));
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as DPDK does not provide a way to deallocate an ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		err = ENODEV;
		goto error;
	}

error:
	if (attr_ctx)
		claim_zero(mlx5_glue->close_device(attr_ctx));
	if (list)
		mlx5_glue->free_device_list(list);
	assert(err >= 0);
	return -err;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		.vendor_id = 0
	}
};
static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	handle = dlopen(MLX5_GLUE, RTLD_LAZY);
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			WARN("cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			ERROR("cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	WARN("cannot initialize PMD due to missing run-time"
	     " dependency on rdma-core libraries (libibverbs,"
	     " libmlx5)");
	return -rte_errno;
}

#endif
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static table for ptype conversion. */
	mlx5_set_ptype_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");