net/mlx5: query vport index match mode and parameters
[dpdk.git] / drivers / net / mlx5 / mlx5.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #include <stddef.h>
7 #include <unistd.h>
8 #include <string.h>
9 #include <assert.h>
10 #include <dlfcn.h>
11 #include <stdint.h>
12 #include <stdlib.h>
13 #include <errno.h>
14 #include <net/if.h>
15 #include <sys/mman.h>
16 #include <linux/rtnetlink.h>
17
18 /* Verbs header. */
19 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
20 #ifdef PEDANTIC
21 #pragma GCC diagnostic ignored "-Wpedantic"
22 #endif
23 #include <infiniband/verbs.h>
24 #ifdef PEDANTIC
25 #pragma GCC diagnostic error "-Wpedantic"
26 #endif
27
28 #include <rte_malloc.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_ethdev_pci.h>
31 #include <rte_pci.h>
32 #include <rte_bus_pci.h>
33 #include <rte_common.h>
34 #include <rte_config.h>
35 #include <rte_kvargs.h>
36 #include <rte_rwlock.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 #include <rte_alarm.h>
40
41 #include "mlx5.h"
42 #include "mlx5_utils.h"
43 #include "mlx5_rxtx.h"
44 #include "mlx5_autoconf.h"
45 #include "mlx5_defs.h"
46 #include "mlx5_glue.h"
47 #include "mlx5_mr.h"
48 #include "mlx5_flow.h"
49
50 /* Device parameter to enable RX completion queue compression. */
51 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
52
53 /* Device parameter to enable RX completion entry padding to 128B. */
54 #define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"
55
56 /* Device parameter to enable padding Rx packet to cacheline size. */
57 #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"
58
59 /* Device parameter to enable Multi-Packet Rx queue. */
60 #define MLX5_RX_MPRQ_EN "mprq_en"
61
62 /* Device parameter to configure log 2 of the number of strides for MPRQ. */
63 #define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"
64
65 /* Device parameter to limit the size of memcpy'd packet for MPRQ. */
66 #define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"
67
68 /* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
69 #define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"
70
71 /* Device parameter to configure inline send. Deprecated, ignored.*/
72 #define MLX5_TXQ_INLINE "txq_inline"
73
74 /* Device parameter to limit packet size to inline with ordinary SEND. */
75 #define MLX5_TXQ_INLINE_MAX "txq_inline_max"
76
77 /* Device parameter to configure minimal data size to inline. */
78 #define MLX5_TXQ_INLINE_MIN "txq_inline_min"
79
80 /* Device parameter to limit packet size to inline with Enhanced MPW. */
81 #define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"
82
83 /*
84  * Device parameter to configure the number of TX queues threshold for
85  * enabling inline send.
86  */
87 #define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
88
89 /*
90  * Device parameter to configure the number of TX queues threshold for
91  * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
92  */
93 #define MLX5_TXQS_MAX_VEC "txqs_max_vec"
94
95 /* Device parameter to enable multi-packet send WQEs. */
96 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
97
98 /*
99  * Device parameter to include 2 dsegs in the title WQEBB.
100  * Deprecated, ignored.
101  */
102 #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
103
104 /*
105  * Device parameter to limit the size of inlining packet.
106  * Deprecated, ignored.
107  */
108 #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
109
110 /*
111  * Device parameter to enable hardware Tx vector.
112  * Deprecated, ignored (no vectorized Tx routines anymore).
113  */
114 #define MLX5_TX_VEC_EN "tx_vec_en"
115
116 /* Device parameter to enable hardware Rx vector. */
117 #define MLX5_RX_VEC_EN "rx_vec_en"
118
119 /* Allow L3 VXLAN flow creation. */
120 #define MLX5_L3_VXLAN_EN "l3_vxlan_en"
121
122 /* Activate DV E-Switch flow steering. */
123 #define MLX5_DV_ESW_EN "dv_esw_en"
124
125 /* Activate DV flow steering. */
126 #define MLX5_DV_FLOW_EN "dv_flow_en"
127
128 /* Activate Netlink support in VF mode. */
129 #define MLX5_VF_NL_EN "vf_nl_en"
130
131 /* Enable extending memsegs when creating a MR. */
132 #define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"
133
134 /* Select port representors to instantiate. */
135 #define MLX5_REPRESENTOR "representor"
136
137 /* Device parameter to configure the maximum number of dump files per queue. */
138 #define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"
139
140 /* Configure timeout of LRO session (in microseconds). */
141 #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"
142
143 #ifndef HAVE_IBV_MLX5_MOD_MPW
144 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
145 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
146 #endif
147
148 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
149 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
150 #endif
151
152 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
153
154 /* Shared memory between primary and secondary processes. */
155 struct mlx5_shared_data *mlx5_shared_data;
156
157 /* Spinlock for mlx5_shared_data allocation. */
158 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
159
160 /* Process local data for secondary processes. */
161 static struct mlx5_local_data mlx5_local_data;
162
163 /** Driver-specific log messages type. */
164 int mlx5_logtype;
165
166 /** Data associated with devices to spawn. */
167 struct mlx5_dev_spawn_data {
168         uint32_t ifindex; /**< Network interface index. */
169         uint32_t max_port; /**< IB device maximal port index. */
170         uint32_t ibv_port; /**< IB device physical port index. */
171         int pf_bond; /**< bonding device PF index. < 0 - no bonding */
172         struct mlx5_switch_info info; /**< Switch information. */
173         struct ibv_device *ibv_dev; /**< Associated IB device. */
174         struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
175         struct rte_pci_device *pci_dev; /**< Backend PCI device. */
176 };
177
178 static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
179 static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
180
181 /**
182  * Initialize the counters management structure.
183  *
184  * @param[in] sh
185  *   Pointer to mlx5_ibv_shared object to free
186  */
187 static void
188 mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
189 {
190         uint8_t i;
191
192         TAILQ_INIT(&sh->cmng.flow_counters);
193         for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
194                 TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
195 }
196
197 /**
198  * Destroy all the resources allocated for a counter memory management.
199  *
200  * @param[in] mng
201  *   Pointer to the memory management structure.
202  */
203 static void
204 mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
205 {
206         uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
207
208         LIST_REMOVE(mng, next);
209         claim_zero(mlx5_devx_cmd_destroy(mng->dm));
210         claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
211         rte_free(mem);
212 }
213
214 /**
215  * Close and release all the resources of the counters management.
216  *
217  * @param[in] sh
218  *   Pointer to mlx5_ibv_shared object to free.
219  */
220 static void
221 mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
222 {
223         struct mlx5_counter_stats_mem_mng *mng;
224         uint8_t i;
225         int j;
226         int retries = 1024;
227
228         rte_errno = 0;
229         while (--retries) {
230                 rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
231                 if (rte_errno != EINPROGRESS)
232                         break;
233                 rte_pause();
234         }
235         for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
236                 struct mlx5_flow_counter_pool *pool;
237                 uint32_t batch = !!(i % 2);
238
239                 if (!sh->cmng.ccont[i].pools)
240                         continue;
241                 pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
242                 while (pool) {
243                         if (batch) {
244                                 if (pool->min_dcs)
245                                         claim_zero
246                                         (mlx5_devx_cmd_destroy(pool->min_dcs));
247                         }
248                         for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
249                                 if (pool->counters_raw[j].action)
250                                         claim_zero
251                                         (mlx5_glue->destroy_flow_action
252                                                (pool->counters_raw[j].action));
253                                 if (!batch && pool->counters_raw[j].dcs)
254                                         claim_zero(mlx5_devx_cmd_destroy
255                                                   (pool->counters_raw[j].dcs));
256                         }
257                         TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
258                                      next);
259                         rte_free(pool);
260                         pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
261                 }
262                 rte_free(sh->cmng.ccont[i].pools);
263         }
264         mng = LIST_FIRST(&sh->cmng.mem_mngs);
265         while (mng) {
266                 mlx5_flow_destroy_counter_stat_mem_mng(mng);
267                 mng = LIST_FIRST(&sh->cmng.mem_mngs);
268         }
269         memset(&sh->cmng, 0, sizeof(sh->cmng));
270 }
271
272 /**
273  * Extract pdn of PD object using DV API.
274  *
275  * @param[in] pd
276  *   Pointer to the verbs PD object.
277  * @param[out] pdn
278  *   Pointer to the PD object number variable.
279  *
280  * @return
281  *   0 on success, error value otherwise.
282  */
283 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
284 static int
285 mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
286 {
287         struct mlx5dv_obj obj;
288         struct mlx5dv_pd pd_info;
289         int ret = 0;
290
291         obj.pd.in = pd;
292         obj.pd.out = &pd_info;
293         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
294         if (ret) {
295                 DRV_LOG(DEBUG, "Fail to get PD object info");
296                 return ret;
297         }
298         *pdn = pd_info.pdn;
299         return 0;
300 }
301 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
302
303 /**
304  * Allocate shared IB device context. If there is multiport device the
305  * master and representors will share this context, if there is single
306  * port dedicated IB device, the context will be used by only given
307  * port due to unification.
308  *
309  * Routine first searches the context for the specified IB device name,
310  * if found the shared context assumed and reference counter is incremented.
311  * If no context found the new one is created and initialized with specified
312  * IB device context and parameters.
313  *
314  * @param[in] spawn
315  *   Pointer to the IB device attributes (name, port, etc).
316  *
317  * @return
318  *   Pointer to mlx5_ibv_shared object on success,
319  *   otherwise NULL and rte_errno is set.
320  */
321 static struct mlx5_ibv_shared *
322 mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
323 {
324         struct mlx5_ibv_shared *sh;
325         int err = 0;
326         uint32_t i;
327
328         assert(spawn);
329         /* Secondary process should not create the shared context. */
330         assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
331         pthread_mutex_lock(&mlx5_ibv_list_mutex);
332         /* Search for IB context by device name. */
333         LIST_FOREACH(sh, &mlx5_ibv_list, next) {
334                 if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
335                         sh->refcnt++;
336                         goto exit;
337                 }
338         }
339         /* No device found, we have to create new shared context. */
340         assert(spawn->max_port);
341         sh = rte_zmalloc("ethdev shared ib context",
342                          sizeof(struct mlx5_ibv_shared) +
343                          spawn->max_port *
344                          sizeof(struct mlx5_ibv_shared_port),
345                          RTE_CACHE_LINE_SIZE);
346         if (!sh) {
347                 DRV_LOG(ERR, "shared context allocation failure");
348                 rte_errno  = ENOMEM;
349                 goto exit;
350         }
351         /* Try to open IB device with DV first, then usual Verbs. */
352         errno = 0;
353         sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
354         if (sh->ctx) {
355                 sh->devx = 1;
356                 DRV_LOG(DEBUG, "DevX is supported");
357         } else {
358                 sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
359                 if (!sh->ctx) {
360                         err = errno ? errno : ENODEV;
361                         goto error;
362                 }
363                 DRV_LOG(DEBUG, "DevX is NOT supported");
364         }
365         err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
366         if (err) {
367                 DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
368                 goto error;
369         }
370         sh->refcnt = 1;
371         sh->max_port = spawn->max_port;
372         strncpy(sh->ibdev_name, sh->ctx->device->name,
373                 sizeof(sh->ibdev_name));
374         strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
375                 sizeof(sh->ibdev_path));
376         pthread_mutex_init(&sh->intr_mutex, NULL);
377         /*
378          * Setting port_id to max unallowed value means
379          * there is no interrupt subhandler installed for
380          * the given port index i.
381          */
382         for (i = 0; i < sh->max_port; i++)
383                 sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
384         sh->pd = mlx5_glue->alloc_pd(sh->ctx);
385         if (sh->pd == NULL) {
386                 DRV_LOG(ERR, "PD allocation failure");
387                 err = ENOMEM;
388                 goto error;
389         }
390 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
391         err = mlx5_get_pdn(sh->pd, &sh->pdn);
392         if (err) {
393                 DRV_LOG(ERR, "Fail to extract pdn from PD");
394                 goto error;
395         }
396 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
397         /*
398          * Once the device is added to the list of memory event
399          * callback, its global MR cache table cannot be expanded
400          * on the fly because of deadlock. If it overflows, lookup
401          * should be done by searching MR list linearly, which is slow.
402          *
403          * At this point the device is not added to the memory
404          * event list yet, context is just being created.
405          */
406         err = mlx5_mr_btree_init(&sh->mr.cache,
407                                  MLX5_MR_BTREE_CACHE_N * 2,
408                                  spawn->pci_dev->device.numa_node);
409         if (err) {
410                 err = rte_errno;
411                 goto error;
412         }
413         mlx5_flow_counters_mng_init(sh);
414         /* Add device to memory callback list. */
415         rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
416         LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
417                          sh, mem_event_cb);
418         rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
419         /* Add context to the global device list. */
420         LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
421 exit:
422         pthread_mutex_unlock(&mlx5_ibv_list_mutex);
423         return sh;
424 error:
425         pthread_mutex_unlock(&mlx5_ibv_list_mutex);
426         assert(sh);
427         if (sh->pd)
428                 claim_zero(mlx5_glue->dealloc_pd(sh->pd));
429         if (sh->ctx)
430                 claim_zero(mlx5_glue->close_device(sh->ctx));
431         rte_free(sh);
432         assert(err > 0);
433         rte_errno = err;
434         return NULL;
435 }
436
437 /**
438  * Free shared IB device context. Decrement counter and if zero free
439  * all allocated resources and close handles.
440  *
441  * @param[in] sh
442  *   Pointer to mlx5_ibv_shared object to free
443  */
444 static void
445 mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
446 {
447         pthread_mutex_lock(&mlx5_ibv_list_mutex);
448 #ifndef NDEBUG
449         /* Check the object presence in the list. */
450         struct mlx5_ibv_shared *lctx;
451
452         LIST_FOREACH(lctx, &mlx5_ibv_list, next)
453                 if (lctx == sh)
454                         break;
455         assert(lctx);
456         if (lctx != sh) {
457                 DRV_LOG(ERR, "Freeing non-existing shared IB context");
458                 goto exit;
459         }
460 #endif
461         assert(sh);
462         assert(sh->refcnt);
463         /* Secondary process should not free the shared context. */
464         assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
465         if (--sh->refcnt)
466                 goto exit;
467         /* Release created Memory Regions. */
468         mlx5_mr_release(sh);
469         /* Remove from memory callback device list. */
470         rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
471         LIST_REMOVE(sh, mem_event_cb);
472         rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
473         /* Remove context from the global device list. */
474         LIST_REMOVE(sh, next);
475         /*
476          *  Ensure there is no async event handler installed.
477          *  Only primary process handles async device events.
478          **/
479         mlx5_flow_counters_mng_close(sh);
480         assert(!sh->intr_cnt);
481         if (sh->intr_cnt)
482                 mlx5_intr_callback_unregister
483                         (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
484         pthread_mutex_destroy(&sh->intr_mutex);
485         if (sh->pd)
486                 claim_zero(mlx5_glue->dealloc_pd(sh->pd));
487         if (sh->ctx)
488                 claim_zero(mlx5_glue->close_device(sh->ctx));
489         rte_free(sh);
490 exit:
491         pthread_mutex_unlock(&mlx5_ibv_list_mutex);
492 }
493
494 /**
495  * Initialize DR related data within private structure.
496  * Routine checks the reference counter and does actual
497  * resources creation/initialization only if counter is zero.
498  *
499  * @param[in] priv
500  *   Pointer to the private device data structure.
501  *
502  * @return
503  *   Zero on success, positive error code otherwise.
504  */
505 static int
506 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
507 {
508 #ifdef HAVE_MLX5DV_DR
509         struct mlx5_ibv_shared *sh = priv->sh;
510         int err = 0;
511         void *domain;
512
513         assert(sh);
514         if (sh->dv_refcnt) {
515                 /* Shared DV/DR structures is already initialized. */
516                 sh->dv_refcnt++;
517                 priv->dr_shared = 1;
518                 return 0;
519         }
520         /* Reference counter is zero, we should initialize structures. */
521         domain = mlx5_glue->dr_create_domain(sh->ctx,
522                                              MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
523         if (!domain) {
524                 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
525                 err = errno;
526                 goto error;
527         }
528         sh->rx_domain = domain;
529         domain = mlx5_glue->dr_create_domain(sh->ctx,
530                                              MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
531         if (!domain) {
532                 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
533                 err = errno;
534                 goto error;
535         }
536         pthread_mutex_init(&sh->dv_mutex, NULL);
537         sh->tx_domain = domain;
538 #ifdef HAVE_MLX5DV_DR_ESWITCH
539         if (priv->config.dv_esw_en) {
540                 domain  = mlx5_glue->dr_create_domain
541                         (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
542                 if (!domain) {
543                         DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
544                         err = errno;
545                         goto error;
546                 }
547                 sh->fdb_domain = domain;
548                 sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
549         }
550 #endif
551         sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
552         sh->dv_refcnt++;
553         priv->dr_shared = 1;
554         return 0;
555
556 error:
557        /* Rollback the created objects. */
558         if (sh->rx_domain) {
559                 mlx5_glue->dr_destroy_domain(sh->rx_domain);
560                 sh->rx_domain = NULL;
561         }
562         if (sh->tx_domain) {
563                 mlx5_glue->dr_destroy_domain(sh->tx_domain);
564                 sh->tx_domain = NULL;
565         }
566         if (sh->fdb_domain) {
567                 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
568                 sh->fdb_domain = NULL;
569         }
570         if (sh->esw_drop_action) {
571                 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
572                 sh->esw_drop_action = NULL;
573         }
574         if (sh->pop_vlan_action) {
575                 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
576                 sh->pop_vlan_action = NULL;
577         }
578         return err;
579 #else
580         (void)priv;
581         return 0;
582 #endif
583 }
584
585 /**
586  * Destroy DR related data within private structure.
587  *
588  * @param[in] priv
589  *   Pointer to the private device data structure.
590  */
591 static void
592 mlx5_free_shared_dr(struct mlx5_priv *priv)
593 {
594 #ifdef HAVE_MLX5DV_DR
595         struct mlx5_ibv_shared *sh;
596
597         if (!priv->dr_shared)
598                 return;
599         priv->dr_shared = 0;
600         sh = priv->sh;
601         assert(sh);
602         assert(sh->dv_refcnt);
603         if (sh->dv_refcnt && --sh->dv_refcnt)
604                 return;
605         if (sh->rx_domain) {
606                 mlx5_glue->dr_destroy_domain(sh->rx_domain);
607                 sh->rx_domain = NULL;
608         }
609         if (sh->tx_domain) {
610                 mlx5_glue->dr_destroy_domain(sh->tx_domain);
611                 sh->tx_domain = NULL;
612         }
613 #ifdef HAVE_MLX5DV_DR_ESWITCH
614         if (sh->fdb_domain) {
615                 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
616                 sh->fdb_domain = NULL;
617         }
618         if (sh->esw_drop_action) {
619                 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
620                 sh->esw_drop_action = NULL;
621         }
622 #endif
623         if (sh->pop_vlan_action) {
624                 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
625                 sh->pop_vlan_action = NULL;
626         }
627         pthread_mutex_destroy(&sh->dv_mutex);
628 #else
629         (void)priv;
630 #endif
631 }
632
633 /**
634  * Initialize shared data between primary and secondary process.
635  *
636  * A memzone is reserved by primary process and secondary processes attach to
637  * the memzone.
638  *
639  * @return
640  *   0 on success, a negative errno value otherwise and rte_errno is set.
641  */
642 static int
643 mlx5_init_shared_data(void)
644 {
645         const struct rte_memzone *mz;
646         int ret = 0;
647
648         rte_spinlock_lock(&mlx5_shared_data_lock);
649         if (mlx5_shared_data == NULL) {
650                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
651                         /* Allocate shared memory. */
652                         mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
653                                                  sizeof(*mlx5_shared_data),
654                                                  SOCKET_ID_ANY, 0);
655                         if (mz == NULL) {
656                                 DRV_LOG(ERR,
657                                         "Cannot allocate mlx5 shared data\n");
658                                 ret = -rte_errno;
659                                 goto error;
660                         }
661                         mlx5_shared_data = mz->addr;
662                         memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
663                         rte_spinlock_init(&mlx5_shared_data->lock);
664                 } else {
665                         /* Lookup allocated shared memory. */
666                         mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
667                         if (mz == NULL) {
668                                 DRV_LOG(ERR,
669                                         "Cannot attach mlx5 shared data\n");
670                                 ret = -rte_errno;
671                                 goto error;
672                         }
673                         mlx5_shared_data = mz->addr;
674                         memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
675                 }
676         }
677 error:
678         rte_spinlock_unlock(&mlx5_shared_data_lock);
679         return ret;
680 }
681
682 /**
683  * Retrieve integer value from environment variable.
684  *
685  * @param[in] name
686  *   Environment variable name.
687  *
688  * @return
689  *   Integer value, 0 if the variable is not set.
690  */
691 int
692 mlx5_getenv_int(const char *name)
693 {
694         const char *val = getenv(name);
695
696         if (val == NULL)
697                 return 0;
698         return atoi(val);
699 }
700
701 /**
702  * Verbs callback to allocate a memory. This function should allocate the space
703  * according to the size provided residing inside a huge page.
704  * Please note that all allocation must respect the alignment from libmlx5
705  * (i.e. currently sysconf(_SC_PAGESIZE)).
706  *
707  * @param[in] size
708  *   The size in bytes of the memory to allocate.
709  * @param[in] data
710  *   A pointer to the callback data.
711  *
712  * @return
713  *   Allocated buffer, NULL otherwise and rte_errno is set.
714  */
715 static void *
716 mlx5_alloc_verbs_buf(size_t size, void *data)
717 {
718         struct mlx5_priv *priv = data;
719         void *ret;
720         size_t alignment = sysconf(_SC_PAGESIZE);
721         unsigned int socket = SOCKET_ID_ANY;
722
723         if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
724                 const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
725
726                 socket = ctrl->socket;
727         } else if (priv->verbs_alloc_ctx.type ==
728                    MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
729                 const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
730
731                 socket = ctrl->socket;
732         }
733         assert(data != NULL);
734         ret = rte_malloc_socket(__func__, size, alignment, socket);
735         if (!ret && size)
736                 rte_errno = ENOMEM;
737         return ret;
738 }
739
740 /**
741  * Verbs callback to free a memory.
742  *
743  * @param[in] ptr
744  *   A pointer to the memory to free.
745  * @param[in] data
746  *   A pointer to the callback data.
747  */
748 static void
749 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
750 {
751         assert(data != NULL);
752         rte_free(ptr);
753 }
754
755 /**
756  * DPDK callback to add udp tunnel port
757  *
758  * @param[in] dev
759  *   A pointer to eth_dev
760  * @param[in] udp_tunnel
761  *   A pointer to udp tunnel
762  *
763  * @return
764  *   0 on valid udp ports and tunnels, -ENOTSUP otherwise.
765  */
766 int
767 mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
768                          struct rte_eth_udp_tunnel *udp_tunnel)
769 {
770         assert(udp_tunnel != NULL);
771         if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
772             udp_tunnel->udp_port == 4789)
773                 return 0;
774         if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
775             udp_tunnel->udp_port == 4790)
776                 return 0;
777         return -ENOTSUP;
778 }
779
780 /**
781  * Initialize process private data structure.
782  *
783  * @param dev
784  *   Pointer to Ethernet device structure.
785  *
786  * @return
787  *   0 on success, a negative errno value otherwise and rte_errno is set.
788  */
789 int
790 mlx5_proc_priv_init(struct rte_eth_dev *dev)
791 {
792         struct mlx5_priv *priv = dev->data->dev_private;
793         struct mlx5_proc_priv *ppriv;
794         size_t ppriv_size;
795
796         /*
797          * UAR register table follows the process private structure. BlueFlame
798          * registers for Tx queues are stored in the table.
799          */
800         ppriv_size =
801                 sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
802         ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
803                                   RTE_CACHE_LINE_SIZE, dev->device->numa_node);
804         if (!ppriv) {
805                 rte_errno = ENOMEM;
806                 return -rte_errno;
807         }
808         ppriv->uar_table_sz = ppriv_size;
809         dev->process_private = ppriv;
810         return 0;
811 }
812
813 /**
814  * Un-initialize process private data structure.
815  *
816  * @param dev
817  *   Pointer to Ethernet device structure.
818  */
819 static void
820 mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
821 {
822         if (!dev->process_private)
823                 return;
824         rte_free(dev->process_private);
825         dev->process_private = NULL;
826 }
827
828 /**
829  * DPDK callback to close the device.
830  *
831  * Destroy all queues and objects, free memory.
832  *
833  * @param dev
834  *   Pointer to Ethernet device structure.
835  */
836 static void
837 mlx5_dev_close(struct rte_eth_dev *dev)
838 {
839         struct mlx5_priv *priv = dev->data->dev_private;
840         unsigned int i;
841         int ret;
842
843         DRV_LOG(DEBUG, "port %u closing device \"%s\"",
844                 dev->data->port_id,
845                 ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
846         /* In case mlx5_dev_stop() has not been called. */
847         mlx5_dev_interrupt_handler_uninstall(dev);
848         mlx5_traffic_disable(dev);
849         mlx5_flow_flush(dev, NULL);
850         /* Prevent crashes when queues are still in use. */
851         dev->rx_pkt_burst = removed_rx_burst;
852         dev->tx_pkt_burst = removed_tx_burst;
853         rte_wmb();
854         /* Disable datapath on secondary process. */
855         mlx5_mp_req_stop_rxtx(dev);
856         if (priv->rxqs != NULL) {
857                 /* XXX race condition if mlx5_rx_burst() is still running. */
858                 usleep(1000);
859                 for (i = 0; (i != priv->rxqs_n); ++i)
860                         mlx5_rxq_release(dev, i);
861                 priv->rxqs_n = 0;
862                 priv->rxqs = NULL;
863         }
864         if (priv->txqs != NULL) {
865                 /* XXX race condition if mlx5_tx_burst() is still running. */
866                 usleep(1000);
867                 for (i = 0; (i != priv->txqs_n); ++i)
868                         mlx5_txq_release(dev, i);
869                 priv->txqs_n = 0;
870                 priv->txqs = NULL;
871         }
872         mlx5_proc_priv_uninit(dev);
873         mlx5_mprq_free_mp(dev);
874         mlx5_free_shared_dr(priv);
875         if (priv->rss_conf.rss_key != NULL)
876                 rte_free(priv->rss_conf.rss_key);
877         if (priv->reta_idx != NULL)
878                 rte_free(priv->reta_idx);
879         if (priv->config.vf)
880                 mlx5_nl_mac_addr_flush(dev);
881         if (priv->nl_socket_route >= 0)
882                 close(priv->nl_socket_route);
883         if (priv->nl_socket_rdma >= 0)
884                 close(priv->nl_socket_rdma);
885         if (priv->vmwa_context)
886                 mlx5_vlan_vmwa_exit(priv->vmwa_context);
887         if (priv->sh) {
888                 /*
889                  * Free the shared context in last turn, because the cleanup
890                  * routines above may use some shared fields, like
891                  * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
892                  * ifindex if Netlink fails.
893                  */
894                 mlx5_free_shared_ibctx(priv->sh);
895                 priv->sh = NULL;
896         }
897         ret = mlx5_hrxq_verify(dev);
898         if (ret)
899                 DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
900                         dev->data->port_id);
901         ret = mlx5_ind_table_obj_verify(dev);
902         if (ret)
903                 DRV_LOG(WARNING, "port %u some indirection table still remain",
904                         dev->data->port_id);
905         ret = mlx5_rxq_obj_verify(dev);
906         if (ret)
907                 DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
908                         dev->data->port_id);
909         ret = mlx5_rxq_verify(dev);
910         if (ret)
911                 DRV_LOG(WARNING, "port %u some Rx queues still remain",
912                         dev->data->port_id);
913         ret = mlx5_txq_ibv_verify(dev);
914         if (ret)
915                 DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
916                         dev->data->port_id);
917         ret = mlx5_txq_verify(dev);
918         if (ret)
919                 DRV_LOG(WARNING, "port %u some Tx queues still remain",
920                         dev->data->port_id);
921         ret = mlx5_flow_verify(dev);
922         if (ret)
923                 DRV_LOG(WARNING, "port %u some flows still remain",
924                         dev->data->port_id);
925         if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
926                 unsigned int c = 0;
927                 uint16_t port_id;
928
929                 RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
930                         struct mlx5_priv *opriv =
931                                 rte_eth_devices[port_id].data->dev_private;
932
933                         if (!opriv ||
934                             opriv->domain_id != priv->domain_id ||
935                             &rte_eth_devices[port_id] == dev)
936                                 continue;
937                         ++c;
938                 }
939                 if (!c)
940                         claim_zero(rte_eth_switch_domain_free(priv->domain_id));
941         }
942         memset(priv, 0, sizeof(*priv));
943         priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
944         /*
945          * Reset mac_addrs to NULL such that it is not freed as part of
946          * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
947          * it is freed when dev_private is freed.
948          */
949         dev->data->mac_addrs = NULL;
950 }
951
952 const struct eth_dev_ops mlx5_dev_ops = {
953         .dev_configure = mlx5_dev_configure,
954         .dev_start = mlx5_dev_start,
955         .dev_stop = mlx5_dev_stop,
956         .dev_set_link_down = mlx5_set_link_down,
957         .dev_set_link_up = mlx5_set_link_up,
958         .dev_close = mlx5_dev_close,
959         .promiscuous_enable = mlx5_promiscuous_enable,
960         .promiscuous_disable = mlx5_promiscuous_disable,
961         .allmulticast_enable = mlx5_allmulticast_enable,
962         .allmulticast_disable = mlx5_allmulticast_disable,
963         .link_update = mlx5_link_update,
964         .stats_get = mlx5_stats_get,
965         .stats_reset = mlx5_stats_reset,
966         .xstats_get = mlx5_xstats_get,
967         .xstats_reset = mlx5_xstats_reset,
968         .xstats_get_names = mlx5_xstats_get_names,
969         .fw_version_get = mlx5_fw_version_get,
970         .dev_infos_get = mlx5_dev_infos_get,
971         .read_clock = mlx5_read_clock,
972         .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
973         .vlan_filter_set = mlx5_vlan_filter_set,
974         .rx_queue_setup = mlx5_rx_queue_setup,
975         .tx_queue_setup = mlx5_tx_queue_setup,
976         .rx_queue_release = mlx5_rx_queue_release,
977         .tx_queue_release = mlx5_tx_queue_release,
978         .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
979         .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
980         .mac_addr_remove = mlx5_mac_addr_remove,
981         .mac_addr_add = mlx5_mac_addr_add,
982         .mac_addr_set = mlx5_mac_addr_set,
983         .set_mc_addr_list = mlx5_set_mc_addr_list,
984         .mtu_set = mlx5_dev_set_mtu,
985         .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
986         .vlan_offload_set = mlx5_vlan_offload_set,
987         .reta_update = mlx5_dev_rss_reta_update,
988         .reta_query = mlx5_dev_rss_reta_query,
989         .rss_hash_update = mlx5_rss_hash_update,
990         .rss_hash_conf_get = mlx5_rss_hash_conf_get,
991         .filter_ctrl = mlx5_dev_filter_ctrl,
992         .rx_descriptor_status = mlx5_rx_descriptor_status,
993         .tx_descriptor_status = mlx5_tx_descriptor_status,
994         .rx_queue_count = mlx5_rx_queue_count,
995         .rx_queue_intr_enable = mlx5_rx_intr_enable,
996         .rx_queue_intr_disable = mlx5_rx_intr_disable,
997         .is_removed = mlx5_is_removed,
998         .udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
999         .get_module_info = mlx5_get_module_info,
1000         .get_module_eeprom = mlx5_get_module_eeprom,
1001 };
1002
1003 /* Available operations from secondary process. */
1004 static const struct eth_dev_ops mlx5_dev_sec_ops = {
1005         .stats_get = mlx5_stats_get,
1006         .stats_reset = mlx5_stats_reset,
1007         .xstats_get = mlx5_xstats_get,
1008         .xstats_reset = mlx5_xstats_reset,
1009         .xstats_get_names = mlx5_xstats_get_names,
1010         .fw_version_get = mlx5_fw_version_get,
1011         .dev_infos_get = mlx5_dev_infos_get,
1012         .rx_descriptor_status = mlx5_rx_descriptor_status,
1013         .tx_descriptor_status = mlx5_tx_descriptor_status,
1014         .get_module_info = mlx5_get_module_info,
1015         .get_module_eeprom = mlx5_get_module_eeprom,
1016 };
1017
1018 /* Available operations in flow isolated mode. */
1019 const struct eth_dev_ops mlx5_dev_ops_isolate = {
1020         .dev_configure = mlx5_dev_configure,
1021         .dev_start = mlx5_dev_start,
1022         .dev_stop = mlx5_dev_stop,
1023         .dev_set_link_down = mlx5_set_link_down,
1024         .dev_set_link_up = mlx5_set_link_up,
1025         .dev_close = mlx5_dev_close,
1026         .promiscuous_enable = mlx5_promiscuous_enable,
1027         .promiscuous_disable = mlx5_promiscuous_disable,
1028         .allmulticast_enable = mlx5_allmulticast_enable,
1029         .allmulticast_disable = mlx5_allmulticast_disable,
1030         .link_update = mlx5_link_update,
1031         .stats_get = mlx5_stats_get,
1032         .stats_reset = mlx5_stats_reset,
1033         .xstats_get = mlx5_xstats_get,
1034         .xstats_reset = mlx5_xstats_reset,
1035         .xstats_get_names = mlx5_xstats_get_names,
1036         .fw_version_get = mlx5_fw_version_get,
1037         .dev_infos_get = mlx5_dev_infos_get,
1038         .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
1039         .vlan_filter_set = mlx5_vlan_filter_set,
1040         .rx_queue_setup = mlx5_rx_queue_setup,
1041         .tx_queue_setup = mlx5_tx_queue_setup,
1042         .rx_queue_release = mlx5_rx_queue_release,
1043         .tx_queue_release = mlx5_tx_queue_release,
1044         .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
1045         .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
1046         .mac_addr_remove = mlx5_mac_addr_remove,
1047         .mac_addr_add = mlx5_mac_addr_add,
1048         .mac_addr_set = mlx5_mac_addr_set,
1049         .set_mc_addr_list = mlx5_set_mc_addr_list,
1050         .mtu_set = mlx5_dev_set_mtu,
1051         .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
1052         .vlan_offload_set = mlx5_vlan_offload_set,
1053         .filter_ctrl = mlx5_dev_filter_ctrl,
1054         .rx_descriptor_status = mlx5_rx_descriptor_status,
1055         .tx_descriptor_status = mlx5_tx_descriptor_status,
1056         .rx_queue_intr_enable = mlx5_rx_intr_enable,
1057         .rx_queue_intr_disable = mlx5_rx_intr_disable,
1058         .is_removed = mlx5_is_removed,
1059         .get_module_info = mlx5_get_module_info,
1060         .get_module_eeprom = mlx5_get_module_eeprom,
1061 };
1062
1063 /**
1064  * Verify and store value for device argument.
1065  *
1066  * @param[in] key
1067  *   Key argument to verify.
1068  * @param[in] val
1069  *   Value associated with key.
1070  * @param opaque
1071  *   User data.
1072  *
1073  * @return
1074  *   0 on success, a negative errno value otherwise and rte_errno is set.
1075  */
1076 static int
1077 mlx5_args_check(const char *key, const char *val, void *opaque)
1078 {
1079         struct mlx5_dev_config *config = opaque;
1080         unsigned long tmp;
1081
1082         /* No-op, port representors are processed in mlx5_dev_spawn(). */
1083         if (!strcmp(MLX5_REPRESENTOR, key))
1084                 return 0;
1085         errno = 0;
1086         tmp = strtoul(val, NULL, 0);
1087         if (errno) {
1088                 rte_errno = errno;
1089                 DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
1090                 return -rte_errno;
1091         }
1092         if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
1093                 config->cqe_comp = !!tmp;
1094         } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
1095                 config->cqe_pad = !!tmp;
1096         } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
1097                 config->hw_padding = !!tmp;
1098         } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
1099                 config->mprq.enabled = !!tmp;
1100         } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
1101                 config->mprq.stride_num_n = tmp;
1102         } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
1103                 config->mprq.max_memcpy_len = tmp;
1104         } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
1105                 config->mprq.min_rxqs_num = tmp;
1106         } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
1107                 DRV_LOG(WARNING, "%s: deprecated parameter,"
1108                                  " converted to txq_inline_max", key);
1109                 config->txq_inline_max = tmp;
1110         } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
1111                 config->txq_inline_max = tmp;
1112         } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
1113                 config->txq_inline_min = tmp;
1114         } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
1115                 config->txq_inline_mpw = tmp;
1116         } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
1117                 config->txqs_inline = tmp;
1118         } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
1119                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1120         } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
1121                 config->mps = !!tmp;
1122         } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
1123                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1124         } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
1125                 DRV_LOG(WARNING, "%s: deprecated parameter,"
1126                                  " converted to txq_inline_mpw", key);
1127                 config->txq_inline_mpw = tmp;
1128         } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
1129                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1130         } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
1131                 config->rx_vec_en = !!tmp;
1132         } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
1133                 config->l3_vxlan_en = !!tmp;
1134         } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
1135                 config->vf_nl_en = !!tmp;
1136         } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
1137                 config->dv_esw_en = !!tmp;
1138         } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
1139                 config->dv_flow_en = !!tmp;
1140         } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
1141                 config->mr_ext_memseg_en = !!tmp;
1142         } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
1143                 config->max_dump_files_num = tmp;
1144         } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
1145                 config->lro.timeout = tmp;
1146         } else {
1147                 DRV_LOG(WARNING, "%s: unknown parameter", key);
1148                 rte_errno = EINVAL;
1149                 return -rte_errno;
1150         }
1151         return 0;
1152 }
1153
1154 /**
1155  * Parse device parameters.
1156  *
1157  * @param config
1158  *   Pointer to device configuration structure.
1159  * @param devargs
1160  *   Device arguments structure.
1161  *
1162  * @return
1163  *   0 on success, a negative errno value otherwise and rte_errno is set.
1164  */
1165 static int
1166 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
1167 {
1168         const char **params = (const char *[]){
1169                 MLX5_RXQ_CQE_COMP_EN,
1170                 MLX5_RXQ_CQE_PAD_EN,
1171                 MLX5_RXQ_PKT_PAD_EN,
1172                 MLX5_RX_MPRQ_EN,
1173                 MLX5_RX_MPRQ_LOG_STRIDE_NUM,
1174                 MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
1175                 MLX5_RXQS_MIN_MPRQ,
1176                 MLX5_TXQ_INLINE,
1177                 MLX5_TXQ_INLINE_MIN,
1178                 MLX5_TXQ_INLINE_MAX,
1179                 MLX5_TXQ_INLINE_MPW,
1180                 MLX5_TXQS_MIN_INLINE,
1181                 MLX5_TXQS_MAX_VEC,
1182                 MLX5_TXQ_MPW_EN,
1183                 MLX5_TXQ_MPW_HDR_DSEG_EN,
1184                 MLX5_TXQ_MAX_INLINE_LEN,
1185                 MLX5_TX_VEC_EN,
1186                 MLX5_RX_VEC_EN,
1187                 MLX5_L3_VXLAN_EN,
1188                 MLX5_VF_NL_EN,
1189                 MLX5_DV_ESW_EN,
1190                 MLX5_DV_FLOW_EN,
1191                 MLX5_MR_EXT_MEMSEG_EN,
1192                 MLX5_REPRESENTOR,
1193                 MLX5_MAX_DUMP_FILES_NUM,
1194                 MLX5_LRO_TIMEOUT_USEC,
1195                 NULL,
1196         };
1197         struct rte_kvargs *kvlist;
1198         int ret = 0;
1199         int i;
1200
1201         if (devargs == NULL)
1202                 return 0;
1203         /* Following UGLY cast is done to pass checkpatch. */
1204         kvlist = rte_kvargs_parse(devargs->args, params);
1205         if (kvlist == NULL) {
1206                 rte_errno = EINVAL;
1207                 return -rte_errno;
1208         }
1209         /* Process parameters. */
1210         for (i = 0; (params[i] != NULL); ++i) {
1211                 if (rte_kvargs_count(kvlist, params[i])) {
1212                         ret = rte_kvargs_process(kvlist, params[i],
1213                                                  mlx5_args_check, config);
1214                         if (ret) {
1215                                 rte_errno = EINVAL;
1216                                 rte_kvargs_free(kvlist);
1217                                 return -rte_errno;
1218                         }
1219                 }
1220         }
1221         rte_kvargs_free(kvlist);
1222         return 0;
1223 }
1224
1225 static struct rte_pci_driver mlx5_driver;
1226
1227 /**
1228  * PMD global initialization.
1229  *
1230  * Independent from individual device, this function initializes global
1231  * per-PMD data structures distinguishing primary and secondary processes.
1232  * Hence, each initialization is called once per a process.
1233  *
1234  * @return
1235  *   0 on success, a negative errno value otherwise and rte_errno is set.
1236  */
1237 static int
1238 mlx5_init_once(void)
1239 {
1240         struct mlx5_shared_data *sd;
1241         struct mlx5_local_data *ld = &mlx5_local_data;
1242         int ret = 0;
1243
1244         if (mlx5_init_shared_data())
1245                 return -rte_errno;
1246         sd = mlx5_shared_data;
1247         assert(sd);
1248         rte_spinlock_lock(&sd->lock);
1249         switch (rte_eal_process_type()) {
1250         case RTE_PROC_PRIMARY:
1251                 if (sd->init_done)
1252                         break;
1253                 LIST_INIT(&sd->mem_event_cb_list);
1254                 rte_rwlock_init(&sd->mem_event_rwlock);
1255                 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
1256                                                 mlx5_mr_mem_event_cb, NULL);
1257                 ret = mlx5_mp_init_primary();
1258                 if (ret)
1259                         goto out;
1260                 sd->init_done = true;
1261                 break;
1262         case RTE_PROC_SECONDARY:
1263                 if (ld->init_done)
1264                         break;
1265                 ret = mlx5_mp_init_secondary();
1266                 if (ret)
1267                         goto out;
1268                 ++sd->secondary_cnt;
1269                 ld->init_done = true;
1270                 break;
1271         default:
1272                 break;
1273         }
1274 out:
1275         rte_spinlock_unlock(&sd->lock);
1276         return ret;
1277 }
1278
1279 /**
1280  * Configures the minimal amount of data to inline into WQE
1281  * while sending packets.
1282  *
1283  * - the txq_inline_min has the maximal priority, if this
1284  *   key is specified in devargs
1285  * - if DevX is enabled the inline mode is queried from the
1286  *   device (HCA attributes and NIC vport context if needed).
1287  * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
1288  *   and none (0 bytes) for other NICs
1289  *
1290  * @param spawn
1291  *   Verbs device parameters (name, port, switch_info) to spawn.
1292  * @param config
1293  *   Device configuration parameters.
1294  */
1295 static void
1296 mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
1297                     struct mlx5_dev_config *config)
1298 {
1299         if (config->txq_inline_min != MLX5_ARG_UNSET) {
1300                 /* Application defines size of inlined data explicitly. */
1301                 switch (spawn->pci_dev->id.device_id) {
1302                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1303                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1304                         if (config->txq_inline_min <
1305                                        (int)MLX5_INLINE_HSIZE_L2) {
1306                                 DRV_LOG(DEBUG,
1307                                         "txq_inline_mix aligned to minimal"
1308                                         " ConnectX-4 required value %d",
1309                                         (int)MLX5_INLINE_HSIZE_L2);
1310                                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1311                         }
1312                         break;
1313                 }
1314                 goto exit;
1315         }
1316         if (config->hca_attr.eth_net_offloads) {
1317                 /* We have DevX enabled, inline mode queried successfully. */
1318                 switch (config->hca_attr.wqe_inline_mode) {
1319                 case MLX5_CAP_INLINE_MODE_L2:
1320                         /* outer L2 header must be inlined. */
1321                         config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1322                         goto exit;
1323                 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1324                         /* No inline data are required by NIC. */
1325                         config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1326                         config->hw_vlan_insert =
1327                                 config->hca_attr.wqe_vlan_insert;
1328                         DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
1329                         goto exit;
1330                 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1331                         /* inline mode is defined by NIC vport context. */
1332                         if (!config->hca_attr.eth_virt)
1333                                 break;
1334                         switch (config->hca_attr.vport_inline_mode) {
1335                         case MLX5_INLINE_MODE_NONE:
1336                                 config->txq_inline_min =
1337                                         MLX5_INLINE_HSIZE_NONE;
1338                                 goto exit;
1339                         case MLX5_INLINE_MODE_L2:
1340                                 config->txq_inline_min =
1341                                         MLX5_INLINE_HSIZE_L2;
1342                                 goto exit;
1343                         case MLX5_INLINE_MODE_IP:
1344                                 config->txq_inline_min =
1345                                         MLX5_INLINE_HSIZE_L3;
1346                                 goto exit;
1347                         case MLX5_INLINE_MODE_TCP_UDP:
1348                                 config->txq_inline_min =
1349                                         MLX5_INLINE_HSIZE_L4;
1350                                 goto exit;
1351                         case MLX5_INLINE_MODE_INNER_L2:
1352                                 config->txq_inline_min =
1353                                         MLX5_INLINE_HSIZE_INNER_L2;
1354                                 goto exit;
1355                         case MLX5_INLINE_MODE_INNER_IP:
1356                                 config->txq_inline_min =
1357                                         MLX5_INLINE_HSIZE_INNER_L3;
1358                                 goto exit;
1359                         case MLX5_INLINE_MODE_INNER_TCP_UDP:
1360                                 config->txq_inline_min =
1361                                         MLX5_INLINE_HSIZE_INNER_L4;
1362                                 goto exit;
1363                         }
1364                 }
1365         }
1366         /*
1367          * We get here if we are unable to deduce
1368          * inline data size with DevX. Try PCI ID
1369          * to determine old NICs.
1370          */
1371         switch (spawn->pci_dev->id.device_id) {
1372         case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1373         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1374         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
1375         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1376                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1377                 config->hw_vlan_insert = 0;
1378                 break;
1379         case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
1380         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1381         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
1382         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1383                 /*
1384                  * These NICs support VLAN insertion from WQE and
1385                  * report the wqe_vlan_insert flag. But there is the bug
1386                  * and PFC control may be broken, so disable feature.
1387                  */
1388                 config->hw_vlan_insert = 0;
1389                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1390                 break;
1391         default:
1392                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1393                 break;
1394         }
1395 exit:
1396         DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
1397 }
1398
1399 /**
1400  * Allocate page of door-bells and register it using DevX API.
1401  *
1402  * @param [in] dev
1403  *   Pointer to Ethernet device.
1404  *
1405  * @return
1406  *   Pointer to new page on success, NULL otherwise.
1407  */
1408 static struct mlx5_devx_dbr_page *
1409 mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
1410 {
1411         struct mlx5_priv *priv = dev->data->dev_private;
1412         struct mlx5_devx_dbr_page *page;
1413
1414         /* Allocate space for door-bell page and management data. */
1415         page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
1416                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1417         if (!page) {
1418                 DRV_LOG(ERR, "port %u cannot allocate dbr page",
1419                         dev->data->port_id);
1420                 return NULL;
1421         }
1422         /* Register allocated memory. */
1423         page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
1424                                               MLX5_DBR_PAGE_SIZE, 0);
1425         if (!page->umem) {
1426                 DRV_LOG(ERR, "port %u cannot umem reg dbr page",
1427                         dev->data->port_id);
1428                 rte_free(page);
1429                 return NULL;
1430         }
1431         return page;
1432 }
1433
1434 /**
1435  * Find the next available door-bell, allocate new page if needed.
1436  *
1437  * @param [in] dev
1438  *   Pointer to Ethernet device.
1439  * @param [out] dbr_page
1440  *   Door-bell page containing the page data.
1441  *
1442  * @return
1443  *   Door-bell address offset on success, a negative error value otherwise.
1444  */
1445 int64_t
1446 mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
1447 {
1448         struct mlx5_priv *priv = dev->data->dev_private;
1449         struct mlx5_devx_dbr_page *page = NULL;
1450         uint32_t i, j;
1451
1452         LIST_FOREACH(page, &priv->dbrpgs, next)
1453                 if (page->dbr_count < MLX5_DBR_PER_PAGE)
1454                         break;
1455         if (!page) { /* No page with free door-bell exists. */
1456                 page = mlx5_alloc_dbr_page(dev);
1457                 if (!page) /* Failed to allocate new page. */
1458                         return (-1);
1459                 LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
1460         }
1461         /* Loop to find bitmap part with clear bit. */
1462         for (i = 0;
1463              i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
1464              i++)
1465                 ; /* Empty. */
1466         /* Find the first clear bit. */
1467         j = rte_bsf64(~page->dbr_bitmap[i]);
1468         assert(i < (MLX5_DBR_PER_PAGE / 64));
1469         page->dbr_bitmap[i] |= (1 << j);
1470         page->dbr_count++;
1471         *dbr_page = page;
1472         return (((i * 64) + j) * sizeof(uint64_t));
1473 }
1474
1475 /**
1476  * Release a door-bell record.
1477  *
1478  * @param [in] dev
1479  *   Pointer to Ethernet device.
1480  * @param [in] umem_id
1481  *   UMEM ID of page containing the door-bell record to release.
1482  * @param [in] offset
1483  *   Offset of door-bell record in page.
1484  *
1485  * @return
1486  *   0 on success, a negative error value otherwise.
1487  */
1488 int32_t
1489 mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
1490 {
1491         struct mlx5_priv *priv = dev->data->dev_private;
1492         struct mlx5_devx_dbr_page *page = NULL;
1493         int ret = 0;
1494
1495         LIST_FOREACH(page, &priv->dbrpgs, next)
1496                 /* Find the page this address belongs to. */
1497                 if (page->umem->umem_id == umem_id)
1498                         break;
1499         if (!page)
1500                 return -EINVAL;
1501         page->dbr_count--;
1502         if (!page->dbr_count) {
1503                 /* Page not used, free it and remove from list. */
1504                 LIST_REMOVE(page, next);
1505                 if (page->umem)
1506                         ret = -mlx5_glue->devx_umem_dereg(page->umem);
1507                 rte_free(page);
1508         } else {
1509                 /* Mark in bitmap that this door-bell is not in use. */
1510                 offset /= MLX5_DBR_SIZE;
1511                 int i = offset / 64;
1512                 int j = offset % 64;
1513
1514                 page->dbr_bitmap[i] &= ~(1 << j);
1515         }
1516         return ret;
1517 }
1518
1519 /**
1520  * Spawn an Ethernet device from Verbs information.
1521  *
1522  * @param dpdk_dev
1523  *   Backing DPDK device.
1524  * @param spawn
1525  *   Verbs device parameters (name, port, switch_info) to spawn.
1526  * @param config
1527  *   Device configuration parameters.
1528  *
1529  * @return
1530  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
1531  *   is set. The following errors are defined:
1532  *
1533  *   EBUSY: device is not supposed to be spawned.
1534  *   EEXIST: device is already spawned
1535  */
1536 static struct rte_eth_dev *
1537 mlx5_dev_spawn(struct rte_device *dpdk_dev,
1538                struct mlx5_dev_spawn_data *spawn,
1539                struct mlx5_dev_config config)
1540 {
1541         const struct mlx5_switch_info *switch_info = &spawn->info;
1542         struct mlx5_ibv_shared *sh = NULL;
1543         struct ibv_port_attr port_attr;
1544         struct mlx5dv_context dv_attr = { .comp_mask = 0 };
1545         struct rte_eth_dev *eth_dev = NULL;
1546         struct mlx5_priv *priv = NULL;
1547         int err = 0;
1548         unsigned int hw_padding = 0;
1549         unsigned int mps;
1550         unsigned int cqe_comp;
1551         unsigned int cqe_pad = 0;
1552         unsigned int tunnel_en = 0;
1553         unsigned int mpls_en = 0;
1554         unsigned int swp = 0;
1555         unsigned int mprq = 0;
1556         unsigned int mprq_min_stride_size_n = 0;
1557         unsigned int mprq_max_stride_size_n = 0;
1558         unsigned int mprq_min_stride_num_n = 0;
1559         unsigned int mprq_max_stride_num_n = 0;
1560         struct rte_ether_addr mac;
1561         char name[RTE_ETH_NAME_MAX_LEN];
1562         int own_domain_id = 0;
1563         uint16_t port_id;
1564         unsigned int i;
1565 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1566         struct mlx5dv_devx_port devx_port;
1567 #endif
1568
1569         /* Determine if this port representor is supposed to be spawned. */
1570         if (switch_info->representor && dpdk_dev->devargs) {
1571                 struct rte_eth_devargs eth_da;
1572
1573                 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
1574                 if (err) {
1575                         rte_errno = -err;
1576                         DRV_LOG(ERR, "failed to process device arguments: %s",
1577                                 strerror(rte_errno));
1578                         return NULL;
1579                 }
1580                 for (i = 0; i < eth_da.nb_representor_ports; ++i)
1581                         if (eth_da.representor_ports[i] ==
1582                             (uint16_t)switch_info->port_name)
1583                                 break;
1584                 if (i == eth_da.nb_representor_ports) {
1585                         rte_errno = EBUSY;
1586                         return NULL;
1587                 }
1588         }
1589         /* Build device name. */
1590         if (spawn->pf_bond <  0) {
1591                 /* Single device. */
1592                 if (!switch_info->representor)
1593                         strlcpy(name, dpdk_dev->name, sizeof(name));
1594                 else
1595                         snprintf(name, sizeof(name), "%s_representor_%u",
1596                                  dpdk_dev->name, switch_info->port_name);
1597         } else {
1598                 /* Bonding device. */
1599                 if (!switch_info->representor)
1600                         snprintf(name, sizeof(name), "%s_%s",
1601                                  dpdk_dev->name, spawn->ibv_dev->name);
1602                 else
1603                         snprintf(name, sizeof(name), "%s_%s_representor_%u",
1604                                  dpdk_dev->name, spawn->ibv_dev->name,
1605                                  switch_info->port_name);
1606         }
1607         /* check if the device is already spawned */
1608         if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
1609                 rte_errno = EEXIST;
1610                 return NULL;
1611         }
1612         DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
1613         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1614                 eth_dev = rte_eth_dev_attach_secondary(name);
1615                 if (eth_dev == NULL) {
1616                         DRV_LOG(ERR, "can not attach rte ethdev");
1617                         rte_errno = ENOMEM;
1618                         return NULL;
1619                 }
1620                 eth_dev->device = dpdk_dev;
1621                 eth_dev->dev_ops = &mlx5_dev_sec_ops;
1622                 err = mlx5_proc_priv_init(eth_dev);
1623                 if (err)
1624                         return NULL;
1625                 /* Receive command fd from primary process */
1626                 err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
1627                 if (err < 0)
1628                         return NULL;
1629                 /* Remap UAR for Tx queues. */
1630                 err = mlx5_tx_uar_init_secondary(eth_dev, err);
1631                 if (err)
1632                         return NULL;
1633                 /*
1634                  * Ethdev pointer is still required as input since
1635                  * the primary device is not accessible from the
1636                  * secondary process.
1637                  */
1638                 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
1639                 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
1640                 return eth_dev;
1641         }
1642         sh = mlx5_alloc_shared_ibctx(spawn);
1643         if (!sh)
1644                 return NULL;
1645         config.devx = sh->devx;
1646 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
1647         config.dest_tir = 1;
1648 #endif
1649 #ifdef HAVE_IBV_MLX5_MOD_SWP
1650         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
1651 #endif
1652         /*
1653          * Multi-packet send is supported by ConnectX-4 Lx PF as well
1654          * as all ConnectX-5 devices.
1655          */
1656 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1657         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
1658 #endif
1659 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1660         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
1661 #endif
1662         mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
1663         if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
1664                 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
1665                         DRV_LOG(DEBUG, "enhanced MPW is supported");
1666                         mps = MLX5_MPW_ENHANCED;
1667                 } else {
1668                         DRV_LOG(DEBUG, "MPW is supported");
1669                         mps = MLX5_MPW;
1670                 }
1671         } else {
1672                 DRV_LOG(DEBUG, "MPW isn't supported");
1673                 mps = MLX5_MPW_DISABLED;
1674         }
1675 #ifdef HAVE_IBV_MLX5_MOD_SWP
1676         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
1677                 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
1678         DRV_LOG(DEBUG, "SWP support: %u", swp);
1679 #endif
1680         config.swp = !!swp;
1681 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1682         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
1683                 struct mlx5dv_striding_rq_caps mprq_caps =
1684                         dv_attr.striding_rq_caps;
1685
1686                 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
1687                         mprq_caps.min_single_stride_log_num_of_bytes);
1688                 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
1689                         mprq_caps.max_single_stride_log_num_of_bytes);
1690                 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
1691                         mprq_caps.min_single_wqe_log_num_of_strides);
1692                 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
1693                         mprq_caps.max_single_wqe_log_num_of_strides);
1694                 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
1695                         mprq_caps.supported_qpts);
1696                 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
1697                 mprq = 1;
1698                 mprq_min_stride_size_n =
1699                         mprq_caps.min_single_stride_log_num_of_bytes;
1700                 mprq_max_stride_size_n =
1701                         mprq_caps.max_single_stride_log_num_of_bytes;
1702                 mprq_min_stride_num_n =
1703                         mprq_caps.min_single_wqe_log_num_of_strides;
1704                 mprq_max_stride_num_n =
1705                         mprq_caps.max_single_wqe_log_num_of_strides;
1706                 config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1707                                                    mprq_min_stride_num_n);
1708         }
1709 #endif
1710         if (RTE_CACHE_LINE_SIZE == 128 &&
1711             !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
1712                 cqe_comp = 0;
1713         else
1714                 cqe_comp = 1;
1715         config.cqe_comp = cqe_comp;
1716 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1717         /* Whether device supports 128B Rx CQE padding. */
1718         cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
1719                   (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
1720 #endif
1721 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1722         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
1723                 tunnel_en = ((dv_attr.tunnel_offloads_caps &
1724                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
1725                              (dv_attr.tunnel_offloads_caps &
1726                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
1727         }
1728         DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
1729                 tunnel_en ? "" : "not ");
1730 #else
1731         DRV_LOG(WARNING,
1732                 "tunnel offloading disabled due to old OFED/rdma-core version");
1733 #endif
1734         config.tunnel_en = tunnel_en;
1735 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1736         mpls_en = ((dv_attr.tunnel_offloads_caps &
1737                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
1738                    (dv_attr.tunnel_offloads_caps &
1739                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
1740         DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
1741                 mpls_en ? "" : "not ");
1742 #else
1743         DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
1744                 " old OFED/rdma-core version or firmware configuration");
1745 #endif
1746         config.mpls_en = mpls_en;
1747         /* Check port status. */
1748         err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
1749         if (err) {
1750                 DRV_LOG(ERR, "port query failed: %s", strerror(err));
1751                 goto error;
1752         }
1753         if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1754                 DRV_LOG(ERR, "port is not configured in Ethernet mode");
1755                 err = EINVAL;
1756                 goto error;
1757         }
1758         if (port_attr.state != IBV_PORT_ACTIVE)
1759                 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
1760                         mlx5_glue->port_state_str(port_attr.state),
1761                         port_attr.state);
1762         /* Allocate private eth device data. */
1763         priv = rte_zmalloc("ethdev private structure",
1764                            sizeof(*priv),
1765                            RTE_CACHE_LINE_SIZE);
1766         if (priv == NULL) {
1767                 DRV_LOG(ERR, "priv allocation failure");
1768                 err = ENOMEM;
1769                 goto error;
1770         }
1771         priv->sh = sh;
1772         priv->ibv_port = spawn->ibv_port;
1773         priv->pci_dev = spawn->pci_dev;
1774         priv->mtu = RTE_ETHER_MTU;
1775 #ifndef RTE_ARCH_64
1776         /* Initialize UAR access locks for 32bit implementations. */
1777         rte_spinlock_init(&priv->uar_lock_cq);
1778         for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
1779                 rte_spinlock_init(&priv->uar_lock[i]);
1780 #endif
1781         /* Some internal functions rely on Netlink sockets, open them now. */
1782         priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
1783         priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
1784         priv->nl_sn = 0;
1785         priv->representor = !!switch_info->representor;
1786         priv->master = !!switch_info->master;
1787         priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1788         priv->vport_meta_tag = 0;
1789         priv->vport_meta_mask = 0;
1790 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1791         /*
1792          * The DevX port query API is implemented. E-Switch may use
1793          * either vport or reg_c[0] metadata register to match on
1794          * vport index. The engaged part of metadata register is
1795          * defined by mask.
1796          */
1797         devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
1798                               MLX5DV_DEVX_PORT_MATCH_REG_C_0;
1799         err = mlx5dv_query_devx_port(sh->ctx, spawn->ibv_port, &devx_port);
1800         if (err) {
1801                 DRV_LOG(WARNING, "can't query devx port %d on device %s\n",
1802                         spawn->ibv_port, spawn->ibv_dev->name);
1803                 devx_port.comp_mask = 0;
1804         }
1805         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
1806                 priv->vport_meta_tag = devx_port.reg_c_0.value;
1807                 priv->vport_meta_mask = devx_port.reg_c_0.mask;
1808                 if (!priv->vport_meta_mask) {
1809                         DRV_LOG(ERR, "vport zero mask for port %d"
1810                                      " on bonding device %s\n",
1811                                      spawn->ibv_port, spawn->ibv_dev->name);
1812                         err = ENOTSUP;
1813                         goto error;
1814                 }
1815                 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1816                         DRV_LOG(ERR, "invalid vport tag for port %d"
1817                                      " on bonding device %s\n",
1818                                      spawn->ibv_port, spawn->ibv_dev->name);
1819                         err = ENOTSUP;
1820                         goto error;
1821                 }
1822         } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
1823                 priv->vport_id = devx_port.vport_num;
1824         } else if (spawn->pf_bond >= 0) {
1825                 DRV_LOG(ERR, "can't deduce vport index for port %d"
1826                              " on bonding device %s\n",
1827                              spawn->ibv_port, spawn->ibv_dev->name);
1828                 err = ENOTSUP;
1829                 goto error;
1830         } else {
1831                 /* Suppose vport index in compatible way. */
1832                 priv->vport_id = switch_info->representor ?
1833                                  switch_info->port_name + 1 : -1;
1834         }
1835 #else
1836         /*
1837          * Kernel/rdma_core support single E-Switch per PF configurations
1838          * only and vport_id field contains the vport index for
1839          * associated VF, which is deduced from representor port name.
1840          * For example, let's have the IB device port 10, it has
1841          * attached network device eth0, which has port name attribute
1842          * pf0vf2, we can deduce the VF number as 2, and set vport index
1843          * as 3 (2+1). This assigning schema should be changed if the
1844          * multiple E-Switch instances per PF configurations or/and PCI
1845          * subfunctions are added.
1846          */
1847         priv->vport_id = switch_info->representor ?
1848                          switch_info->port_name + 1 : -1;
1849 #endif
1850         /* representor_id field keeps the unmodified VF index. */
1851         priv->representor_id = switch_info->representor ?
1852                                switch_info->port_name : -1;
1853         /*
1854          * Look for sibling devices in order to reuse their switch domain
1855          * if any, otherwise allocate one.
1856          */
1857         RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) {
1858                 const struct mlx5_priv *opriv =
1859                         rte_eth_devices[port_id].data->dev_private;
1860
1861                 if (!opriv ||
1862                         opriv->domain_id ==
1863                         RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1864                         continue;
1865                 priv->domain_id = opriv->domain_id;
1866                 break;
1867         }
1868         if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1869                 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1870                 if (err) {
1871                         err = rte_errno;
1872                         DRV_LOG(ERR, "unable to allocate switch domain: %s",
1873                                 strerror(rte_errno));
1874                         goto error;
1875                 }
1876                 own_domain_id = 1;
1877         }
1878         err = mlx5_args(&config, dpdk_dev->devargs);
1879         if (err) {
1880                 err = rte_errno;
1881                 DRV_LOG(ERR, "failed to process device arguments: %s",
1882                         strerror(rte_errno));
1883                 goto error;
1884         }
1885         config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1886                             IBV_DEVICE_RAW_IP_CSUM);
1887         DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1888                 (config.hw_csum ? "" : "not "));
1889 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1890         !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1891         DRV_LOG(DEBUG, "counters are not supported");
1892 #endif
1893 #ifndef HAVE_IBV_FLOW_DV_SUPPORT
1894         if (config.dv_flow_en) {
1895                 DRV_LOG(WARNING, "DV flow is not supported");
1896                 config.dv_flow_en = 0;
1897         }
1898 #endif
1899         config.ind_table_max_size =
1900                 sh->device_attr.rss_caps.max_rwq_indirection_table_size;
1901         /*
1902          * Remove this check once DPDK supports larger/variable
1903          * indirection tables.
1904          */
1905         if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
1906                 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
1907         DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
1908                 config.ind_table_max_size);
1909         config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1910                                   IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1911         DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1912                 (config.hw_vlan_strip ? "" : "not "));
1913         config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1914                                  IBV_RAW_PACKET_CAP_SCATTER_FCS);
1915         DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1916                 (config.hw_fcs_strip ? "" : "not "));
1917 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1918         hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1919 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1920         hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1921                         IBV_DEVICE_PCI_WRITE_END_PADDING);
1922 #endif
1923         if (config.hw_padding && !hw_padding) {
1924                 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1925                 config.hw_padding = 0;
1926         } else if (config.hw_padding) {
1927                 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1928         }
1929         config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
1930                       (sh->device_attr.tso_caps.supported_qpts &
1931                        (1 << IBV_QPT_RAW_PACKET)));
1932         if (config.tso)
1933                 config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
1934         /*
1935          * MPW is disabled by default, while the Enhanced MPW is enabled
1936          * by default.
1937          */
1938         if (config.mps == MLX5_ARG_UNSET)
1939                 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1940                                                           MLX5_MPW_DISABLED;
1941         else
1942                 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
1943         DRV_LOG(INFO, "%sMPS is %s",
1944                 config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
1945                 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1946         if (config.cqe_comp && !cqe_comp) {
1947                 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
1948                 config.cqe_comp = 0;
1949         }
1950         if (config.cqe_pad && !cqe_pad) {
1951                 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
1952                 config.cqe_pad = 0;
1953         } else if (config.cqe_pad) {
1954                 DRV_LOG(INFO, "Rx CQE padding is enabled");
1955         }
1956         if (config.devx) {
1957                 priv->counter_fallback = 0;
1958                 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
1959                 if (err) {
1960                         err = -err;
1961                         goto error;
1962                 }
1963                 if (!config.hca_attr.flow_counters_dump)
1964                         priv->counter_fallback = 1;
1965 #ifndef HAVE_IBV_DEVX_ASYNC
1966                 priv->counter_fallback = 1;
1967 #endif
1968                 if (priv->counter_fallback)
1969                         DRV_LOG(INFO, "Use fall-back DV counter management\n");
1970                 /* Check for LRO support. */
1971                 if (config.dest_tir && config.hca_attr.lro_cap) {
1972                         /* TBD check tunnel lro caps. */
1973                         config.lro.supported = config.hca_attr.lro_cap;
1974                         DRV_LOG(DEBUG, "Device supports LRO");
1975                         /*
1976                          * If LRO timeout is not configured by application,
1977                          * use the minimal supported value.
1978                          */
1979                         if (!config.lro.timeout)
1980                                 config.lro.timeout =
1981                                 config.hca_attr.lro_timer_supported_periods[0];
1982                         DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1983                                 config.lro.timeout);
1984                 }
1985         }
1986         if (config.mprq.enabled && mprq) {
1987                 if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
1988                     config.mprq.stride_num_n < mprq_min_stride_num_n) {
1989                         config.mprq.stride_num_n =
1990                                 RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1991                                         mprq_min_stride_num_n);
1992                         DRV_LOG(WARNING,
1993                                 "the number of strides"
1994                                 " for Multi-Packet RQ is out of range,"
1995                                 " setting default value (%u)",
1996                                 1 << config.mprq.stride_num_n);
1997                 }
1998                 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
1999                 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
2000         } else if (config.mprq.enabled && !mprq) {
2001                 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
2002                 config.mprq.enabled = 0;
2003         }
2004         if (config.max_dump_files_num == 0)
2005                 config.max_dump_files_num = 128;
2006         eth_dev = rte_eth_dev_allocate(name);
2007         if (eth_dev == NULL) {
2008                 DRV_LOG(ERR, "can not allocate rte ethdev");
2009                 err = ENOMEM;
2010                 goto error;
2011         }
2012         /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
2013         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2014         if (priv->representor) {
2015                 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
2016                 eth_dev->data->representor_id = priv->representor_id;
2017         }
2018         /*
2019          * Store associated network device interface index. This index
2020          * is permanent throughout the lifetime of device. So, we may store
2021          * the ifindex here and use the cached value further.
2022          */
2023         assert(spawn->ifindex);
2024         priv->if_index = spawn->ifindex;
2025         eth_dev->data->dev_private = priv;
2026         priv->dev_data = eth_dev->data;
2027         eth_dev->data->mac_addrs = priv->mac;
2028         eth_dev->device = dpdk_dev;
2029         /* Configure the first MAC address by default. */
2030         if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
2031                 DRV_LOG(ERR,
2032                         "port %u cannot get MAC address, is mlx5_en"
2033                         " loaded? (errno: %s)",
2034                         eth_dev->data->port_id, strerror(rte_errno));
2035                 err = ENODEV;
2036                 goto error;
2037         }
2038         DRV_LOG(INFO,
2039                 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
2040                 eth_dev->data->port_id,
2041                 mac.addr_bytes[0], mac.addr_bytes[1],
2042                 mac.addr_bytes[2], mac.addr_bytes[3],
2043                 mac.addr_bytes[4], mac.addr_bytes[5]);
2044 #ifndef NDEBUG
2045         {
2046                 char ifname[IF_NAMESIZE];
2047
2048                 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
2049                         DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
2050                                 eth_dev->data->port_id, ifname);
2051                 else
2052                         DRV_LOG(DEBUG, "port %u ifname is unknown",
2053                                 eth_dev->data->port_id);
2054         }
2055 #endif
2056         /* Get actual MTU if possible. */
2057         err = mlx5_get_mtu(eth_dev, &priv->mtu);
2058         if (err) {
2059                 err = rte_errno;
2060                 goto error;
2061         }
2062         DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
2063                 priv->mtu);
2064         /* Initialize burst functions to prevent crashes before link-up. */
2065         eth_dev->rx_pkt_burst = removed_rx_burst;
2066         eth_dev->tx_pkt_burst = removed_tx_burst;
2067         eth_dev->dev_ops = &mlx5_dev_ops;
2068         /* Register MAC address. */
2069         claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
2070         if (config.vf && config.vf_nl_en)
2071                 mlx5_nl_mac_addr_sync(eth_dev);
2072         TAILQ_INIT(&priv->flows);
2073         TAILQ_INIT(&priv->ctrl_flows);
2074         /* Hint libmlx5 to use PMD allocator for data plane resources */
2075         struct mlx5dv_ctx_allocators alctr = {
2076                 .alloc = &mlx5_alloc_verbs_buf,
2077                 .free = &mlx5_free_verbs_buf,
2078                 .data = priv,
2079         };
2080         mlx5_glue->dv_set_context_attr(sh->ctx,
2081                                        MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2082                                        (void *)((uintptr_t)&alctr));
2083         /* Bring Ethernet device up. */
2084         DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
2085                 eth_dev->data->port_id);
2086         mlx5_set_link_up(eth_dev);
2087         /*
2088          * Even though the interrupt handler is not installed yet,
2089          * interrupts will still trigger on the async_fd from
2090          * Verbs context returned by ibv_open_device().
2091          */
2092         mlx5_link_update(eth_dev, 0);
2093 #ifdef HAVE_MLX5DV_DR_ESWITCH
2094         if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
2095               (switch_info->representor || switch_info->master)))
2096                 config.dv_esw_en = 0;
2097 #else
2098         config.dv_esw_en = 0;
2099 #endif
2100         /* Detect minimal data bytes to inline. */
2101         mlx5_set_min_inline(spawn, &config);
2102         /* Store device configuration on private structure. */
2103         priv->config = config;
2104         /* Create context for virtual machine VLAN workaround. */
2105         priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
2106         if (config.dv_flow_en) {
2107                 err = mlx5_alloc_shared_dr(priv);
2108                 if (err)
2109                         goto error;
2110         }
2111         /* Supported Verbs flow priority number detection. */
2112         err = mlx5_flow_discover_priorities(eth_dev);
2113         if (err < 0) {
2114                 err = -err;
2115                 goto error;
2116         }
2117         priv->config.flow_prio = err;
2118         return eth_dev;
2119 error:
2120         if (priv) {
2121                 if (priv->sh)
2122                         mlx5_free_shared_dr(priv);
2123                 if (priv->nl_socket_route >= 0)
2124                         close(priv->nl_socket_route);
2125                 if (priv->nl_socket_rdma >= 0)
2126                         close(priv->nl_socket_rdma);
2127                 if (priv->vmwa_context)
2128                         mlx5_vlan_vmwa_exit(priv->vmwa_context);
2129                 if (own_domain_id)
2130                         claim_zero(rte_eth_switch_domain_free(priv->domain_id));
2131                 rte_free(priv);
2132                 if (eth_dev != NULL)
2133                         eth_dev->data->dev_private = NULL;
2134         }
2135         if (eth_dev != NULL) {
2136                 /* mac_addrs must not be freed alone because part of dev_private */
2137                 eth_dev->data->mac_addrs = NULL;
2138                 rte_eth_dev_release_port(eth_dev);
2139         }
2140         if (sh)
2141                 mlx5_free_shared_ibctx(sh);
2142         assert(err > 0);
2143         rte_errno = err;
2144         return NULL;
2145 }
2146
2147 /**
2148  * Comparison callback to sort device data.
2149  *
2150  * This is meant to be used with qsort().
2151  *
2152  * @param a[in]
2153  *   Pointer to pointer to first data object.
2154  * @param b[in]
2155  *   Pointer to pointer to second data object.
2156  *
2157  * @return
2158  *   0 if both objects are equal, less than 0 if the first argument is less
2159  *   than the second, greater than 0 otherwise.
2160  */
2161 static int
2162 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
2163 {
2164         const struct mlx5_switch_info *si_a =
2165                 &((const struct mlx5_dev_spawn_data *)a)->info;
2166         const struct mlx5_switch_info *si_b =
2167                 &((const struct mlx5_dev_spawn_data *)b)->info;
2168         int ret;
2169
2170         /* Master device first. */
2171         ret = si_b->master - si_a->master;
2172         if (ret)
2173                 return ret;
2174         /* Then representor devices. */
2175         ret = si_b->representor - si_a->representor;
2176         if (ret)
2177                 return ret;
2178         /* Unidentified devices come last in no specific order. */
2179         if (!si_a->representor)
2180                 return 0;
2181         /* Order representors by name. */
2182         return si_a->port_name - si_b->port_name;
2183 }
2184
2185 /**
2186  * Match PCI information for possible slaves of bonding device.
2187  *
2188  * @param[in] ibv_dev
2189  *   Pointer to Infiniband device structure.
2190  * @param[in] pci_dev
2191  *   Pointer to PCI device structure to match PCI address.
2192  * @param[in] nl_rdma
2193  *   Netlink RDMA group socket handle.
2194  *
2195  * @return
2196  *   negative value if no bonding device found, otherwise
2197  *   positive index of slave PF in bonding.
2198  */
2199 static int
2200 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
2201                            const struct rte_pci_device *pci_dev,
2202                            int nl_rdma)
2203 {
2204         char ifname[IF_NAMESIZE + 1];
2205         unsigned int ifindex;
2206         unsigned int np, i;
2207         FILE *file = NULL;
2208         int pf = -1;
2209
2210         /*
2211          * Try to get master device name. If something goes
2212          * wrong suppose the lack of kernel support and no
2213          * bonding devices.
2214          */
2215         if (nl_rdma < 0)
2216                 return -1;
2217         if (!strstr(ibv_dev->name, "bond"))
2218                 return -1;
2219         np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
2220         if (!np)
2221                 return -1;
2222         /*
2223          * The Master device might not be on the predefined
2224          * port (not on port index 1, it is not garanted),
2225          * we have to scan all Infiniband device port and
2226          * find master.
2227          */
2228         for (i = 1; i <= np; ++i) {
2229                 /* Check whether Infiniband port is populated. */
2230                 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
2231                 if (!ifindex)
2232                         continue;
2233                 if (!if_indextoname(ifindex, ifname))
2234                         continue;
2235                 /* Try to read bonding slave names from sysfs. */
2236                 MKSTR(slaves,
2237                       "/sys/class/net/%s/master/bonding/slaves", ifname);
2238                 file = fopen(slaves, "r");
2239                 if (file)
2240                         break;
2241         }
2242         if (!file)
2243                 return -1;
2244         /* Use safe format to check maximal buffer length. */
2245         assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
2246         while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
2247                 char tmp_str[IF_NAMESIZE + 32];
2248                 struct rte_pci_addr pci_addr;
2249                 struct mlx5_switch_info info;
2250
2251                 /* Process slave interface names in the loop. */
2252                 snprintf(tmp_str, sizeof(tmp_str),
2253                          "/sys/class/net/%s", ifname);
2254                 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
2255                         DRV_LOG(WARNING, "can not get PCI address"
2256                                          " for netdev \"%s\"", ifname);
2257                         continue;
2258                 }
2259                 if (pci_dev->addr.domain != pci_addr.domain ||
2260                     pci_dev->addr.bus != pci_addr.bus ||
2261                     pci_dev->addr.devid != pci_addr.devid ||
2262                     pci_dev->addr.function != pci_addr.function)
2263                         continue;
2264                 /* Slave interface PCI address match found. */
2265                 fclose(file);
2266                 snprintf(tmp_str, sizeof(tmp_str),
2267                          "/sys/class/net/%s/phys_port_name", ifname);
2268                 file = fopen(tmp_str, "rb");
2269                 if (!file)
2270                         break;
2271                 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
2272                 if (fscanf(file, "%32s", tmp_str) == 1)
2273                         mlx5_translate_port_name(tmp_str, &info);
2274                 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
2275                     info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2276                         pf = info.port_name;
2277                 break;
2278         }
2279         if (file)
2280                 fclose(file);
2281         return pf;
2282 }
2283
2284 /**
2285  * DPDK callback to register a PCI device.
2286  *
2287  * This function spawns Ethernet devices out of a given PCI device.
2288  *
2289  * @param[in] pci_drv
2290  *   PCI driver structure (mlx5_driver).
2291  * @param[in] pci_dev
2292  *   PCI device information.
2293  *
2294  * @return
2295  *   0 on success, a negative errno value otherwise and rte_errno is set.
2296  */
2297 static int
2298 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2299                struct rte_pci_device *pci_dev)
2300 {
2301         struct ibv_device **ibv_list;
2302         /*
2303          * Number of found IB Devices matching with requested PCI BDF.
2304          * nd != 1 means there are multiple IB devices over the same
2305          * PCI device and we have representors and master.
2306          */
2307         unsigned int nd = 0;
2308         /*
2309          * Number of found IB device Ports. nd = 1 and np = 1..n means
2310          * we have the single multiport IB device, and there may be
2311          * representors attached to some of found ports.
2312          */
2313         unsigned int np = 0;
2314         /*
2315          * Number of DPDK ethernet devices to Spawn - either over
2316          * multiple IB devices or multiple ports of single IB device.
2317          * Actually this is the number of iterations to spawn.
2318          */
2319         unsigned int ns = 0;
2320         /*
2321          * Bonding device
2322          *   < 0 - no bonding device (single one)
2323          *  >= 0 - bonding device (value is slave PF index)
2324          */
2325         int bd = -1;
2326         struct mlx5_dev_spawn_data *list = NULL;
2327         struct mlx5_dev_config dev_config;
2328         int ret;
2329
2330         ret = mlx5_init_once();
2331         if (ret) {
2332                 DRV_LOG(ERR, "unable to init PMD global data: %s",
2333                         strerror(rte_errno));
2334                 return -rte_errno;
2335         }
2336         assert(pci_drv == &mlx5_driver);
2337         errno = 0;
2338         ibv_list = mlx5_glue->get_device_list(&ret);
2339         if (!ibv_list) {
2340                 rte_errno = errno ? errno : ENOSYS;
2341                 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
2342                 return -rte_errno;
2343         }
2344         /*
2345          * First scan the list of all Infiniband devices to find
2346          * matching ones, gathering into the list.
2347          */
2348         struct ibv_device *ibv_match[ret + 1];
2349         int nl_route = mlx5_nl_init(NETLINK_ROUTE);
2350         int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
2351         unsigned int i;
2352
2353         while (ret-- > 0) {
2354                 struct rte_pci_addr pci_addr;
2355
2356                 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
2357                 bd = mlx5_device_bond_pci_match
2358                                 (ibv_list[ret], pci_dev, nl_rdma);
2359                 if (bd >= 0) {
2360                         /*
2361                          * Bonding device detected. Only one match is allowed,
2362                          * the bonding is supported over multi-port IB device,
2363                          * there should be no matches on representor PCI
2364                          * functions or non VF LAG bonding devices with
2365                          * specified address.
2366                          */
2367                         if (nd) {
2368                                 DRV_LOG(ERR,
2369                                         "multiple PCI match on bonding device"
2370                                         "\"%s\" found", ibv_list[ret]->name);
2371                                 rte_errno = ENOENT;
2372                                 ret = -rte_errno;
2373                                 goto exit;
2374                         }
2375                         DRV_LOG(INFO, "PCI information matches for"
2376                                       " slave %d bonding device \"%s\"",
2377                                       bd, ibv_list[ret]->name);
2378                         ibv_match[nd++] = ibv_list[ret];
2379                         break;
2380                 }
2381                 if (mlx5_dev_to_pci_addr
2382                         (ibv_list[ret]->ibdev_path, &pci_addr))
2383                         continue;
2384                 if (pci_dev->addr.domain != pci_addr.domain ||
2385                     pci_dev->addr.bus != pci_addr.bus ||
2386                     pci_dev->addr.devid != pci_addr.devid ||
2387                     pci_dev->addr.function != pci_addr.function)
2388                         continue;
2389                 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
2390                         ibv_list[ret]->name);
2391                 ibv_match[nd++] = ibv_list[ret];
2392         }
2393         ibv_match[nd] = NULL;
2394         if (!nd) {
2395                 /* No device matches, just complain and bail out. */
2396                 DRV_LOG(WARNING,
2397                         "no Verbs device matches PCI device " PCI_PRI_FMT ","
2398                         " are kernel drivers loaded?",
2399                         pci_dev->addr.domain, pci_dev->addr.bus,
2400                         pci_dev->addr.devid, pci_dev->addr.function);
2401                 rte_errno = ENOENT;
2402                 ret = -rte_errno;
2403                 goto exit;
2404         }
2405         if (nd == 1) {
2406                 /*
2407                  * Found single matching device may have multiple ports.
2408                  * Each port may be representor, we have to check the port
2409                  * number and check the representors existence.
2410                  */
2411                 if (nl_rdma >= 0)
2412                         np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2413                 if (!np)
2414                         DRV_LOG(WARNING, "can not get IB device \"%s\""
2415                                          " ports number", ibv_match[0]->name);
2416                 if (bd >= 0 && !np) {
2417                         DRV_LOG(ERR, "can not get ports"
2418                                      " for bonding device");
2419                         rte_errno = ENOENT;
2420                         ret = -rte_errno;
2421                         goto exit;
2422                 }
2423         }
2424 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2425         if (bd >= 0) {
2426                 /*
2427                  * This may happen if there is VF LAG kernel support and
2428                  * application is compiled with older rdma_core library.
2429                  */
2430                 DRV_LOG(ERR,
2431                         "No kernel/verbs support for VF LAG bonding found.");
2432                 rte_errno = ENOTSUP;
2433                 ret = -rte_errno;
2434                 goto exit;
2435         }
2436 #endif
2437         /*
2438          * Now we can determine the maximal
2439          * amount of devices to be spawned.
2440          */
2441         list = rte_zmalloc("device spawn data",
2442                          sizeof(struct mlx5_dev_spawn_data) *
2443                          (np ? np : nd),
2444                          RTE_CACHE_LINE_SIZE);
2445         if (!list) {
2446                 DRV_LOG(ERR, "spawn data array allocation failure");
2447                 rte_errno = ENOMEM;
2448                 ret = -rte_errno;
2449                 goto exit;
2450         }
2451         if (bd >= 0 || np > 1) {
2452                 /*
2453                  * Single IB device with multiple ports found,
2454                  * it may be E-Switch master device and representors.
2455                  * We have to perform identification trough the ports.
2456                  */
2457                 assert(nl_rdma >= 0);
2458                 assert(ns == 0);
2459                 assert(nd == 1);
2460                 assert(np);
2461                 for (i = 1; i <= np; ++i) {
2462                         list[ns].max_port = np;
2463                         list[ns].ibv_port = i;
2464                         list[ns].ibv_dev = ibv_match[0];
2465                         list[ns].eth_dev = NULL;
2466                         list[ns].pci_dev = pci_dev;
2467                         list[ns].pf_bond = bd;
2468                         list[ns].ifindex = mlx5_nl_ifindex
2469                                         (nl_rdma, list[ns].ibv_dev->name, i);
2470                         if (!list[ns].ifindex) {
2471                                 /*
2472                                  * No network interface index found for the
2473                                  * specified port, it means there is no
2474                                  * representor on this port. It's OK,
2475                                  * there can be disabled ports, for example
2476                                  * if sriov_numvfs < sriov_totalvfs.
2477                                  */
2478                                 continue;
2479                         }
2480                         ret = -1;
2481                         if (nl_route >= 0)
2482                                 ret = mlx5_nl_switch_info
2483                                                (nl_route,
2484                                                 list[ns].ifindex,
2485                                                 &list[ns].info);
2486                         if (ret || (!list[ns].info.representor &&
2487                                     !list[ns].info.master)) {
2488                                 /*
2489                                  * We failed to recognize representors with
2490                                  * Netlink, let's try to perform the task
2491                                  * with sysfs.
2492                                  */
2493                                 ret =  mlx5_sysfs_switch_info
2494                                                 (list[ns].ifindex,
2495                                                  &list[ns].info);
2496                         }
2497                         if (!ret && bd >= 0) {
2498                                 switch (list[ns].info.name_type) {
2499                                 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2500                                         if (list[ns].info.port_name == bd)
2501                                                 ns++;
2502                                         break;
2503                                 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2504                                         if (list[ns].info.pf_num == bd)
2505                                                 ns++;
2506                                         break;
2507                                 default:
2508                                         break;
2509                                 }
2510                                 continue;
2511                         }
2512                         if (!ret && (list[ns].info.representor ^
2513                                      list[ns].info.master))
2514                                 ns++;
2515                 }
2516                 if (!ns) {
2517                         DRV_LOG(ERR,
2518                                 "unable to recognize master/representors"
2519                                 " on the IB device with multiple ports");
2520                         rte_errno = ENOENT;
2521                         ret = -rte_errno;
2522                         goto exit;
2523                 }
2524         } else {
2525                 /*
2526                  * The existence of several matching entries (nd > 1) means
2527                  * port representors have been instantiated. No existing Verbs
2528                  * call nor sysfs entries can tell them apart, this can only
2529                  * be done through Netlink calls assuming kernel drivers are
2530                  * recent enough to support them.
2531                  *
2532                  * In the event of identification failure through Netlink,
2533                  * try again through sysfs, then:
2534                  *
2535                  * 1. A single IB device matches (nd == 1) with single
2536                  *    port (np=0/1) and is not a representor, assume
2537                  *    no switch support.
2538                  *
2539                  * 2. Otherwise no safe assumptions can be made;
2540                  *    complain louder and bail out.
2541                  */
2542                 np = 1;
2543                 for (i = 0; i != nd; ++i) {
2544                         memset(&list[ns].info, 0, sizeof(list[ns].info));
2545                         list[ns].max_port = 1;
2546                         list[ns].ibv_port = 1;
2547                         list[ns].ibv_dev = ibv_match[i];
2548                         list[ns].eth_dev = NULL;
2549                         list[ns].pci_dev = pci_dev;
2550                         list[ns].pf_bond = -1;
2551                         list[ns].ifindex = 0;
2552                         if (nl_rdma >= 0)
2553                                 list[ns].ifindex = mlx5_nl_ifindex
2554                                         (nl_rdma, list[ns].ibv_dev->name, 1);
2555                         if (!list[ns].ifindex) {
2556                                 char ifname[IF_NAMESIZE];
2557
2558                                 /*
2559                                  * Netlink failed, it may happen with old
2560                                  * ib_core kernel driver (before 4.16).
2561                                  * We can assume there is old driver because
2562                                  * here we are processing single ports IB
2563                                  * devices. Let's try sysfs to retrieve
2564                                  * the ifindex. The method works for
2565                                  * master device only.
2566                                  */
2567                                 if (nd > 1) {
2568                                         /*
2569                                          * Multiple devices found, assume
2570                                          * representors, can not distinguish
2571                                          * master/representor and retrieve
2572                                          * ifindex via sysfs.
2573                                          */
2574                                         continue;
2575                                 }
2576                                 ret = mlx5_get_master_ifname
2577                                         (ibv_match[i]->ibdev_path, &ifname);
2578                                 if (!ret)
2579                                         list[ns].ifindex =
2580                                                 if_nametoindex(ifname);
2581                                 if (!list[ns].ifindex) {
2582                                         /*
2583                                          * No network interface index found
2584                                          * for the specified device, it means
2585                                          * there it is neither representor
2586                                          * nor master.
2587                                          */
2588                                         continue;
2589                                 }
2590                         }
2591                         ret = -1;
2592                         if (nl_route >= 0)
2593                                 ret = mlx5_nl_switch_info
2594                                                (nl_route,
2595                                                 list[ns].ifindex,
2596                                                 &list[ns].info);
2597                         if (ret || (!list[ns].info.representor &&
2598                                     !list[ns].info.master)) {
2599                                 /*
2600                                  * We failed to recognize representors with
2601                                  * Netlink, let's try to perform the task
2602                                  * with sysfs.
2603                                  */
2604                                 ret =  mlx5_sysfs_switch_info
2605                                                 (list[ns].ifindex,
2606                                                  &list[ns].info);
2607                         }
2608                         if (!ret && (list[ns].info.representor ^
2609                                      list[ns].info.master)) {
2610                                 ns++;
2611                         } else if ((nd == 1) &&
2612                                    !list[ns].info.representor &&
2613                                    !list[ns].info.master) {
2614                                 /*
2615                                  * Single IB device with
2616                                  * one physical port and
2617                                  * attached network device.
2618                                  * May be SRIOV is not enabled
2619                                  * or there is no representors.
2620                                  */
2621                                 DRV_LOG(INFO, "no E-Switch support detected");
2622                                 ns++;
2623                                 break;
2624                         }
2625                 }
2626                 if (!ns) {
2627                         DRV_LOG(ERR,
2628                                 "unable to recognize master/representors"
2629                                 " on the multiple IB devices");
2630                         rte_errno = ENOENT;
2631                         ret = -rte_errno;
2632                         goto exit;
2633                 }
2634         }
2635         assert(ns);
2636         /*
2637          * Sort list to probe devices in natural order for users convenience
2638          * (i.e. master first, then representors from lowest to highest ID).
2639          */
2640         qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2641         /* Default configuration. */
2642         dev_config = (struct mlx5_dev_config){
2643                 .hw_padding = 0,
2644                 .mps = MLX5_ARG_UNSET,
2645                 .rx_vec_en = 1,
2646                 .txq_inline_max = MLX5_ARG_UNSET,
2647                 .txq_inline_min = MLX5_ARG_UNSET,
2648                 .txq_inline_mpw = MLX5_ARG_UNSET,
2649                 .txqs_inline = MLX5_ARG_UNSET,
2650                 .vf_nl_en = 1,
2651                 .mr_ext_memseg_en = 1,
2652                 .mprq = {
2653                         .enabled = 0, /* Disabled by default. */
2654                         .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
2655                         .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
2656                         .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
2657                 },
2658                 .dv_esw_en = 1,
2659         };
2660         /* Device specific configuration. */
2661         switch (pci_dev->id.device_id) {
2662         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2663         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2664         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2665         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2666         case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2667         case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2668                 dev_config.vf = 1;
2669                 break;
2670         default:
2671                 break;
2672         }
2673         for (i = 0; i != ns; ++i) {
2674                 uint32_t restore;
2675
2676                 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2677                                                  &list[i],
2678                                                  dev_config);
2679                 if (!list[i].eth_dev) {
2680                         if (rte_errno != EBUSY && rte_errno != EEXIST)
2681                                 break;
2682                         /* Device is disabled or already spawned. Ignore it. */
2683                         continue;
2684                 }
2685                 restore = list[i].eth_dev->data->dev_flags;
2686                 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2687                 /* Restore non-PCI flags cleared by the above call. */
2688                 list[i].eth_dev->data->dev_flags |= restore;
2689                 rte_eth_dev_probing_finish(list[i].eth_dev);
2690         }
2691         if (i != ns) {
2692                 DRV_LOG(ERR,
2693                         "probe of PCI device " PCI_PRI_FMT " aborted after"
2694                         " encountering an error: %s",
2695                         pci_dev->addr.domain, pci_dev->addr.bus,
2696                         pci_dev->addr.devid, pci_dev->addr.function,
2697                         strerror(rte_errno));
2698                 ret = -rte_errno;
2699                 /* Roll back. */
2700                 while (i--) {
2701                         if (!list[i].eth_dev)
2702                                 continue;
2703                         mlx5_dev_close(list[i].eth_dev);
2704                         /* mac_addrs must not be freed because in dev_private */
2705                         list[i].eth_dev->data->mac_addrs = NULL;
2706                         claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2707                 }
2708                 /* Restore original error. */
2709                 rte_errno = -ret;
2710         } else {
2711                 ret = 0;
2712         }
2713 exit:
2714         /*
2715          * Do the routine cleanup:
2716          * - close opened Netlink sockets
2717          * - free allocated spawn data array
2718          * - free the Infiniband device list
2719          */
2720         if (nl_rdma >= 0)
2721                 close(nl_rdma);
2722         if (nl_route >= 0)
2723                 close(nl_route);
2724         if (list)
2725                 rte_free(list);
2726         assert(ibv_list);
2727         mlx5_glue->free_device_list(ibv_list);
2728         return ret;
2729 }
2730
2731 /**
2732  * DPDK callback to remove a PCI device.
2733  *
2734  * This function removes all Ethernet devices belong to a given PCI device.
2735  *
2736  * @param[in] pci_dev
2737  *   Pointer to the PCI device.
2738  *
2739  * @return
2740  *   0 on success, the function cannot fail.
2741  */
2742 static int
2743 mlx5_pci_remove(struct rte_pci_device *pci_dev)
2744 {
2745         uint16_t port_id;
2746
2747         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2748                 rte_eth_dev_close(port_id);
2749         return 0;
2750 }
2751
2752 static const struct rte_pci_id mlx5_pci_id_map[] = {
2753         {
2754                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2755                                PCI_DEVICE_ID_MELLANOX_CONNECTX4)
2756         },
2757         {
2758                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2759                                PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
2760         },
2761         {
2762                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2763                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
2764         },
2765         {
2766                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2767                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
2768         },
2769         {
2770                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2771                                PCI_DEVICE_ID_MELLANOX_CONNECTX5)
2772         },
2773         {
2774                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2775                                PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
2776         },
2777         {
2778                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2779                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
2780         },
2781         {
2782                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2783                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
2784         },
2785         {
2786                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2787                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
2788         },
2789         {
2790                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2791                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
2792         },
2793         {
2794                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2795                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6)
2796         },
2797         {
2798                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2799                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
2800         },
2801         {
2802                 .vendor_id = 0
2803         }
2804 };
2805
2806 static struct rte_pci_driver mlx5_driver = {
2807         .driver = {
2808                 .name = MLX5_DRIVER_NAME
2809         },
2810         .id_table = mlx5_pci_id_map,
2811         .probe = mlx5_pci_probe,
2812         .remove = mlx5_pci_remove,
2813         .dma_map = mlx5_dma_map,
2814         .dma_unmap = mlx5_dma_unmap,
2815         .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
2816                      RTE_PCI_DRV_PROBE_AGAIN,
2817 };
2818
2819 #ifdef RTE_IBVERBS_LINK_DLOPEN
2820
2821 /**
2822  * Suffix RTE_EAL_PMD_PATH with "-glue".
2823  *
2824  * This function performs a sanity check on RTE_EAL_PMD_PATH before
2825  * suffixing its last component.
2826  *
2827  * @param buf[out]
2828  *   Output buffer, should be large enough otherwise NULL is returned.
2829  * @param size
2830  *   Size of @p out.
2831  *
2832  * @return
2833  *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
2834  */
2835 static char *
2836 mlx5_glue_path(char *buf, size_t size)
2837 {
2838         static const char *const bad[] = { "/", ".", "..", NULL };
2839         const char *path = RTE_EAL_PMD_PATH;
2840         size_t len = strlen(path);
2841         size_t off;
2842         int i;
2843
2844         while (len && path[len - 1] == '/')
2845                 --len;
2846         for (off = len; off && path[off - 1] != '/'; --off)
2847                 ;
2848         for (i = 0; bad[i]; ++i)
2849                 if (!strncmp(path + off, bad[i], (int)(len - off)))
2850                         goto error;
2851         i = snprintf(buf, size, "%.*s-glue", (int)len, path);
2852         if (i == -1 || (size_t)i >= size)
2853                 goto error;
2854         return buf;
2855 error:
2856         DRV_LOG(ERR,
2857                 "unable to append \"-glue\" to last component of"
2858                 " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
2859                 " please re-configure DPDK");
2860         return NULL;
2861 }
2862
2863 /**
2864  * Initialization routine for run-time dependency on rdma-core.
2865  */
2866 static int
2867 mlx5_glue_init(void)
2868 {
2869         char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
2870         const char *path[] = {
2871                 /*
2872                  * A basic security check is necessary before trusting
2873                  * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
2874                  */
2875                 (geteuid() == getuid() && getegid() == getgid() ?
2876                  getenv("MLX5_GLUE_PATH") : NULL),
2877                 /*
2878                  * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
2879                  * variant, otherwise let dlopen() look up libraries on its
2880                  * own.
2881                  */
2882                 (*RTE_EAL_PMD_PATH ?
2883                  mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
2884         };
2885         unsigned int i = 0;
2886         void *handle = NULL;
2887         void **sym;
2888         const char *dlmsg;
2889
2890         while (!handle && i != RTE_DIM(path)) {
2891                 const char *end;
2892                 size_t len;
2893                 int ret;
2894
2895                 if (!path[i]) {
2896                         ++i;
2897                         continue;
2898                 }
2899                 end = strpbrk(path[i], ":;");
2900                 if (!end)
2901                         end = path[i] + strlen(path[i]);
2902                 len = end - path[i];
2903                 ret = 0;
2904                 do {
2905                         char name[ret + 1];
2906
2907                         ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
2908                                        (int)len, path[i],
2909                                        (!len || *(end - 1) == '/') ? "" : "/");
2910                         if (ret == -1)
2911                                 break;
2912                         if (sizeof(name) != (size_t)ret + 1)
2913                                 continue;
2914                         DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
2915                                 name);
2916                         handle = dlopen(name, RTLD_LAZY);
2917                         break;
2918                 } while (1);
2919                 path[i] = end + 1;
2920                 if (!*end)
2921                         ++i;
2922         }
2923         if (!handle) {
2924                 rte_errno = EINVAL;
2925                 dlmsg = dlerror();
2926                 if (dlmsg)
2927                         DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
2928                 goto glue_error;
2929         }
2930         sym = dlsym(handle, "mlx5_glue");
2931         if (!sym || !*sym) {
2932                 rte_errno = EINVAL;
2933                 dlmsg = dlerror();
2934                 if (dlmsg)
2935                         DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
2936                 goto glue_error;
2937         }
2938         mlx5_glue = *sym;
2939         return 0;
2940 glue_error:
2941         if (handle)
2942                 dlclose(handle);
2943         DRV_LOG(WARNING,
2944                 "cannot initialize PMD due to missing run-time dependency on"
2945                 " rdma-core libraries (libibverbs, libmlx5)");
2946         return -rte_errno;
2947 }
2948
2949 #endif
2950
2951 /**
2952  * Driver initialization routine.
2953  */
2954 RTE_INIT(rte_mlx5_pmd_init)
2955 {
2956         /* Initialize driver log type. */
2957         mlx5_logtype = rte_log_register("pmd.net.mlx5");
2958         if (mlx5_logtype >= 0)
2959                 rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
2960
2961         /* Build the static tables for Verbs conversion. */
2962         mlx5_set_ptype_table();
2963         mlx5_set_cksum_table();
2964         mlx5_set_swp_types_table();
2965         /*
2966          * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
2967          * huge pages. Calling ibv_fork_init() during init allows
2968          * applications to use fork() safely for purposes other than
2969          * using this PMD, which is not supported in forked processes.
2970          */
2971         setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
2972         /* Match the size of Rx completion entry to the size of a cacheline. */
2973         if (RTE_CACHE_LINE_SIZE == 128)
2974                 setenv("MLX5_CQE_SIZE", "128", 0);
2975         /*
2976          * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
2977          * cleanup all the Verbs resources even when the device was removed.
2978          */
2979         setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
2980 #ifdef RTE_IBVERBS_LINK_DLOPEN
2981         if (mlx5_glue_init())
2982                 return;
2983         assert(mlx5_glue);
2984 #endif
2985 #ifndef NDEBUG
2986         /* Glue structure must not contain any NULL pointers. */
2987         {
2988                 unsigned int i;
2989
2990                 for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
2991                         assert(((const void *const *)mlx5_glue)[i]);
2992         }
2993 #endif
2994         if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
2995                 DRV_LOG(ERR,
2996                         "rdma-core glue \"%s\" mismatch: \"%s\" is required",
2997                         mlx5_glue->version, MLX5_GLUE_VERSION);
2998                 return;
2999         }
3000         mlx5_glue->fork_init();
3001         rte_pci_register(&mlx5_driver);
3002 }
3003
3004 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
3005 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
3006 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");