/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "rte_pmd_mlx5.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx. Deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to non-cached region eliminating the extra write memory barrier.
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
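
/*
 * The MLX5DV_CONTEXT_FLAGS_* fallbacks above keep this file building
 * against older rdma-core headers that do not define these capability
 * bits; the bit values mirror the ones used by newer rdma-core releases.
 */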

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
        uint32_t ifindex; /**< Network interface index. */
        uint32_t max_port; /**< IB device maximal port index. */
        uint32_t ibv_port; /**< IB device physical port index. */
        int pf_bond; /**< PF index in case of bonding device, < 0 if none. */
        struct mlx5_switch_info info; /**< Switch information. */
        struct ibv_device *ibv_dev; /**< Associated IB device. */
        struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
        struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192

/**
 * Allocate ID pool structure.
 *
 * @param[in] max_id
 *   The maximum ID that can be allocated from the pool.
 *
 * @return
 *   Pointer to pool object, NULL value otherwise.
 */
struct mlx5_flow_id_pool *
mlx5_flow_id_pool_alloc(uint32_t max_id)
{
        struct mlx5_flow_id_pool *pool;
        void *mem;

        pool = rte_zmalloc("id pool allocation", sizeof(*pool),
                           RTE_CACHE_LINE_SIZE);
        if (!pool) {
                DRV_LOG(ERR, "can't allocate id pool");
                rte_errno = ENOMEM;
                return NULL;
        }
        mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
                          RTE_CACHE_LINE_SIZE);
        if (!mem) {
                DRV_LOG(ERR, "can't allocate mem for id pool");
                rte_errno = ENOMEM;
                goto error;
        }
        pool->free_arr = mem;
        pool->curr = pool->free_arr;
        pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
        pool->base_index = 0;
        pool->max_id = max_id;
        return pool;
error:
        rte_free(pool);
        return NULL;
}

/**
 * Release ID pool structure.
 *
 * @param[in] pool
 *   Pointer to flow id pool object to free.
 */
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
        rte_free(pool->free_arr);
        rte_free(pool);
}

/**
 * Generate ID.
 *
 * @param[in] pool
 *   Pointer to flow id pool.
 * @param[out] id
 *   The generated ID.
 *
 * @return
 *   0 on success, error value otherwise.
 */
uint32_t
mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
{
        if (pool->curr == pool->free_arr) {
                if (pool->base_index == pool->max_id) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "no free id");
                        return -rte_errno;
                }
                *id = ++pool->base_index;
                return 0;
        }
        *id = *(--pool->curr);
        return 0;
}
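
/*
 * Note on the allocation strategy above: the pool is a stack of recycled
 * IDs (free_arr .. curr). When the stack is empty, a fresh ID is minted
 * by incrementing base_index, so the first ID ever returned is 1 and
 * released IDs are reused in LIFO order.
 */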

/**
 * Release ID.
 *
 * @param[in] pool
 *   Pointer to flow id pool.
 * @param[in] id
 *   The ID to release.
 *
 * @return
 *   0 on success, error value otherwise.
 */
uint32_t
mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
{
        uint32_t size;
        uint32_t size2;
        void *mem;

        if (pool->curr == pool->last) {
                size = pool->curr - pool->free_arr;
                size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
                MLX5_ASSERT(size2 > size);
                mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
                if (!mem) {
                        DRV_LOG(ERR, "can't allocate mem for id pool");
                        rte_errno = ENOMEM;
                        return -rte_errno;
                }
                memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
                rte_free(pool->free_arr);
                pool->free_arr = mem;
                pool->curr = pool->free_arr + size;
                pool->last = pool->free_arr + size2;
        }
        *pool->curr = id;
        pool->curr++;
        return 0;
}
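
/*
 * Illustrative usage of the flow ID pool API above (a sketch for clarity
 * only, not an actual driver code path):
 *
 *      struct mlx5_flow_id_pool *pool = mlx5_flow_id_pool_alloc(UINT32_MAX);
 *      uint32_t id;
 *
 *      if (pool && !mlx5_flow_id_get(pool, &id)) {
 *              ... use id ...
 *              mlx5_flow_id_release(pool, id);
 *      }
 *      mlx5_flow_id_pool_release(pool);
 *
 * When the recycle stack fills up, mlx5_flow_id_release() grows it by
 * MLX5_ID_GENERATION_ARRAY_FACTOR (e.g. 512 entries become 8192).
 */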

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
        uint8_t i;

        TAILQ_INIT(&sh->cmng.flow_counters);
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
                TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

        LIST_REMOVE(mng, next);
        claim_zero(mlx5_devx_cmd_destroy(mng->dm));
        claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
        rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
        struct mlx5_counter_stats_mem_mng *mng;
        uint8_t i;
        int j;
        int retries = 1024;

        rte_errno = 0;
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
                struct mlx5_flow_counter_pool *pool;
                uint32_t batch = !!(i % 2);

                if (!sh->cmng.ccont[i].pools)
                        continue;
                pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                while (pool) {
                        if (batch) {
                                if (pool->min_dcs)
                                        claim_zero
                                        (mlx5_devx_cmd_destroy(pool->min_dcs));
                        }
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
                                if (pool->counters_raw[j].action)
                                        claim_zero
                                        (mlx5_glue->destroy_flow_action
                                               (pool->counters_raw[j].action));
                                if (!batch && pool->counters_raw[j].dcs)
                                        claim_zero(mlx5_devx_cmd_destroy
                                                  (pool->counters_raw[j].dcs));
                        }
                        TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
                                     next);
                        rte_free(pool);
                        pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                }
                rte_free(sh->cmng.ccont[i].pools);
        }
        mng = LIST_FIRST(&sh->cmng.mem_mngs);
        while (mng) {
                mlx5_flow_destroy_counter_stat_mem_mng(mng);
                mng = LIST_FIRST(&sh->cmng.mem_mngs);
        }
        memset(&sh->cmng, 0, sizeof(sh->cmng));
}
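
/*
 * In the loop above, odd container indices hold "batch" counter pools
 * (allocated in bulk with a single min_dcs DevX object), while even ones
 * hold single-counter pools whose per-counter dcs objects are destroyed
 * individually.
 */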

/**
 * Extract pdn of PD object using DV API.
 *
 * @param[in] pd
 *   Pointer to the verbs PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
{
        struct mlx5dv_obj obj;
        struct mlx5dv_pd pd_info;
        int ret = 0;

        obj.pd.in = pd;
        obj.pd.out = &pd_info;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
        if (ret) {
                DRV_LOG(DEBUG, "Failed to get PD object info");
                return ret;
        }
        *pdn = pd_info.pdn;
        return 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
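
/*
 * Note: mlx5_get_pdn() uses the rdma-core Direct Verbs object query
 * interface (mlx5dv_init_obj() behind the glue layer) to translate the
 * Verbs PD handle into the firmware PD number required by DevX commands.
 */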

static int
mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
{
        char *env;
        int value;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Get the current environment variable value to restore it later. */
        env = getenv(MLX5_SHUT_UP_BF);
        value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
        if (config->dbnc == MLX5_ARG_UNSET)
                setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
        else
                setenv(MLX5_SHUT_UP_BF,
                       config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
        return value;
}

static void
mlx5_restore_doorbell_mapping_env(int value)
{
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Restore the original environment variable state. */
        if (value == MLX5_ARG_UNSET)
                unsetenv(MLX5_SHUT_UP_BF);
        else
                setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
}
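
/*
 * The two helpers above implement a save/set/restore pattern around device
 * creation: the original MLX5_SHUT_UP_BF value is saved, overridden
 * according to the tx_db_nc devarg while rdma-core latches it at device
 * creation time, and then restored so other library users are unaffected.
 */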

/**
 * Allocate shared IB device context. If there is a multiport device the
 * master and representors will share this context; if there is a single
 * port dedicated IB device, the context will be used by only the given
 * port due to unification.
 *
 * The routine first searches the context list for the specified IB device
 * name; if found, the shared context is assumed and its reference counter
 * is incremented. If no context is found, a new one is created and
 * initialized with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 * @param[in] config
 *   Pointer to device configuration structure.
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
                        const struct mlx5_dev_config *config)
{
        struct mlx5_ibv_shared *sh;
        int dbmap_env;
        int err = 0;
        uint32_t i;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif

        MLX5_ASSERT(spawn);
        /* Secondary process should not create the shared context. */
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_ibv_list, next) {
                if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
                        sh->refcnt++;
                        goto exit;
                }
        }
        /* No device found, we have to create new shared context. */
        MLX5_ASSERT(spawn->max_port);
        sh = rte_zmalloc("ethdev shared ib context",
                         sizeof(struct mlx5_ibv_shared) +
                         spawn->max_port *
                         sizeof(struct mlx5_ibv_shared_port),
                         RTE_CACHE_LINE_SIZE);
        if (!sh) {
                DRV_LOG(ERR, "shared context allocation failure");
                rte_errno = ENOMEM;
                goto exit;
        }
        /*
         * Configure environment variable "MLX5_SHUT_UP_BF"
         * before the device creation. The rdma_core library
         * checks the variable at device creation and
         * stores the result internally.
         */
        dbmap_env = mlx5_config_doorbell_mapping_env(config);
        /* Try to open IB device with DV first, then usual Verbs. */
        errno = 0;
        sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
        if (sh->ctx) {
                sh->devx = 1;
                DRV_LOG(DEBUG, "DevX is supported");
                /* The device is created, no need for environment. */
                mlx5_restore_doorbell_mapping_env(dbmap_env);
        } else {
                /* The environment variable is still configured. */
                sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
                err = errno ? errno : ENODEV;
                /*
                 * The environment variable is not needed anymore,
                 * all device creation attempts are completed.
                 */
                mlx5_restore_doorbell_mapping_env(dbmap_env);
                if (!sh->ctx)
                        goto error;
                DRV_LOG(DEBUG, "DevX is NOT supported");
        }
        err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
                goto error;
        }
        sh->refcnt = 1;
        sh->max_port = spawn->max_port;
        strncpy(sh->ibdev_name, sh->ctx->device->name,
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to RTE_MAX_ETHPORTS (an invalid value) means
         * there is no interrupt subhandler installed for
         * the given port index i.
         */
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
        sh->pd = mlx5_glue->alloc_pd(sh->ctx);
        if (sh->pd == NULL) {
                DRV_LOG(ERR, "PD allocation failure");
                err = ENOMEM;
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (sh->devx) {
                err = mlx5_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Failed to extract pdn from PD");
                        goto error;
                }
                sh->td = mlx5_devx_cmd_create_td(sh->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
                        err = ENOMEM;
                        goto error;
                }
                tis_attr.transport_domain = sh->td->id;
                sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
                if (!sh->tis) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
                        goto error;
                }
        }
        sh->flow_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX);
        if (!sh->flow_id_pool) {
                DRV_LOG(ERR, "can't create flow id pool");
                err = ENOMEM;
                goto error;
        }
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
        /*
         * Once the device is added to the list of memory event
         * callback, its global MR cache table cannot be expanded
         * on the fly because of deadlock. If it overflows, lookup
         * should be done by searching MR list linearly, which is slow.
         *
         * At this point the device is not added to the memory
         * event list yet, context is just being created.
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
                                 spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
        /* Add device to memory callback list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
                         sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        return sh;
error:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        MLX5_ASSERT(sh);
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        if (sh->flow_id_pool)
                mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
        MLX5_ASSERT(err > 0);
        rte_errno = err;
        return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
        /* Check the object presence in the list. */
        struct mlx5_ibv_shared *lctx;

        LIST_FOREACH(lctx, &mlx5_ibv_list, next)
                if (lctx == sh)
                        break;
        MLX5_ASSERT(lctx);
        if (lctx != sh) {
                DRV_LOG(ERR, "Freeing non-existing shared IB context");
                goto exit;
        }
#endif
        MLX5_ASSERT(sh);
        MLX5_ASSERT(sh->refcnt);
        /* Secondary process should not free the shared context. */
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
        /* Remove from memory callback device list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_REMOVE(sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         * Ensure there is no async event handler installed.
         * Only primary process handles async device events.
         */
        mlx5_flow_counters_mng_close(sh);
        MLX5_ASSERT(!sh->intr_cnt);
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT
        if (sh->devx_intr_cnt) {
                if (sh->intr_handle_devx.fd)
                        rte_intr_callback_unregister(&sh->intr_handle_devx,
                                          mlx5_dev_interrupt_handler_devx, sh);
                if (sh->devx_comp)
                        mlx5dv_devx_destroy_cmd_comp(sh->devx_comp);
        }
#endif
        pthread_mutex_destroy(&sh->intr_mutex);
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        if (sh->flow_id_pool)
                mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}

/**
 * Destroy table hash list and all the root entries per domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_tbl_data_entry *tbl_data;
        union mlx5_flow_tbl_key table_key = {
                {
                        .table_id = 0,
                        .reserved = 0,
                        .domain = 0,
                        .direction = 0,
                }
        };
        struct mlx5_hlist_entry *pos;

        if (!sh->flow_tbls)
                return;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        table_key.direction = 1;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        table_key.direction = 0;
        table_key.domain = 1;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}
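
/*
 * The three table keys handled above (and recreated in
 * mlx5_alloc_table_hash_list() below) address the per-domain root tables:
 * domain 0/direction 0 is the NIC Rx root, domain 0/direction 1 the NIC Tx
 * root, and domain 1/direction 0 the FDB (E-Switch) root.
 */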

/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        char s[MLX5_HLIST_NAMESIZE];
        int err = 0;

        MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
        if (!sh->flow_tbls) {
                DRV_LOG(ERR, "flow tables with hash creation failed.");
                err = ENOMEM;
                return err;
        }
#ifndef HAVE_MLX5DV_DR
        /*
         * In case there is no DR support, the zero tables should be
         * created because DV expects to see them even if they cannot
         * be created by RDMA-CORE.
         */
        union mlx5_flow_tbl_key table_key = {
                {
                        .table_id = 0,
                        .reserved = 0,
                        .domain = 0,
                        .direction = 0,
                }
        };
        struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
                                                          sizeof(*tbl_data), 0);

        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        table_key.direction = 1;
        tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        table_key.direction = 0;
        table_key.domain = 1;
        tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        return err;
error:
        mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
        return err;
}

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        char s[MLX5_HLIST_NAMESIZE];
        int err = 0;

        if (!sh->flow_tbls)
                err = mlx5_alloc_table_hash_list(priv);
        else
                DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
                        (void *)sh->flow_tbls);
        if (err)
                return err;
        /* Create tags hash list table. */
        snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
        sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
        if (!sh->tag_table) {
                DRV_LOG(ERR, "tags with hash creation failed.");
                err = ENOMEM;
                goto error;
        }
#ifdef HAVE_MLX5DV_DR
        void *domain;

        if (sh->dv_refcnt) {
                /* Shared DV/DR structures are already initialized. */
                sh->dv_refcnt++;
                priv->dr_shared = 1;
                return 0;
        }
        /* Reference counter is zero, we should initialize structures. */
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
        if (!domain) {
                DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        sh->rx_domain = domain;
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
        if (!domain) {
                DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        pthread_mutex_init(&sh->dv_mutex, NULL);
        sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (priv->config.dv_esw_en) {
                domain = mlx5_glue->dr_create_domain
                        (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
                if (!domain) {
                        DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
                        err = errno;
                        goto error;
                }
                sh->fdb_domain = domain;
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
#endif
        sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
#endif /* HAVE_MLX5DV_DR */
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;
error:
        /* Rollback the created objects. */
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        if (sh->tag_table) {
                /* Tags should be destroyed with the flows before this. */
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
        mlx5_free_table_hash_list(priv);
        return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh;

        if (!priv->dr_shared)
                return;
        priv->dr_shared = 0;
        sh = priv->sh;
        MLX5_ASSERT(sh);
#ifdef HAVE_MLX5DV_DR
        MLX5_ASSERT(sh->dv_refcnt);
        if (sh->dv_refcnt && --sh->dv_refcnt)
                return;
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
#endif
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        pthread_mutex_destroy(&sh->dv_mutex);
#endif /* HAVE_MLX5DV_DR */
        if (sh->tag_table) {
                /* Tags should be destroyed with the flows before this. */
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
        mlx5_free_table_hash_list(priv);
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
        const struct rte_memzone *mz;
        int ret = 0;

        rte_spinlock_lock(&mlx5_shared_data_lock);
        if (mlx5_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate shared memory. */
                        mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
                                                 sizeof(*mlx5_shared_data),
                                                 SOCKET_ID_ANY, 0);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot allocate mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
                        rte_spinlock_init(&mlx5_shared_data->lock);
                } else {
                        /* Lookup allocated shared memory. */
                        mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot attach mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
                }
        }
error:
        rte_spinlock_unlock(&mlx5_shared_data_lock);
        return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
        const char *val = getenv(name);

        if (val == NULL)
                return 0;
        return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function should allocate the
 * requested space from memory residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
        struct mlx5_priv *priv = data;
        void *ret;
        size_t alignment = sysconf(_SC_PAGESIZE);
        unsigned int socket = SOCKET_ID_ANY;

        if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
                const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        } else if (priv->verbs_alloc_ctx.type ==
                   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
                const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        }
        MLX5_ASSERT(data != NULL);
        ret = rte_malloc_socket(__func__, size, alignment, socket);
        if (!ret && size)
                rte_errno = ENOMEM;
        return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
        MLX5_ASSERT(data != NULL);
        rte_free(ptr);
}

/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   A pointer to the Ethernet device.
 * @param[in] udp_tunnel
 *   A pointer to the UDP tunnel.
 *
 * @return
 *   0 for valid UDP ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        MLX5_ASSERT(udp_tunnel != NULL);
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
}
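
/*
 * Only the default IANA-assigned UDP ports are accepted above: 4789 for
 * VXLAN and 4790 for VXLAN-GPE; tunnel classification on other, custom
 * ports is not supported by this callback.
 */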

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_proc_priv *ppriv;
        size_t ppriv_size;

        /*
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
        ppriv_size =
                sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
        ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        ppriv->uar_table_sz = ppriv_size;
        dev->process_private = ppriv;
        return 0;
}
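
/*
 * Layout of the process-private area allocated above, one UAR/BlueFlame
 * register pointer per Tx queue following the structure itself:
 *
 *      +-----------------------+--------+--------+-----+----------+
 *      | struct mlx5_proc_priv | uar[0] | uar[1] | ... | uar[n-1] |
 *      +-----------------------+--------+--------+-----+----------+
 *
 * uar_table_sz records the total size of this area.
 */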

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
        if (!dev->process_private)
                return;
        rte_free(dev->process_private);
        dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_dev_interrupt_handler_devx_uninstall(dev);
        /*
         * If default mreg copy action is removed at the stop stage,
         * the search will return none and nothing will be done anymore.
         */
        mlx5_flow_stop_default(dev);
        mlx5_traffic_disable(dev);
        /*
         * If all the flows are already flushed in the device stop stage,
         * then this will return directly without any action.
         */
        mlx5_flow_list_flush(dev, &priv->flows, true);
        mlx5_flow_meter_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_req_stop_rxtx(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
                priv->rxqs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i)
                        mlx5_txq_release(dev, i);
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
        mlx5_proc_priv_uninit(dev);
        if (priv->mreg_cp_tbl)
                mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
        mlx5_mprq_free_mp(dev);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
        if (priv->reta_idx != NULL)
                rte_free(priv->reta_idx);
        if (priv->config.vf)
                mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
                                       dev->data->mac_addrs,
                                       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
        if (priv->nl_socket_route >= 0)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
        if (priv->vmwa_context)
                mlx5_vlan_vmwa_exit(priv->vmwa_context);
        if (priv->sh) {
                /*
                 * Free the shared context in last turn, because the cleanup
                 * routines above may use some shared fields, like
                 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
                 * the ifindex if Netlink fails.
                 */
                mlx5_free_shared_ibctx(priv->sh);
                priv->sh = NULL;
        }
        ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_ind_table_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some indirection tables still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_flow_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
                uint16_t port_id;

                MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                        struct mlx5_priv *opriv =
                                rte_eth_devices[port_id].data->dev_private;

                        if (!opriv ||
                            opriv->domain_id != priv->domain_id ||
                            &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
                        break;
                }
                if (!c)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
        }
        memset(priv, 0, sizeof(*priv));
        priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
        /*
         * Reset mac_addrs to NULL such that it is not freed as part of
         * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
         * it is freed when dev_private is freed.
         */
        dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .read_clock = mlx5_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .reta_update = mlx5_dev_rss_reta_update,
        .reta_query = mlx5_dev_rss_reta_query,
        .rss_hash_update = mlx5_rss_hash_update,
        .rss_hash_conf_get = mlx5_rss_hash_conf_get,
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rxq_info_get = mlx5_rxq_info_get,
        .txq_info_get = mlx5_txq_info_get,
        .rx_burst_mode_get = mlx5_rx_burst_mode_get,
        .tx_burst_mode_get = mlx5_tx_burst_mode_get,
        .rx_queue_count = mlx5_rx_queue_count,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rxq_info_get = mlx5_rxq_info_get,
        .txq_info_get = mlx5_txq_info_get,
        .rx_burst_mode_get = mlx5_rx_burst_mode_get,
        .tx_burst_mode_get = mlx5_tx_burst_mode_get,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};
1433
1434 /* Available operations in flow isolated mode. */
1435 const struct eth_dev_ops mlx5_dev_ops_isolate = {
1436         .dev_configure = mlx5_dev_configure,
1437         .dev_start = mlx5_dev_start,
1438         .dev_stop = mlx5_dev_stop,
1439         .dev_set_link_down = mlx5_set_link_down,
1440         .dev_set_link_up = mlx5_set_link_up,
1441         .dev_close = mlx5_dev_close,
1442         .promiscuous_enable = mlx5_promiscuous_enable,
1443         .promiscuous_disable = mlx5_promiscuous_disable,
1444         .allmulticast_enable = mlx5_allmulticast_enable,
1445         .allmulticast_disable = mlx5_allmulticast_disable,
1446         .link_update = mlx5_link_update,
1447         .stats_get = mlx5_stats_get,
1448         .stats_reset = mlx5_stats_reset,
1449         .xstats_get = mlx5_xstats_get,
1450         .xstats_reset = mlx5_xstats_reset,
1451         .xstats_get_names = mlx5_xstats_get_names,
1452         .fw_version_get = mlx5_fw_version_get,
1453         .dev_infos_get = mlx5_dev_infos_get,
1454         .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
1455         .vlan_filter_set = mlx5_vlan_filter_set,
1456         .rx_queue_setup = mlx5_rx_queue_setup,
1457         .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
1458         .tx_queue_setup = mlx5_tx_queue_setup,
1459         .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
1460         .rx_queue_release = mlx5_rx_queue_release,
1461         .tx_queue_release = mlx5_tx_queue_release,
1462         .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
1463         .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
1464         .mac_addr_remove = mlx5_mac_addr_remove,
1465         .mac_addr_add = mlx5_mac_addr_add,
1466         .mac_addr_set = mlx5_mac_addr_set,
1467         .set_mc_addr_list = mlx5_set_mc_addr_list,
1468         .mtu_set = mlx5_dev_set_mtu,
1469         .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
1470         .vlan_offload_set = mlx5_vlan_offload_set,
1471         .filter_ctrl = mlx5_dev_filter_ctrl,
1472         .rx_descriptor_status = mlx5_rx_descriptor_status,
1473         .tx_descriptor_status = mlx5_tx_descriptor_status,
1474         .rxq_info_get = mlx5_rxq_info_get,
1475         .txq_info_get = mlx5_txq_info_get,
1476         .rx_burst_mode_get = mlx5_rx_burst_mode_get,
1477         .tx_burst_mode_get = mlx5_tx_burst_mode_get,
1478         .rx_queue_intr_enable = mlx5_rx_intr_enable,
1479         .rx_queue_intr_disable = mlx5_rx_intr_disable,
1480         .is_removed = mlx5_is_removed,
1481         .get_module_info = mlx5_get_module_info,
1482         .get_module_eeprom = mlx5_get_module_eeprom,
1483         .hairpin_cap_get = mlx5_hairpin_cap_get,
1484         .mtr_ops_get = mlx5_flow_meter_ops_get,
1485 };
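/*
 * Illustrative sketch, not part of the original file: the isolated
 * operation table above is installed when the application enables flow
 * isolation via the generic rte_flow API (the PMD swaps dev_ops inside
 * its isolate handler). A hypothetical caller looks like:
 */
static void
flow_isolate_example(uint16_t port_id)
{
	struct rte_flow_error error;

	/* Restrict the port to traffic matched by flow rules only. */
	if (rte_flow_isolate(port_id, 1, &error))
		DRV_LOG(WARNING, "cannot enable flow isolation: %s",
			error.message ? error.message : "(no message)");
}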
1486
1487 /**
1488  * Verify and store value for device argument.
1489  *
1490  * @param[in] key
1491  *   Key argument to verify.
1492  * @param[in] val
1493  *   Value associated with key.
1494  * @param opaque
1495  *   User data.
1496  *
1497  * @return
1498  *   0 on success, a negative errno value otherwise and rte_errno is set.
1499  */
1500 static int
1501 mlx5_args_check(const char *key, const char *val, void *opaque)
1502 {
1503         struct mlx5_dev_config *config = opaque;
1504         unsigned long tmp;
1505
1506         /* No-op, port representors are processed in mlx5_dev_spawn(). */
1507         if (!strcmp(MLX5_REPRESENTOR, key))
1508                 return 0;
1509         errno = 0;
1510         tmp = strtoul(val, NULL, 0);
1511         if (errno) {
1512                 rte_errno = errno;
1513                 DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
1514                 return -rte_errno;
1515         }
1516         if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
1517                 config->cqe_comp = !!tmp;
1518         } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
1519                 config->cqe_pad = !!tmp;
1520         } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
1521                 config->hw_padding = !!tmp;
1522         } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
1523                 config->mprq.enabled = !!tmp;
1524         } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
1525                 config->mprq.stride_num_n = tmp;
1526         } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
1527                 config->mprq.max_memcpy_len = tmp;
1528         } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
1529                 config->mprq.min_rxqs_num = tmp;
1530         } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
1531                 DRV_LOG(WARNING, "%s: deprecated parameter,"
1532                                  " converted to txq_inline_max", key);
1533                 config->txq_inline_max = tmp;
1534         } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
1535                 config->txq_inline_max = tmp;
1536         } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
1537                 config->txq_inline_min = tmp;
1538         } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
1539                 config->txq_inline_mpw = tmp;
1540         } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
1541                 config->txqs_inline = tmp;
1542         } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
1543                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1544         } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
1545                 config->mps = !!tmp;
1546         } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
1547                 if (tmp != MLX5_TXDB_CACHED &&
1548                     tmp != MLX5_TXDB_NCACHED &&
1549                     tmp != MLX5_TXDB_HEURISTIC) {
1550                         DRV_LOG(ERR, "invalid Tx doorbell "
1551                                      "mapping parameter");
1552                         rte_errno = EINVAL;
1553                         return -rte_errno;
1554                 }
1555                 config->dbnc = tmp;
1556         } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
1557                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1558         } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
1559                 DRV_LOG(WARNING, "%s: deprecated parameter,"
1560                                  " converted to txq_inline_mpw", key);
1561                 config->txq_inline_mpw = tmp;
1562         } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
1563                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1564         } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
1565                 config->rx_vec_en = !!tmp;
1566         } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
1567                 config->l3_vxlan_en = !!tmp;
1568         } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
1569                 config->vf_nl_en = !!tmp;
1570         } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
1571                 config->dv_esw_en = !!tmp;
1572         } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
1573                 config->dv_flow_en = !!tmp;
1574         } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
1575                 if (tmp != MLX5_XMETA_MODE_LEGACY &&
1576                     tmp != MLX5_XMETA_MODE_META16 &&
1577                     tmp != MLX5_XMETA_MODE_META32) {
1578                         DRV_LOG(ERR, "invalid extensive "
1579                                      "metadata parameter");
1580                         rte_errno = EINVAL;
1581                         return -rte_errno;
1582                 }
1583                 config->dv_xmeta_en = tmp;
1584         } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
1585                 config->mr_ext_memseg_en = !!tmp;
1586         } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
1587                 config->max_dump_files_num = tmp;
1588         } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
1589                 config->lro.timeout = tmp;
1590         } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
1591                 DRV_LOG(DEBUG, "class argument is %s.", val);
1592         } else {
1593                 DRV_LOG(WARNING, "%s: unknown parameter", key);
1594                 rte_errno = EINVAL;
1595                 return -rte_errno;
1596         }
1597         return 0;
1598 }
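/*
 * Illustrative sketch (not from the original source): the strtoul/errno
 * idiom used above, reduced to a standalone helper. The "!!tmp" coercion
 * maps any non-zero devarg value to a boolean 1.
 */
static int
parse_bool_devarg(const char *val, int *out)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno)
		return -errno; /* e.g. -ERANGE on overflow. */
	*out = !!tmp;
	return 0;
}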
1599
1600 /**
1601  * Parse device parameters.
1602  *
1603  * @param config
1604  *   Pointer to device configuration structure.
1605  * @param devargs
1606  *   Device arguments structure.
1607  *
1608  * @return
1609  *   0 on success, a negative errno value otherwise and rte_errno is set.
1610  */
1611 static int
1612 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
1613 {
1614         const char **params = (const char *[]){
1615                 MLX5_RXQ_CQE_COMP_EN,
1616                 MLX5_RXQ_CQE_PAD_EN,
1617                 MLX5_RXQ_PKT_PAD_EN,
1618                 MLX5_RX_MPRQ_EN,
1619                 MLX5_RX_MPRQ_LOG_STRIDE_NUM,
1620                 MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
1621                 MLX5_RXQS_MIN_MPRQ,
1622                 MLX5_TXQ_INLINE,
1623                 MLX5_TXQ_INLINE_MIN,
1624                 MLX5_TXQ_INLINE_MAX,
1625                 MLX5_TXQ_INLINE_MPW,
1626                 MLX5_TXQS_MIN_INLINE,
1627                 MLX5_TXQS_MAX_VEC,
1628                 MLX5_TXQ_MPW_EN,
1629                 MLX5_TXQ_MPW_HDR_DSEG_EN,
1630                 MLX5_TXQ_MAX_INLINE_LEN,
1631                 MLX5_TX_DB_NC,
1632                 MLX5_TX_VEC_EN,
1633                 MLX5_RX_VEC_EN,
1634                 MLX5_L3_VXLAN_EN,
1635                 MLX5_VF_NL_EN,
1636                 MLX5_DV_ESW_EN,
1637                 MLX5_DV_FLOW_EN,
1638                 MLX5_DV_XMETA_EN,
1639                 MLX5_MR_EXT_MEMSEG_EN,
1640                 MLX5_REPRESENTOR,
1641                 MLX5_MAX_DUMP_FILES_NUM,
1642                 MLX5_LRO_TIMEOUT_USEC,
1643                 MLX5_CLASS_ARG_NAME,
1644                 NULL,
1645         };
1646         struct rte_kvargs *kvlist;
1647         int ret = 0;
1648         int i;
1649
1650         if (devargs == NULL)
1651                 return 0;
1652         /* The UGLY cast in the "params" declaration above is done to pass checkpatch. */
1653         kvlist = rte_kvargs_parse(devargs->args, params);
1654         if (kvlist == NULL) {
1655                 rte_errno = EINVAL;
1656                 return -rte_errno;
1657         }
1658         /* Process parameters. */
1659         for (i = 0; (params[i] != NULL); ++i) {
1660                 if (rte_kvargs_count(kvlist, params[i])) {
1661                         ret = rte_kvargs_process(kvlist, params[i],
1662                                                  mlx5_args_check, config);
1663                         if (ret) {
1664                                 rte_errno = EINVAL;
1665                                 rte_kvargs_free(kvlist);
1666                                 return -rte_errno;
1667                         }
1668                 }
1669         }
1670         rte_kvargs_free(kvlist);
1671         return 0;
1672 }
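/*
 * Illustrative usage (assumption): the keys listed above arrive in the
 * PCI devargs string, e.g. "-w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1".
 * A reduced standalone demonstration of the same rte_kvargs flow:
 */
static int
print_devarg(const char *key, const char *val, void *opaque __rte_unused)
{
	DRV_LOG(DEBUG, "parsed devarg %s = %s", key, val);
	return 0;
}

static void
kvargs_flow_example(void)
{
	const char *keys[] = { MLX5_RX_MPRQ_EN, NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse("mprq_en=1", keys);

	if (kvlist == NULL)
		return;
	/* Calls print_devarg() once per "mprq_en" occurrence. */
	rte_kvargs_process(kvlist, MLX5_RX_MPRQ_EN, print_devarg, NULL);
	rte_kvargs_free(kvlist);
}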
1673
1674 static struct rte_pci_driver mlx5_driver;
1675
1676 /**
1677  * PMD global initialization.
1678  *
1679  * Independent of any individual device, this function initializes global
1680  * per-PMD data structures, distinguishing primary and secondary processes.
1681  * Hence, the initialization is performed once per process.
1682  *
1683  * @return
1684  *   0 on success, a negative errno value otherwise and rte_errno is set.
1685  */
1686 static int
1687 mlx5_init_once(void)
1688 {
1689         struct mlx5_shared_data *sd;
1690         struct mlx5_local_data *ld = &mlx5_local_data;
1691         int ret = 0;
1692
1693         if (mlx5_init_shared_data())
1694                 return -rte_errno;
1695         sd = mlx5_shared_data;
1696         MLX5_ASSERT(sd);
1697         rte_spinlock_lock(&sd->lock);
1698         switch (rte_eal_process_type()) {
1699         case RTE_PROC_PRIMARY:
1700                 if (sd->init_done)
1701                         break;
1702                 LIST_INIT(&sd->mem_event_cb_list);
1703                 rte_rwlock_init(&sd->mem_event_rwlock);
1704                 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
1705                                                 mlx5_mr_mem_event_cb, NULL);
1706                 ret = mlx5_mp_init_primary();
1707                 if (ret)
1708                         goto out;
1709                 sd->init_done = true;
1710                 break;
1711         case RTE_PROC_SECONDARY:
1712                 if (ld->init_done)
1713                         break;
1714                 ret = mlx5_mp_init_secondary();
1715                 if (ret)
1716                         goto out;
1717                 ++sd->secondary_cnt;
1718                 ld->init_done = true;
1719                 break;
1720         default:
1721                 break;
1722         }
1723 out:
1724         rte_spinlock_unlock(&sd->lock);
1725         return ret;
1726 }
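/*
 * Illustrative reduction (names invented): the once-per-process pattern
 * used above, i.e. a lock-guarded "init_done" flag so repeated probe
 * calls initialize shared state exactly once.
 */
struct once_state {
	rte_spinlock_t lock;
	bool init_done;
};

static int
run_once(struct once_state *st, int (*do_init)(void))
{
	int ret = 0;

	rte_spinlock_lock(&st->lock);
	if (!st->init_done) {
		ret = do_init();
		if (ret == 0)
			st->init_done = true;
	}
	rte_spinlock_unlock(&st->lock);
	return ret;
}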
1727
1728 /**
1729  * Configures the minimal amount of data to inline into WQE
1730  * while sending packets.
1731  *
1732  * - the txq_inline_min key has the highest priority, if this
1733  *   key is specified in devargs
1734  * - if DevX is enabled, the inline mode is queried from the
1735  *   device (HCA attributes and NIC vport context if needed).
1736  * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
1737  *   and none (0 bytes) for other NICs
1738  *
1739  * @param spawn
1740  *   Verbs device parameters (name, port, switch_info) to spawn.
1741  * @param config
1742  *   Device configuration parameters.
1743  */
1744 static void
1745 mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
1746                     struct mlx5_dev_config *config)
1747 {
1748         if (config->txq_inline_min != MLX5_ARG_UNSET) {
1749                 /* Application defines size of inlined data explicitly. */
1750                 switch (spawn->pci_dev->id.device_id) {
1751                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1752                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1753                         if (config->txq_inline_min <
1754                                        (int)MLX5_INLINE_HSIZE_L2) {
1755                                 DRV_LOG(DEBUG,
1756                                         "txq_inline_mix aligned to minimal"
1757                                         " ConnectX-4 required value %d",
1758                                         (int)MLX5_INLINE_HSIZE_L2);
1759                                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1760                         }
1761                         break;
1762                 }
1763                 goto exit;
1764         }
1765         if (config->hca_attr.eth_net_offloads) {
1766                 /* We have DevX enabled, inline mode queried successfully. */
1767                 switch (config->hca_attr.wqe_inline_mode) {
1768                 case MLX5_CAP_INLINE_MODE_L2:
1769                         /* outer L2 header must be inlined. */
1770                         config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1771                         goto exit;
1772                 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1773                         /* No inline data are required by NIC. */
1774                         config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1775                         config->hw_vlan_insert =
1776                                 config->hca_attr.wqe_vlan_insert;
1777                         DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
1778                         goto exit;
1779                 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1780                         /* inline mode is defined by NIC vport context. */
1781                         if (!config->hca_attr.eth_virt)
1782                                 break;
1783                         switch (config->hca_attr.vport_inline_mode) {
1784                         case MLX5_INLINE_MODE_NONE:
1785                                 config->txq_inline_min =
1786                                         MLX5_INLINE_HSIZE_NONE;
1787                                 goto exit;
1788                         case MLX5_INLINE_MODE_L2:
1789                                 config->txq_inline_min =
1790                                         MLX5_INLINE_HSIZE_L2;
1791                                 goto exit;
1792                         case MLX5_INLINE_MODE_IP:
1793                                 config->txq_inline_min =
1794                                         MLX5_INLINE_HSIZE_L3;
1795                                 goto exit;
1796                         case MLX5_INLINE_MODE_TCP_UDP:
1797                                 config->txq_inline_min =
1798                                         MLX5_INLINE_HSIZE_L4;
1799                                 goto exit;
1800                         case MLX5_INLINE_MODE_INNER_L2:
1801                                 config->txq_inline_min =
1802                                         MLX5_INLINE_HSIZE_INNER_L2;
1803                                 goto exit;
1804                         case MLX5_INLINE_MODE_INNER_IP:
1805                                 config->txq_inline_min =
1806                                         MLX5_INLINE_HSIZE_INNER_L3;
1807                                 goto exit;
1808                         case MLX5_INLINE_MODE_INNER_TCP_UDP:
1809                                 config->txq_inline_min =
1810                                         MLX5_INLINE_HSIZE_INNER_L4;
1811                                 goto exit;
1812                         }
1813                 }
1814         }
1815         /*
1816          * We get here if we are unable to deduce
1817          * inline data size with DevX. Try PCI ID
1818          * to determine old NICs.
1819          */
1820         switch (spawn->pci_dev->id.device_id) {
1821         case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1822         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1823         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
1824         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1825                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1826                 config->hw_vlan_insert = 0;
1827                 break;
1828         case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
1829         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1830         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
1831         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1832                 /*
1833                  * These NICs support VLAN insertion from WQE and
1834                  * report the wqe_vlan_insert flag. But there is a bug
1835                  * that may break PFC control, so the feature is disabled.
1836                  */
1837                 config->hw_vlan_insert = 0;
1838                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1839                 break;
1840         default:
1841                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1842                 break;
1843         }
1844 exit:
1845         DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
1846 }
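/*
 * Usage note (illustrative): the precedence implemented above is
 * devargs > DevX query > PCI ID defaults. For example, passing
 * "-w 0000:03:00.0,txq_inline_min=18" pins the minimal inline size to
 * the L2 header length and skips the DevX/PCI-ID deduction entirely.
 */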
1847
1848 /**
1849  * Configures the metadata mask fields in the shared context.
1850  *
1851  * @param [in] dev
1852  *   Pointer to Ethernet device.
1853  */
1854 static void
1855 mlx5_set_metadata_mask(struct rte_eth_dev *dev)
1856 {
1857         struct mlx5_priv *priv = dev->data->dev_private;
1858         struct mlx5_ibv_shared *sh = priv->sh;
1859         uint32_t meta, mark, reg_c0;
1860
1861         reg_c0 = ~priv->vport_meta_mask;
1862         switch (priv->config.dv_xmeta_en) {
1863         case MLX5_XMETA_MODE_LEGACY:
1864                 meta = UINT32_MAX;
1865                 mark = MLX5_FLOW_MARK_MASK;
1866                 break;
1867         case MLX5_XMETA_MODE_META16:
1868                 meta = reg_c0 >> rte_bsf32(reg_c0);
1869                 mark = MLX5_FLOW_MARK_MASK;
1870                 break;
1871         case MLX5_XMETA_MODE_META32:
1872                 meta = UINT32_MAX;
1873                 mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
1874                 break;
1875         default:
1876                 meta = 0;
1877                 mark = 0;
1878                 MLX5_ASSERT(false);
1879                 break;
1880         }
1881         if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
1882                 DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
1883                                  sh->dv_mark_mask, mark);
1884         else
1885                 sh->dv_mark_mask = mark;
1886         if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
1887                 DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
1888                                  sh->dv_meta_mask, meta);
1889         else
1890                 sh->dv_meta_mask = meta;
1891         if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
1892                 DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
1893                                  sh->dv_meta_mask, reg_c0);
1894         else
1895                 sh->dv_regc0_mask = reg_c0;
1896         DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
1897         DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
1898         DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
1899         DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
1900 }
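/*
 * Worked example (illustrative): suppose the E-Switch owns the low 16
 * bits of reg_c[0], i.e. priv->vport_meta_mask == 0x0000ffff, giving
 * reg_c0 == 0xffff0000. In MLX5_XMETA_MODE_META16 the code computes
 * rte_bsf32(0xffff0000) == 16 and meta == 0xffff0000 >> 16 == 0xffff,
 * so applications keep 16 usable metadata bits while the vport match
 * bits remain protected.
 */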
1901
1902 /**
1903  * Allocate a page of door-bells and register it using the DevX API.
1904  *
1905  * @param [in] dev
1906  *   Pointer to Ethernet device.
1907  *
1908  * @return
1909  *   Pointer to new page on success, NULL otherwise.
1910  */
1911 static struct mlx5_devx_dbr_page *
1912 mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
1913 {
1914         struct mlx5_priv *priv = dev->data->dev_private;
1915         struct mlx5_devx_dbr_page *page;
1916
1917         /* Allocate space for door-bell page and management data. */
1918         page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
1919                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1920         if (!page) {
1921                 DRV_LOG(ERR, "port %u cannot allocate dbr page",
1922                         dev->data->port_id);
1923                 return NULL;
1924         }
1925         /* Register allocated memory. */
1926         page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
1927                                               MLX5_DBR_PAGE_SIZE, 0);
1928         if (!page->umem) {
1929                 DRV_LOG(ERR, "port %u cannot umem reg dbr page",
1930                         dev->data->port_id);
1931                 rte_free(page);
1932                 return NULL;
1933         }
1934         return page;
1935 }
1936
1937 /**
1938  * Find the next available door-bell, allocating a new page if needed.
1939  *
1940  * @param [in] dev
1941  *   Pointer to Ethernet device.
1942  * @param [out] dbr_page
1943  *   Door-bell page containing the page data.
1944  *
1945  * @return
1946  *   Door-bell address offset on success, a negative error value otherwise.
1947  */
1948 int64_t
1949 mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
1950 {
1951         struct mlx5_priv *priv = dev->data->dev_private;
1952         struct mlx5_devx_dbr_page *page = NULL;
1953         uint32_t i, j;
1954
1955         LIST_FOREACH(page, &priv->dbrpgs, next)
1956                 if (page->dbr_count < MLX5_DBR_PER_PAGE)
1957                         break;
1958         if (!page) { /* No page with free door-bell exists. */
1959                 page = mlx5_alloc_dbr_page(dev);
1960                 if (!page) /* Failed to allocate new page. */
1961                         return (-1);
1962                 LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
1963         }
1964         /* Loop to find bitmap part with clear bit. */
1965         for (i = 0;
1966              i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
1967              i++)
1968                 ; /* Empty. */
1969         /* Find the first clear bit. */
1970         j = rte_bsf64(~page->dbr_bitmap[i]);
1971         MLX5_ASSERT(i < (MLX5_DBR_PER_PAGE / 64));
1972         page->dbr_bitmap[i] |= (1ULL << j);
1973         page->dbr_count++;
1974         *dbr_page = page;
1975         return (((i * 64) + j) * sizeof(uint64_t));
1976 }
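/*
 * Worked example (illustrative): if the first bitmap word with a clear
 * bit is i == 1 and its lowest clear bit is j == 3, the door-bell index
 * is 1 * 64 + 3 == 67 and the returned byte offset is 67 * 8 == 536.
 * mlx5_release_dbr() reverses this: offset / MLX5_DBR_SIZE recovers the
 * index, then / 64 and % 64 locate the same bitmap bit.
 */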
1977
1978 /**
1979  * Release a door-bell record.
1980  *
1981  * @param [in] dev
1982  *   Pointer to Ethernet device.
1983  * @param [in] umem_id
1984  *   UMEM ID of page containing the door-bell record to release.
1985  * @param [in] offset
1986  *   Offset of door-bell record in page.
1987  *
1988  * @return
1989  *   0 on success, a negative error value otherwise.
1990  */
1991 int32_t
1992 mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
1993 {
1994         struct mlx5_priv *priv = dev->data->dev_private;
1995         struct mlx5_devx_dbr_page *page = NULL;
1996         int ret = 0;
1997
1998         LIST_FOREACH(page, &priv->dbrpgs, next)
1999                 /* Find the page this address belongs to. */
2000                 if (page->umem->umem_id == umem_id)
2001                         break;
2002         if (!page)
2003                 return -EINVAL;
2004         page->dbr_count--;
2005         if (!page->dbr_count) {
2006                 /* Page not used, free it and remove from list. */
2007                 LIST_REMOVE(page, next);
2008                 if (page->umem)
2009                         ret = -mlx5_glue->devx_umem_dereg(page->umem);
2010                 rte_free(page);
2011         } else {
2012                 /* Mark in bitmap that this door-bell is not in use. */
2013                 offset /= MLX5_DBR_SIZE;
2014                 int i = offset / 64;
2015                 int j = offset % 64;
2016
2017                 page->dbr_bitmap[i] &= ~(1ULL << j);
2018         }
2019         return ret;
2020 }
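/*
 * Illustrative pairing (sketch, queue programming elided): a DevX queue
 * object would take a door-bell record at creation time and return it
 * on destruction.
 */
static int
dbr_lifecycle_example(struct rte_eth_dev *dev)
{
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	int64_t offset = mlx5_get_dbr(dev, &dbr_page);

	if (offset < 0)
		return -ENOMEM;
	/* ... pass dbr_page->umem->umem_id and offset to the queue ... */
	return mlx5_release_dbr(dev, dbr_page->umem->umem_id,
				(uint64_t)offset);
}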
2021
2022 int
2023 rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
2024 {
2025         static const char *const dynf_names[] = {
2026                 RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
2027                 RTE_MBUF_DYNFLAG_METADATA_NAME
2028         };
2029         unsigned int i;
2030
2031         if (n < RTE_DIM(dynf_names))
2032                 return -ENOMEM;
2033         for (i = 0; i < RTE_DIM(dynf_names); i++) {
2034                 if (names[i] == NULL)
2035                         return -EINVAL;
2036                 strcpy(names[i], dynf_names[i]);
2037         }
2038         return RTE_DIM(dynf_names);
2039 }
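/*
 * Illustrative caller (sketch): buffers of RTE_MBUF_DYN_NAMESIZE bytes
 * are assumed large enough for the dynamic flag names returned above.
 */
static void
dyn_flag_names_example(void)
{
	char buf0[RTE_MBUF_DYN_NAMESIZE];
	char buf1[RTE_MBUF_DYN_NAMESIZE];
	char *names[] = { buf0, buf1 };
	int nb = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));

	if (nb > 0)
		DRV_LOG(DEBUG, "%d dynamic flag names, first is \"%s\"",
			nb, names[0]);
}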
2040
2041 /**
2042  * Check sibling device configurations.
2043  *
2044  * Sibling devices sharing the Infiniband device context
2045  * should have compatible configurations. This regards
2046  * representors and bonding slaves.
2047  *
2048  * @param priv
2049  *   Private device descriptor.
2050  * @param config
2051  *   Configuration of the device to be created.
2052  *
2053  * @return
2054  *   0 on success, EINVAL otherwise
2055  */
2056 static int
2057 mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
2058                               struct mlx5_dev_config *config)
2059 {
2060         struct mlx5_ibv_shared *sh = priv->sh;
2061         struct mlx5_dev_config *sh_conf = NULL;
2062         uint16_t port_id;
2063
2064         MLX5_ASSERT(sh);
2065         /* Nothing to compare for the single/first device. */
2066         if (sh->refcnt == 1)
2067                 return 0;
2068         /* Find the device with shared context. */
2069         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
2070                 struct mlx5_priv *opriv =
2071                         rte_eth_devices[port_id].data->dev_private;
2072
2073                 if (opriv && opriv != priv && opriv->sh == sh) {
2074                         sh_conf = &opriv->config;
2075                         break;
2076                 }
2077         }
2078         if (!sh_conf)
2079                 return 0;
2080         if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
2081                 DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
2082                              " for shared %s context", sh->ibdev_name);
2083                 rte_errno = EINVAL;
2084                 return rte_errno;
2085         }
2086         if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
2087                 DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
2088                              " for shared %s context", sh->ibdev_name);
2089                 rte_errno = EINVAL;
2090                 return rte_errno;
2091         }
2092         return 0;
2093 }
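/*
 * Usage note (illustrative): sibling ports typically come from one
 * representor spec such as "-w 0000:03:00.0,representor=[0-2],dv_flow_en=1".
 * Since all siblings share a single devargs string, dv_flow_en and
 * dv_xmeta_en cannot legitimately diverge; the checks above reject a
 * mismatched re-spawn with EINVAL.
 */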
2094 /**
2095  * Spawn an Ethernet device from Verbs information.
2096  *
2097  * @param dpdk_dev
2098  *   Backing DPDK device.
2099  * @param spawn
2100  *   Verbs device parameters (name, port, switch_info) to spawn.
2101  * @param config
2102  *   Device configuration parameters.
2103  *
2104  * @return
2105  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
2106  *   is set. The following errors are defined:
2107  *
2108  *   EBUSY: device is not supposed to be spawned.
2109  *   EEXIST: device is already spawned
2110  */
2111 static struct rte_eth_dev *
2112 mlx5_dev_spawn(struct rte_device *dpdk_dev,
2113                struct mlx5_dev_spawn_data *spawn,
2114                struct mlx5_dev_config config)
2115 {
2116         const struct mlx5_switch_info *switch_info = &spawn->info;
2117         struct mlx5_ibv_shared *sh = NULL;
2118         struct ibv_port_attr port_attr;
2119         struct mlx5dv_context dv_attr = { .comp_mask = 0 };
2120         struct rte_eth_dev *eth_dev = NULL;
2121         struct mlx5_priv *priv = NULL;
2122         int err = 0;
2123         unsigned int hw_padding = 0;
2124         unsigned int mps;
2125         unsigned int cqe_comp;
2126         unsigned int cqe_pad = 0;
2127         unsigned int tunnel_en = 0;
2128         unsigned int mpls_en = 0;
2129         unsigned int swp = 0;
2130         unsigned int mprq = 0;
2131         unsigned int mprq_min_stride_size_n = 0;
2132         unsigned int mprq_max_stride_size_n = 0;
2133         unsigned int mprq_min_stride_num_n = 0;
2134         unsigned int mprq_max_stride_num_n = 0;
2135         struct rte_ether_addr mac;
2136         char name[RTE_ETH_NAME_MAX_LEN];
2137         int own_domain_id = 0;
2138         uint16_t port_id;
2139         unsigned int i;
2140 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2141         struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
2142 #endif
2143
2144         /* Determine if this port representor is supposed to be spawned. */
2145         if (switch_info->representor && dpdk_dev->devargs) {
2146                 struct rte_eth_devargs eth_da;
2147
2148                 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
2149                 if (err) {
2150                         rte_errno = -err;
2151                         DRV_LOG(ERR, "failed to process device arguments: %s",
2152                                 strerror(rte_errno));
2153                         return NULL;
2154                 }
2155                 for (i = 0; i < eth_da.nb_representor_ports; ++i)
2156                         if (eth_da.representor_ports[i] ==
2157                             (uint16_t)switch_info->port_name)
2158                                 break;
2159                 if (i == eth_da.nb_representor_ports) {
2160                         rte_errno = EBUSY;
2161                         return NULL;
2162                 }
2163         }
2164         /* Build device name. */
2165         if (spawn->pf_bond < 0) {
2166                 /* Single device. */
2167                 if (!switch_info->representor)
2168                         strlcpy(name, dpdk_dev->name, sizeof(name));
2169                 else
2170                         snprintf(name, sizeof(name), "%s_representor_%u",
2171                                  dpdk_dev->name, switch_info->port_name);
2172         } else {
2173                 /* Bonding device. */
2174                 if (!switch_info->representor)
2175                         snprintf(name, sizeof(name), "%s_%s",
2176                                  dpdk_dev->name, spawn->ibv_dev->name);
2177                 else
2178                         snprintf(name, sizeof(name), "%s_%s_representor_%u",
2179                                  dpdk_dev->name, spawn->ibv_dev->name,
2180                                  switch_info->port_name);
2181         }
2182         /* check if the device is already spawned */
2183         if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
2184                 rte_errno = EEXIST;
2185                 return NULL;
2186         }
2187         DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
2188         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2189                 eth_dev = rte_eth_dev_attach_secondary(name);
2190                 if (eth_dev == NULL) {
2191                         DRV_LOG(ERR, "can not attach rte ethdev");
2192                         rte_errno = ENOMEM;
2193                         return NULL;
2194                 }
2195                 eth_dev->device = dpdk_dev;
2196                 eth_dev->dev_ops = &mlx5_dev_sec_ops;
2197                 err = mlx5_proc_priv_init(eth_dev);
2198                 if (err)
2199                         return NULL;
2200                 /* Receive command fd from primary process */
2201                 err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
2202                 if (err < 0)
2203                         return NULL;
2204                 /* Remap UAR for Tx queues. */
2205                 err = mlx5_tx_uar_init_secondary(eth_dev, err);
2206                 if (err)
2207                         return NULL;
2208                 /*
2209                  * Ethdev pointer is still required as input since
2210                  * the primary device is not accessible from the
2211                  * secondary process.
2212                  */
2213                 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
2214                 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
2215                 return eth_dev;
2216         }
2217         /*
2218          * Some parameters ("tx_db_nc" in particular) are needed in
2219          * advance to create the dv/verbs device context. We process
2220          * the devargs here to get them, and later process the devargs
2221          * again to override some hardware settings.
2222          */
2223         err = mlx5_args(&config, dpdk_dev->devargs);
2224         if (err) {
2225                 err = rte_errno;
2226                 DRV_LOG(ERR, "failed to process device arguments: %s",
2227                         strerror(rte_errno));
2228                 goto error;
2229         }
2230         sh = mlx5_alloc_shared_ibctx(spawn, &config);
2231         if (!sh)
2232                 return NULL;
2233         config.devx = sh->devx;
2234 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
2235         config.dest_tir = 1;
2236 #endif
2237 #ifdef HAVE_IBV_MLX5_MOD_SWP
2238         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
2239 #endif
2240         /*
2241          * Multi-packet send is supported by ConnectX-4 Lx PF as well
2242          * as all ConnectX-5 devices.
2243          */
2244 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2245         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
2246 #endif
2247 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
2248         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
2249 #endif
2250         mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
2251         if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
2252                 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
2253                         DRV_LOG(DEBUG, "enhanced MPW is supported");
2254                         mps = MLX5_MPW_ENHANCED;
2255                 } else {
2256                         DRV_LOG(DEBUG, "MPW is supported");
2257                         mps = MLX5_MPW;
2258                 }
2259         } else {
2260                 DRV_LOG(DEBUG, "MPW isn't supported");
2261                 mps = MLX5_MPW_DISABLED;
2262         }
2263 #ifdef HAVE_IBV_MLX5_MOD_SWP
2264         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
2265                 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
2266         DRV_LOG(DEBUG, "SWP support: %u", swp);
2267 #endif
2268         config.swp = !!swp;
2269 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
2270         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
2271                 struct mlx5dv_striding_rq_caps mprq_caps =
2272                         dv_attr.striding_rq_caps;
2273
2274                 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
2275                         mprq_caps.min_single_stride_log_num_of_bytes);
2276                 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
2277                         mprq_caps.max_single_stride_log_num_of_bytes);
2278                 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
2279                         mprq_caps.min_single_wqe_log_num_of_strides);
2280                 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
2281                         mprq_caps.max_single_wqe_log_num_of_strides);
2282                 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
2283                         mprq_caps.supported_qpts);
2284                 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
2285                 mprq = 1;
2286                 mprq_min_stride_size_n =
2287                         mprq_caps.min_single_stride_log_num_of_bytes;
2288                 mprq_max_stride_size_n =
2289                         mprq_caps.max_single_stride_log_num_of_bytes;
2290                 mprq_min_stride_num_n =
2291                         mprq_caps.min_single_wqe_log_num_of_strides;
2292                 mprq_max_stride_num_n =
2293                         mprq_caps.max_single_wqe_log_num_of_strides;
2294                 config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
2295                                                    mprq_min_stride_num_n);
2296         }
2297 #endif
2298         if (RTE_CACHE_LINE_SIZE == 128 &&
2299             !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
2300                 cqe_comp = 0;
2301         else
2302                 cqe_comp = 1;
2303         config.cqe_comp = cqe_comp;
2304 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
2305         /* Whether device supports 128B Rx CQE padding. */
2306         cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
2307                   (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
2308 #endif
2309 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2310         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
2311                 tunnel_en = ((dv_attr.tunnel_offloads_caps &
2312                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
2313                              (dv_attr.tunnel_offloads_caps &
2314                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
2315                              (dv_attr.tunnel_offloads_caps &
2316                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
2317         }
2318         DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
2319                 tunnel_en ? "" : "not ");
2320 #else
2321         DRV_LOG(WARNING,
2322                 "tunnel offloading disabled due to old OFED/rdma-core version");
2323 #endif
2324         config.tunnel_en = tunnel_en;
2325 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2326         mpls_en = ((dv_attr.tunnel_offloads_caps &
2327                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
2328                    (dv_attr.tunnel_offloads_caps &
2329                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
2330         DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
2331                 mpls_en ? "" : "not ");
2332 #else
2333         DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
2334                 " old OFED/rdma-core version or firmware configuration");
2335 #endif
2336         config.mpls_en = mpls_en;
2337         /* Check port status. */
2338         err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
2339         if (err) {
2340                 DRV_LOG(ERR, "port query failed: %s", strerror(err));
2341                 goto error;
2342         }
2343         if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
2344                 DRV_LOG(ERR, "port is not configured in Ethernet mode");
2345                 err = EINVAL;
2346                 goto error;
2347         }
2348         if (port_attr.state != IBV_PORT_ACTIVE)
2349                 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
2350                         mlx5_glue->port_state_str(port_attr.state),
2351                         port_attr.state);
2352         /* Allocate private eth device data. */
2353         priv = rte_zmalloc("ethdev private structure",
2354                            sizeof(*priv),
2355                            RTE_CACHE_LINE_SIZE);
2356         if (priv == NULL) {
2357                 DRV_LOG(ERR, "priv allocation failure");
2358                 err = ENOMEM;
2359                 goto error;
2360         }
2361         priv->sh = sh;
2362         priv->ibv_port = spawn->ibv_port;
2363         priv->pci_dev = spawn->pci_dev;
2364         priv->mtu = RTE_ETHER_MTU;
2365 #ifndef RTE_ARCH_64
2366         /* Initialize UAR access locks for 32bit implementations. */
2367         rte_spinlock_init(&priv->uar_lock_cq);
2368         for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
2369                 rte_spinlock_init(&priv->uar_lock[i]);
2370 #endif
2371         /* Some internal functions rely on Netlink sockets, open them now. */
2372         priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
2373         priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
2374         priv->representor = !!switch_info->representor;
2375         priv->master = !!switch_info->master;
2376         priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2377         priv->vport_meta_tag = 0;
2378         priv->vport_meta_mask = 0;
2379         priv->pf_bond = spawn->pf_bond;
2380 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2381         /*
2382          * The DevX port query API is implemented. E-Switch may use
2383          * either vport or reg_c[0] metadata register to match on
2384          * vport index. The engaged part of metadata register is
2385          * defined by mask.
2386          */
2387         if (switch_info->representor || switch_info->master) {
2388                 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
2389                                       MLX5DV_DEVX_PORT_MATCH_REG_C_0;
2390                 err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port,
2391                                                  &devx_port);
2392                 if (err) {
2393                         DRV_LOG(WARNING,
2394                                 "can't query devx port %d on device %s",
2395                                 spawn->ibv_port, spawn->ibv_dev->name);
2396                         devx_port.comp_mask = 0;
2397                 }
2398         }
2399         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
2400                 priv->vport_meta_tag = devx_port.reg_c_0.value;
2401                 priv->vport_meta_mask = devx_port.reg_c_0.mask;
2402                 if (!priv->vport_meta_mask) {
2403                         DRV_LOG(ERR, "vport zero mask for port %d"
2404                                      " on bonding device %s",
2405                                      spawn->ibv_port, spawn->ibv_dev->name);
2406                         err = ENOTSUP;
2407                         goto error;
2408                 }
2409                 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
2410                         DRV_LOG(ERR, "invalid vport tag for port %d"
2411                                      " on bonding device %s",
2412                                      spawn->ibv_port, spawn->ibv_dev->name);
2413                         err = ENOTSUP;
2414                         goto error;
2415                 }
2416         }
2417         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
2418                 priv->vport_id = devx_port.vport_num;
2419         } else if (spawn->pf_bond >= 0) {
2420                 DRV_LOG(ERR, "can't deduce vport index for port %d"
2421                              " on bonding device %s",
2422                              spawn->ibv_port, spawn->ibv_dev->name);
2423                 err = ENOTSUP;
2424                 goto error;
2425         } else {
2426                 /* Deduce the vport index in a compatible way. */
2427                 priv->vport_id = switch_info->representor ?
2428                                  switch_info->port_name + 1 : -1;
2429         }
2430 #else
2431         /*
2432          * Kernel/rdma_core supports single E-Switch per PF configurations
2433          * only and the vport_id field contains the vport index for the
2434          * associated VF, which is deduced from the representor port name.
2435          * For example, let's have the IB device port 10, it has
2436          * attached network device eth0, which has port name attribute
2437          * pf0vf2, we can deduce the VF number as 2, and set vport index
2438          * as 3 (2+1). This assignment scheme should be changed if
2439          * multiple E-Switch instances per PF configurations and/or PCI
2440          * subfunctions are added.
2441          */
2442         priv->vport_id = switch_info->representor ?
2443                          switch_info->port_name + 1 : -1;
2444 #endif
2445         /* representor_id field keeps the unmodified VF index. */
2446         priv->representor_id = switch_info->representor ?
2447                                switch_info->port_name : -1;
2448         /*
2449          * Look for sibling devices in order to reuse their switch domain
2450          * if any, otherwise allocate one.
2451          */
2452         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
2453                 const struct mlx5_priv *opriv =
2454                         rte_eth_devices[port_id].data->dev_private;
2455
2456                 if (!opriv ||
2457                     opriv->sh != priv->sh ||
2458                         opriv->domain_id ==
2459                         RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
2460                         continue;
2461                 priv->domain_id = opriv->domain_id;
2462                 break;
2463         }
2464         if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
2465                 err = rte_eth_switch_domain_alloc(&priv->domain_id);
2466                 if (err) {
2467                         err = rte_errno;
2468                         DRV_LOG(ERR, "unable to allocate switch domain: %s",
2469                                 strerror(rte_errno));
2470                         goto error;
2471                 }
2472                 own_domain_id = 1;
2473         }
2474         /* Override some values set by hardware configuration. */
2475         mlx5_args(&config, dpdk_dev->devargs);
2476         err = mlx5_dev_check_sibling_config(priv, &config);
2477         if (err)
2478                 goto error;
2479         config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
2480                             IBV_DEVICE_RAW_IP_CSUM);
2481         DRV_LOG(DEBUG, "checksum offloading is %ssupported",
2482                 (config.hw_csum ? "" : "not "));
2483 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
2484         !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
2485         DRV_LOG(DEBUG, "counters are not supported");
2486 #endif
2487 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
2488         if (config.dv_flow_en) {
2489                 DRV_LOG(WARNING, "DV flow is not supported");
2490                 config.dv_flow_en = 0;
2491         }
2492 #endif
2493         config.ind_table_max_size =
2494                 sh->device_attr.rss_caps.max_rwq_indirection_table_size;
2495         /*
2496          * Remove this check once DPDK supports larger/variable
2497          * indirection tables.
2498          */
2499         if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
2500                 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
2501         DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
2502                 config.ind_table_max_size);
2503         config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
2504                                   IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
2505         DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
2506                 (config.hw_vlan_strip ? "" : "not "));
2507         config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
2508                                  IBV_RAW_PACKET_CAP_SCATTER_FCS);
2509         DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
2510                 (config.hw_fcs_strip ? "" : "not "));
2511 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
2512         hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
2513 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
2514         hw_padding = !!(sh->device_attr.device_cap_flags_ex &
2515                         IBV_DEVICE_PCI_WRITE_END_PADDING);
2516 #endif
2517         if (config.hw_padding && !hw_padding) {
2518                 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
2519                 config.hw_padding = 0;
2520         } else if (config.hw_padding) {
2521                 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
2522         }
2523         config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
2524                       (sh->device_attr.tso_caps.supported_qpts &
2525                        (1 << IBV_QPT_RAW_PACKET)));
2526         if (config.tso)
2527                 config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
2528         /*
2529          * MPW is disabled by default, while the Enhanced MPW is enabled
2530          * by default.
2531          */
2532         if (config.mps == MLX5_ARG_UNSET)
2533                 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
2534                                                           MLX5_MPW_DISABLED;
2535         else
2536                 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
2537         DRV_LOG(INFO, "%sMPS is %s",
2538                 config.mps == MLX5_MPW_ENHANCED ? "enhanced " :
2539                 config.mps == MLX5_MPW ? "legacy " : "",
2540                 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
2541         if (config.cqe_comp && !cqe_comp) {
2542                 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
2543                 config.cqe_comp = 0;
2544         }
2545         if (config.cqe_pad && !cqe_pad) {
2546                 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
2547                 config.cqe_pad = 0;
2548         } else if (config.cqe_pad) {
2549                 DRV_LOG(INFO, "Rx CQE padding is enabled");
2550         }
2551         if (config.devx) {
2552                 priv->counter_fallback = 0;
2553                 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
2554                 if (err) {
2555                         err = -err;
2556                         goto error;
2557                 }
2558                 if (!config.hca_attr.flow_counters_dump)
2559                         priv->counter_fallback = 1;
2560 #ifndef HAVE_IBV_DEVX_ASYNC
2561                 priv->counter_fallback = 1;
2562 #endif
2563                 if (priv->counter_fallback)
2564                         DRV_LOG(INFO, "Use fall-back DV counter management");
2565                 /* Check for LRO support. */
2566                 if (config.dest_tir && config.hca_attr.lro_cap &&
2567                     config.dv_flow_en) {
2568                         /* TBD check tunnel lro caps. */
2569                         config.lro.supported = config.hca_attr.lro_cap;
2570                         DRV_LOG(DEBUG, "Device supports LRO");
2571                         /*
2572                          * If LRO timeout is not configured by application,
2573                          * use the minimal supported value.
2574                          */
2575                         if (!config.lro.timeout)
2576                                 config.lro.timeout =
2577                                 config.hca_attr.lro_timer_supported_periods[0];
2578                         DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
2579                                 config.lro.timeout);
2580                 }
2581 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
2582                 if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup &&
2583                     config.dv_flow_en) {
2584                         uint8_t reg_c_mask =
2585                                 config.hca_attr.qos.flow_meter_reg_c_ids;
2586                         /*
2587                          * Meter needs two REG_C's for color match and pre-sfx
2588                          * flow match. Here get the REG_C for color match.
2589                          * REG_C_0 and REG_C_1 are reserved for the metadata feature.
2590                          */
2591                         reg_c_mask &= 0xfc;
2592                         if (__builtin_popcount(reg_c_mask) < 1) {
2593                                 priv->mtr_en = 0;
2594                                 DRV_LOG(WARNING, "No available register for"
2595                                         " meter.");
2596                         } else {
2597                                 priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
2598                                                       REG_C_0;
2599                                 priv->mtr_en = 1;
2600                                 priv->mtr_reg_share =
2601                                       config.hca_attr.qos.flow_meter_reg_share;
2602                                 DRV_LOG(DEBUG, "The REG_C meter uses is %d",
2603                                         priv->mtr_color_reg);
2604                         }
2605                 }
2606 #endif
2607         }
2608         if (config.mprq.enabled && mprq) {
2609                 if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
2610                     config.mprq.stride_num_n < mprq_min_stride_num_n) {
2611                         config.mprq.stride_num_n =
2612                                 RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
2613                                         mprq_min_stride_num_n);
2614                         DRV_LOG(WARNING,
2615                                 "the number of strides"
2616                                 " for Multi-Packet RQ is out of range,"
2617                                 " setting default value (%u)",
2618                                 1 << config.mprq.stride_num_n);
2619                 }
2620                 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
2621                 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
2622         } else if (config.mprq.enabled && !mprq) {
2623                 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
2624                 config.mprq.enabled = 0;
2625         }
2626         if (config.max_dump_files_num == 0)
2627                 config.max_dump_files_num = 128;
2628         eth_dev = rte_eth_dev_allocate(name);
2629         if (eth_dev == NULL) {
2630                 DRV_LOG(ERR, "can not allocate rte ethdev");
2631                 err = ENOMEM;
2632                 goto error;
2633         }
2634         /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
2635         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2636         if (priv->representor) {
2637                 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
2638                 eth_dev->data->representor_id = priv->representor_id;
2639         }
2640         /*
2641          * Store associated network device interface index. This index
2642          * is permanent throughout the lifetime of the device, so we may
2643          * store the ifindex here and use the cached value later.
2644          */
2645         MLX5_ASSERT(spawn->ifindex);
2646         priv->if_index = spawn->ifindex;
2647         eth_dev->data->dev_private = priv;
2648         priv->dev_data = eth_dev->data;
2649         eth_dev->data->mac_addrs = priv->mac;
2650         eth_dev->device = dpdk_dev;
2651         /* Configure the first MAC address by default. */
2652         if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
2653                 DRV_LOG(ERR,
2654                         "port %u cannot get MAC address, is mlx5_en"
2655                         " loaded? (errno: %s)",
2656                         eth_dev->data->port_id, strerror(rte_errno));
2657                 err = ENODEV;
2658                 goto error;
2659         }
2660         DRV_LOG(INFO,
2661                 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
2662                 eth_dev->data->port_id,
2663                 mac.addr_bytes[0], mac.addr_bytes[1],
2664                 mac.addr_bytes[2], mac.addr_bytes[3],
2665                 mac.addr_bytes[4], mac.addr_bytes[5]);
2666 #ifdef RTE_LIBRTE_MLX5_DEBUG
2667         {
2668                 char ifname[IF_NAMESIZE];
2669
2670                 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
2671                         DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
2672                                 eth_dev->data->port_id, ifname);
2673                 else
2674                         DRV_LOG(DEBUG, "port %u ifname is unknown",
2675                                 eth_dev->data->port_id);
2676         }
2677 #endif
2678         /* Get actual MTU if possible. */
2679         err = mlx5_get_mtu(eth_dev, &priv->mtu);
2680         if (err) {
2681                 err = rte_errno;
2682                 goto error;
2683         }
2684         DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
2685                 priv->mtu);
2686         /* Initialize burst functions to prevent crashes before link-up. */
2687         eth_dev->rx_pkt_burst = removed_rx_burst;
2688         eth_dev->tx_pkt_burst = removed_tx_burst;
2689         eth_dev->dev_ops = &mlx5_dev_ops;
2690         /* Register MAC address. */
2691         claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
2692         if (config.vf && config.vf_nl_en)
2693                 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
2694                                       mlx5_ifindex(eth_dev),
2695                                       eth_dev->data->mac_addrs,
2696                                       MLX5_MAX_MAC_ADDRESSES);
2697         TAILQ_INIT(&priv->flows);
2698         TAILQ_INIT(&priv->ctrl_flows);
2699         TAILQ_INIT(&priv->flow_meters);
2700         TAILQ_INIT(&priv->flow_meter_profiles);
2701         /* Hint libmlx5 to use PMD allocator for data plane resources. */
2702         struct mlx5dv_ctx_allocators alctr = {
2703                 .alloc = &mlx5_alloc_verbs_buf,
2704                 .free = &mlx5_free_verbs_buf,
2705                 .data = priv,
2706         };
2707         mlx5_glue->dv_set_context_attr(sh->ctx,
2708                                        MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2709                                        (void *)((uintptr_t)&alctr));
2710         /* Bring Ethernet device up. */
2711         DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
2712                 eth_dev->data->port_id);
2713         mlx5_set_link_up(eth_dev);
2714         /*
2715          * Even though the interrupt handler is not installed yet,
2716          * interrupts will still trigger on the async_fd from the
2717          * Verbs context returned by ibv_open_device().
2718          */
2719         mlx5_link_update(eth_dev, 0);
2720 #ifdef HAVE_MLX5DV_DR_ESWITCH
2721         if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
2722               (switch_info->representor || switch_info->master)))
2723                 config.dv_esw_en = 0;
2724 #else
2725         config.dv_esw_en = 0;
2726 #endif
2727         /* Detect minimal data bytes to inline. */
2728         mlx5_set_min_inline(spawn, &config);
2729         /* Store device configuration on private structure. */
2730         priv->config = config;
2731         /* Create context for virtual machine VLAN workaround. */
2732         priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
2733         if (config.dv_flow_en) {
2734                 err = mlx5_alloc_shared_dr(priv);
2735                 if (err)
2736                         goto error;
2737                 /*
2738                  * RSS id is shared with meter flow id. Meter flow id can only
2739                  * use the 24 MSB of the register.
2740                  */
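                /*
                 * Illustration (assumed values, for clarity only): if
                 * MLX5_MTR_COLOR_BITS were 8, the pool below would hand
                 * out ids no larger than UINT32_MAX >> 8 == 0x00FFFFFF,
                 * i.e. ids fitting the 24 MSB shared with meter flow ids.
                 */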
2741                 priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
2742                                      MLX5_MTR_COLOR_BITS);
2743                 if (!priv->qrss_id_pool) {
2744                         DRV_LOG(ERR, "can't create flow id pool");
2745                         err = ENOMEM;
2746                         goto error;
2747                 }
2748         }
2749         /* Supported Verbs flow priority number detection. */
2750         err = mlx5_flow_discover_priorities(eth_dev);
2751         if (err < 0) {
2752                 err = -err;
2753                 goto error;
2754         }
2755         priv->config.flow_prio = err;
2756         if (!priv->config.dv_esw_en &&
2757             priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2758                 DRV_LOG(WARNING, "metadata mode %u is not supported "
2759                                  "(no E-Switch)", priv->config.dv_xmeta_en);
2760                 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
2761         }
2762         mlx5_set_metadata_mask(eth_dev);
2763         if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
2764             !priv->sh->dv_regc0_mask) {
2765                 DRV_LOG(ERR, "metadata mode %u is not supported "
2766                              "(no metadata reg_c[0] is available)",
2767                              priv->config.dv_xmeta_en);
2768                 err = ENOTSUP;
2769                 goto error;
2770         }
2771         /* Query availability of metadata reg_c's. */
2772         err = mlx5_flow_discover_mreg_c(eth_dev);
2773         if (err < 0) {
2774                 err = -err;
2775                 goto error;
2776         }
2777         if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
2778                 DRV_LOG(DEBUG,
2779                         "port %u extensive metadata register is not supported",
2780                         eth_dev->data->port_id);
2781                 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2782                         DRV_LOG(ERR, "metadata mode %u is not supported "
2783                                      "(no metadata registers available)",
2784                                      priv->config.dv_xmeta_en);
2785                         err = ENOTSUP;
2786                         goto error;
2787                 }
2788         }
2789         if (priv->config.dv_flow_en &&
2790             priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
2791             mlx5_flow_ext_mreg_supported(eth_dev) &&
2792             priv->sh->dv_regc0_mask) {
2793                 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
2794                                                       MLX5_FLOW_MREG_HTABLE_SZ);
2795                 if (!priv->mreg_cp_tbl) {
2796                         err = ENOMEM;
2797                         goto error;
2798                 }
2799         }
2800         return eth_dev;
2801 error:
2802         if (priv) {
2803                 if (priv->mreg_cp_tbl)
2804                         mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
2805                 if (priv->sh)
2806                         mlx5_free_shared_dr(priv);
2807                 if (priv->nl_socket_route >= 0)
2808                         close(priv->nl_socket_route);
2809                 if (priv->nl_socket_rdma >= 0)
2810                         close(priv->nl_socket_rdma);
2811                 if (priv->vmwa_context)
2812                         mlx5_vlan_vmwa_exit(priv->vmwa_context);
2813                 if (priv->qrss_id_pool)
2814                         mlx5_flow_id_pool_release(priv->qrss_id_pool);
2815                 if (own_domain_id)
2816                         claim_zero(rte_eth_switch_domain_free(priv->domain_id));
2817                 rte_free(priv);
2818                 if (eth_dev != NULL)
2819                         eth_dev->data->dev_private = NULL;
2820         }
2821         if (eth_dev != NULL) {
2822                 /* mac_addrs must not be freed alone because it is part of dev_private. */
2823                 eth_dev->data->mac_addrs = NULL;
2824                 rte_eth_dev_release_port(eth_dev);
2825         }
2826         if (sh)
2827                 mlx5_free_shared_ibctx(sh);
2828         MLX5_ASSERT(err > 0);
2829         rte_errno = err;
2830         return NULL;
2831 }
2832
2833 /**
2834  * Comparison callback to sort device data.
2835  *
2836  * This is meant to be used with qsort().
2837  *
2838  * @param[in] a
2839  *   Pointer to pointer to first data object.
2840  * @param[in] b
2841  *   Pointer to pointer to second data object.
2842  *
2843  * @return
2844  *   0 if both objects are equal, less than 0 if the first argument is less
2845  *   than the second, greater than 0 otherwise.
2846  */
2847 static int
2848 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
2849 {
2850         const struct mlx5_switch_info *si_a =
2851                 &((const struct mlx5_dev_spawn_data *)a)->info;
2852         const struct mlx5_switch_info *si_b =
2853                 &((const struct mlx5_dev_spawn_data *)b)->info;
2854         int ret;
2855
2856         /* Master device first. */
2857         ret = si_b->master - si_a->master;
2858         if (ret)
2859                 return ret;
2860         /* Then representor devices. */
2861         ret = si_b->representor - si_a->representor;
2862         if (ret)
2863                 return ret;
2864         /* Unidentified devices come last in no specific order. */
2865         if (!si_a->representor)
2866                 return 0;
2867         /* Order representors by name. */
2868         return si_a->port_name - si_b->port_name;
2869 }
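/*
 * Illustrative (hypothetical) result of sorting with the callback above:
 * { representor#2, master, representor#0 } becomes
 * { master, representor#0, representor#2 }; devices that are neither
 * master nor representor come after those, in no particular order.
 */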
2870
2871 /**
2872  * Match PCI information for possible slaves of bonding device.
2873  *
2874  * @param[in] ibv_dev
2875  *   Pointer to Infiniband device structure.
2876  * @param[in] pci_dev
2877  *   Pointer to PCI device structure to match PCI address.
2878  * @param[in] nl_rdma
2879  *   Netlink RDMA group socket handle.
2880  *
2881  * @return
2882  *   negative value if no bonding device is found, otherwise
2883  *   the non-negative index of the slave PF in bonding.
2884  */
2885 static int
2886 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
2887                            const struct rte_pci_device *pci_dev,
2888                            int nl_rdma)
2889 {
2890         char ifname[IF_NAMESIZE + 1];
2891         unsigned int ifindex;
2892         unsigned int np, i;
2893         FILE *file = NULL;
2894         int pf = -1;
2895
2896         /*
2897          * Try to get the master device name. If something goes
2898          * wrong, assume there is no kernel support and no
2899          * bonding devices.
2900          */
2901         if (nl_rdma < 0)
2902                 return -1;
2903         if (!strstr(ibv_dev->name, "bond"))
2904                 return -1;
2905         np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
2906         if (!np)
2907                 return -1;
2908         /*
2909          * The master device might not be on the predefined
2910          * port (port index 1 is not guaranteed), so we have
2911          * to scan all Infiniband device ports and find the
2912          * master.
2913          */
2914         for (i = 1; i <= np; ++i) {
2915                 /* Check whether Infiniband port is populated. */
2916                 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
2917                 if (!ifindex)
2918                         continue;
2919                 if (!if_indextoname(ifindex, ifname))
2920                         continue;
2921                 /* Try to read bonding slave names from sysfs. */
2922                 MKSTR(slaves,
2923                       "/sys/class/net/%s/master/bonding/slaves", ifname);
2924                 file = fopen(slaves, "r");
2925                 if (file)
2926                         break;
2927         }
2928         if (!file)
2929                 return -1;
2930         /* Use a bounded scan format to respect the maximal buffer length. */
2931         MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
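        /*
         * On Linux IF_NAMESIZE is 16, so the scan format below expands
         * to "%16s": fscanf() stores at most 16 characters plus the
         * terminating NUL, which fits the IF_NAMESIZE + 1 sized buffer.
         */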
2932         while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
2933                 char tmp_str[IF_NAMESIZE + 32];
2934                 struct rte_pci_addr pci_addr;
2935                 struct mlx5_switch_info info;
2936
2937                 /* Process slave interface names in the loop. */
2938                 snprintf(tmp_str, sizeof(tmp_str),
2939                          "/sys/class/net/%s", ifname);
2940                 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
2941                         DRV_LOG(WARNING, "cannot get PCI address"
2942                                          " for netdev \"%s\"", ifname);
2943                         continue;
2944                 }
2945                 if (pci_dev->addr.domain != pci_addr.domain ||
2946                     pci_dev->addr.bus != pci_addr.bus ||
2947                     pci_dev->addr.devid != pci_addr.devid ||
2948                     pci_dev->addr.function != pci_addr.function)
2949                         continue;
2950                 /* Slave interface PCI address match found. */
2951                 fclose(file);
2952                 snprintf(tmp_str, sizeof(tmp_str),
2953                          "/sys/class/net/%s/phys_port_name", ifname);
2954                 file = fopen(tmp_str, "rb");
2955                 if (!file)
2956                         break;
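                /*
                 * Example phys_port_name values (illustrative only):
                 * "2" (legacy) or "p0" (uplink) identify a PF and yield
                 * the PF index below, while e.g. "pf0vf1" denotes a VF
                 * representor and leaves pf unset.
                 */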
2957                 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
2958                 if (fscanf(file, "%32s", tmp_str) == 1)
2959                         mlx5_translate_port_name(tmp_str, &info);
2960                 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
2961                     info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2962                         pf = info.port_name;
2963                 break;
2964         }
2965         if (file)
2966                 fclose(file);
2967         return pf;
2968 }
2969
2970 /**
2971  * DPDK callback to register a PCI device.
2972  *
2973  * This function spawns Ethernet devices out of a given PCI device.
2974  *
2975  * @param[in] pci_drv
2976  *   PCI driver structure (mlx5_driver).
2977  * @param[in] pci_dev
2978  *   PCI device information.
2979  *
2980  * @return
2981  *   0 on success, a negative errno value otherwise and rte_errno is set.
2982  */
2983 static int
2984 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2985                struct rte_pci_device *pci_dev)
2986 {
2987         struct ibv_device **ibv_list;
2988         /*
2989          * Number of found IB devices matching the requested PCI BDF.
2990          * nd != 1 means there are multiple IB devices over the same
2991          * PCI device and we have representors and master.
2992          */
2993         unsigned int nd = 0;
2994         /*
2995          * Number of found IB device ports. nd = 1 and np = 1..n means
2996          * we have a single multiport IB device, and there may be
2997          * representors attached to some of the found ports.
2998          */
2999         unsigned int np = 0;
3000         /*
3001          * Number of DPDK Ethernet devices to spawn - either over
3002          * multiple IB devices or over multiple ports of a single IB
3003          * device. Actually this is the number of spawn iterations.
3004          */
3005         unsigned int ns = 0;
3006         /*
3007          * Bonding device
3008          *   < 0 - no bonding device (single one)
3009          *  >= 0 - bonding device (value is slave PF index)
3010          */
3011         int bd = -1;
3012         struct mlx5_dev_spawn_data *list = NULL;
3013         struct mlx5_dev_config dev_config;
3014         int ret;
3015
3016         if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) {
3017                 DRV_LOG(DEBUG, "Skip probing - should be probed by another"
3018                         " mlx5 driver.");
3019                 return 1;
3020         }
3021         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3022                 mlx5_pmd_socket_init();
3023         ret = mlx5_init_once();
3024         if (ret) {
3025                 DRV_LOG(ERR, "unable to init PMD global data: %s",
3026                         strerror(rte_errno));
3027                 return -rte_errno;
3028         }
3029         MLX5_ASSERT(pci_drv == &mlx5_driver);
3030         errno = 0;
3031         ibv_list = mlx5_glue->get_device_list(&ret);
3032         if (!ibv_list) {
3033                 rte_errno = errno ? errno : ENOSYS;
3034                 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
3035                 return -rte_errno;
3036         }
3037         /*
3038          * First scan the list of all Infiniband devices to find
3039          * matching ones, gathering them into the match list.
3040          */
3041         struct ibv_device *ibv_match[ret + 1];
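        /* Sized for all listed devices plus a slot for the NULL terminator set after the scan. */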
3042         int nl_route = mlx5_nl_init(NETLINK_ROUTE);
3043         int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
3044         unsigned int i;
3045
3046         while (ret-- > 0) {
3047                 struct rte_pci_addr pci_addr;
3048
3049                 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
3050                 bd = mlx5_device_bond_pci_match
3051                                 (ibv_list[ret], pci_dev, nl_rdma);
3052                 if (bd >= 0) {
3053                         /*
3054                          * Bonding device detected. Only one match is
3055                          * allowed; bonding is supported over multi-port
3056                          * IB devices, and there should be no matches on
3057                          * representor PCI functions or non-VF-LAG bonding
3058                          * devices with the specified address.
3059                          */
3060                         if (nd) {
3061                                 DRV_LOG(ERR,
3062                                         "multiple PCI match on bonding device"
3063                                         " \"%s\" found", ibv_list[ret]->name);
3064                                 rte_errno = ENOENT;
3065                                 ret = -rte_errno;
3066                                 goto exit;
3067                         }
3068                         DRV_LOG(INFO, "PCI information matches for"
3069                                       " slave %d bonding device \"%s\"",
3070                                       bd, ibv_list[ret]->name);
3071                         ibv_match[nd++] = ibv_list[ret];
3072                         break;
3073                 }
3074                 if (mlx5_dev_to_pci_addr
3075                         (ibv_list[ret]->ibdev_path, &pci_addr))
3076                         continue;
3077                 if (pci_dev->addr.domain != pci_addr.domain ||
3078                     pci_dev->addr.bus != pci_addr.bus ||
3079                     pci_dev->addr.devid != pci_addr.devid ||
3080                     pci_dev->addr.function != pci_addr.function)
3081                         continue;
3082                 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
3083                         ibv_list[ret]->name);
3084                 ibv_match[nd++] = ibv_list[ret];
3085         }
3086         ibv_match[nd] = NULL;
3087         if (!nd) {
3088                 /* No device matches, just complain and bail out. */
3089                 DRV_LOG(WARNING,
3090                         "no Verbs device matches PCI device " PCI_PRI_FMT ","
3091                         " are kernel drivers loaded?",
3092                         pci_dev->addr.domain, pci_dev->addr.bus,
3093                         pci_dev->addr.devid, pci_dev->addr.function);
3094                 rte_errno = ENOENT;
3095                 ret = -rte_errno;
3096                 goto exit;
3097         }
3098         if (nd == 1) {
3099                 /*
3100                  * The single matching device found may have multiple ports.
3101                  * Each port may be a representor, so we have to check the
3102                  * port number and the representors' existence.
3103                  */
3104                 if (nl_rdma >= 0)
3105                         np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
3106                 if (!np)
3107                         DRV_LOG(WARNING, "cannot get the number of ports"
3108                                          " for IB device \"%s\"", ibv_match[0]->name);
3109                 if (bd >= 0 && !np) {
3110                         DRV_LOG(ERR, "cannot get ports"
3111                                      " for the bonding device");
3112                         rte_errno = ENOENT;
3113                         ret = -rte_errno;
3114                         goto exit;
3115                 }
3116         }
3117 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
3118         if (bd >= 0) {
3119                 /*
3120                  * This may happen if there is VF LAG kernel support and the
3121                  * application is compiled with an older rdma-core library.
3122                  */
3123                 DRV_LOG(ERR,
3124                         "No kernel/verbs support for VF LAG bonding found.");
3125                 rte_errno = ENOTSUP;
3126                 ret = -rte_errno;
3127                 goto exit;
3128         }
3129 #endif
3130         /*
3131          * Now we can determine the maximal
3132          * number of devices to be spawned.
3133          */
3134         list = rte_zmalloc("device spawn data",
3135                          sizeof(struct mlx5_dev_spawn_data) *
3136                          (np ? np : nd),
3137                          RTE_CACHE_LINE_SIZE);
3138         if (!list) {
3139                 DRV_LOG(ERR, "spawn data array allocation failure");
3140                 rte_errno = ENOMEM;
3141                 ret = -rte_errno;
3142                 goto exit;
3143         }
3144         if (bd >= 0 || np > 1) {
3145                 /*
3146                  * Single IB device with multiple ports found;
3147                  * it may be an E-Switch master device with representors.
3148                  * We have to perform identification through the ports.
3149                  */
3150                 MLX5_ASSERT(nl_rdma >= 0);
3151                 MLX5_ASSERT(ns == 0);
3152                 MLX5_ASSERT(nd == 1);
3153                 MLX5_ASSERT(np);
3154                 for (i = 1; i <= np; ++i) {
3155                         list[ns].max_port = np;
3156                         list[ns].ibv_port = i;
3157                         list[ns].ibv_dev = ibv_match[0];
3158                         list[ns].eth_dev = NULL;
3159                         list[ns].pci_dev = pci_dev;
3160                         list[ns].pf_bond = bd;
3161                         list[ns].ifindex = mlx5_nl_ifindex
3162                                         (nl_rdma, list[ns].ibv_dev->name, i);
3163                         if (!list[ns].ifindex) {
3164                                 /*
3165                                  * No network interface index found for the
3166                                  * specified port, which means there is no
3167                                  * representor on this port. That's OK, there
3168                                  * can be disabled ports, for example if
3169                                  * sriov_numvfs < sriov_totalvfs.
3170                                  */
3171                                 continue;
3172                         }
3173                         ret = -1;
3174                         if (nl_route >= 0)
3175                                 ret = mlx5_nl_switch_info
3176                                                (nl_route,
3177                                                 list[ns].ifindex,
3178                                                 &list[ns].info);
3179                         if (ret || (!list[ns].info.representor &&
3180                                     !list[ns].info.master)) {
3181                                 /*
3182                                  * We failed to recognize representors with
3183                                  * Netlink; let's try to perform the task
3184                                  * with sysfs.
3185                                  */
3186                         ret = mlx5_sysfs_switch_info
3187                                                 (list[ns].ifindex,
3188                                                  &list[ns].info);
3189                         }
3190                         if (!ret && bd >= 0) {
3191                                 switch (list[ns].info.name_type) {
3192                                 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
3193                                         if (list[ns].info.port_name == bd)
3194                                                 ns++;
3195                                         break;
3196                                 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
3197                                         if (list[ns].info.pf_num == bd)
3198                                                 ns++;
3199                                         break;
3200                                 default:
3201                                         break;
3202                                 }
3203                                 continue;
3204                         }
3205                         if (!ret && (list[ns].info.representor ^
3206                                      list[ns].info.master))
3207                                 ns++;
3208                 }
3209                 if (!ns) {
3210                         DRV_LOG(ERR,
3211                                 "unable to recognize master/representors"
3212                                 " on the IB device with multiple ports");
3213                         rte_errno = ENOENT;
3214                         ret = -rte_errno;
3215                         goto exit;
3216                 }
3217         } else {
3218                 /*
3219                  * The existence of several matching entries (nd > 1) means
3220                  * port representors have been instantiated. No existing Verbs
3221                  * call nor sysfs entry can tell them apart; this can only
3222                  * be done through Netlink calls, assuming kernel drivers are
3223                  * recent enough to support them.
3224                  *
3225                  * In the event of identification failure through Netlink,
3226                  * try again through sysfs, then:
3227                  *
3228                  * 1. If a single IB device matches (nd == 1) with a single
3229                  *    port (np = 0/1) and is not a representor, assume
3230                  *    no switch support.
3231                  *
3232                  * 2. Otherwise no safe assumptions can be made;
3233                  *    complain louder and bail out.
3234                  */
3235                 np = 1;
3236                 for (i = 0; i != nd; ++i) {
3237                         memset(&list[ns].info, 0, sizeof(list[ns].info));
3238                         list[ns].max_port = 1;
3239                         list[ns].ibv_port = 1;
3240                         list[ns].ibv_dev = ibv_match[i];
3241                         list[ns].eth_dev = NULL;
3242                         list[ns].pci_dev = pci_dev;
3243                         list[ns].pf_bond = -1;
3244                         list[ns].ifindex = 0;
3245                         if (nl_rdma >= 0)
3246                                 list[ns].ifindex = mlx5_nl_ifindex
3247                                         (nl_rdma, list[ns].ibv_dev->name, 1);
3248                         if (!list[ns].ifindex) {
3249                                 char ifname[IF_NAMESIZE];
3250
3251                                 /*
3252                                  * Netlink failed; this may happen with an
3253                                  * old ib_core kernel driver (before 4.16).
3254                                  * We can assume the driver is old because
3255                                  * here we are processing single-port IB
3256                                  * devices. Let's try sysfs to retrieve
3257                                  * the ifindex. The method works for
3258                                  * the master device only.
3259                                  */
3260                                 if (nd > 1) {
3261                                         /*
3262                                          * Multiple devices found; assume
3263                                          * representors, since we can neither
3264                                          * distinguish master/representor nor
3265                                          * retrieve the ifindex via sysfs.
3266                                          */
3267                                         continue;
3268                                 }
3269                                 ret = mlx5_get_master_ifname
3270                                         (ibv_match[i]->ibdev_path, &ifname);
3271                                 if (!ret)
3272                                         list[ns].ifindex =
3273                                                 if_nametoindex(ifname);
3274                                 if (!list[ns].ifindex) {
3275                                         /*
3276                                          * No network interface index found
3277                                          * for the specified device, which
3278                                          * means it is neither a representor
3279                                          * nor a master.
3280                                          */
3281                                         continue;
3282                                 }
3283                         }
3284                         ret = -1;
3285                         if (nl_route >= 0)
3286                                 ret = mlx5_nl_switch_info
3287                                                (nl_route,
3288                                                 list[ns].ifindex,
3289                                                 &list[ns].info);
3290                         if (ret || (!list[ns].info.representor &&
3291                                     !list[ns].info.master)) {
3292                                 /*
3293                                  * We failed to recognize representors with
3294                                  * Netlink; let's try to perform the task
3295                                  * with sysfs.
3296                                  */
3297                         ret = mlx5_sysfs_switch_info
3298                                                 (list[ns].ifindex,
3299                                                  &list[ns].info);
3300                         }
3301                         if (!ret && (list[ns].info.representor ^
3302                                      list[ns].info.master)) {
3303                                 ns++;
3304                         } else if ((nd == 1) &&
3305                                    !list[ns].info.representor &&
3306                                    !list[ns].info.master) {
3307                                 /*
3308                                  * Single IB device with
3309                                  * one physical port and
3310                                  * an attached network device.
3311                                  * Maybe SR-IOV is not enabled
3312                                  * or there are no representors.
3313                                  */
3314                                 DRV_LOG(INFO, "no E-Switch support detected");
3315                                 ns++;
3316                                 break;
3317                         }
3318                 }
3319                 if (!ns) {
3320                         DRV_LOG(ERR,
3321                                 "unable to recognize master/representors"
3322                                 " on the multiple IB devices");
3323                         rte_errno = ENOENT;
3324                         ret = -rte_errno;
3325                         goto exit;
3326                 }
3327         }
3328         MLX5_ASSERT(ns);
3329         /*
3330          * Sort the list to probe devices in natural order for the user's convenience
3331          * (i.e. master first, then representors from lowest to highest ID).
3332          */
3333         qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
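        /*
         * E.g. (hypothetical) a master with representors 0..2 is now
         * probed in the order: master, representor 0, 1, then 2.
         */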
3334         /* Default configuration. */
3335         dev_config = (struct mlx5_dev_config){
3336                 .hw_padding = 0,
3337                 .mps = MLX5_ARG_UNSET,
3338                 .dbnc = MLX5_ARG_UNSET,
3339                 .rx_vec_en = 1,
3340                 .txq_inline_max = MLX5_ARG_UNSET,
3341                 .txq_inline_min = MLX5_ARG_UNSET,
3342                 .txq_inline_mpw = MLX5_ARG_UNSET,
3343                 .txqs_inline = MLX5_ARG_UNSET,
3344                 .vf_nl_en = 1,
3345                 .mr_ext_memseg_en = 1,
3346                 .mprq = {
3347                         .enabled = 0, /* Disabled by default. */
3348                         .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
3349                         .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
3350                         .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
3351                 },
3352                 .dv_esw_en = 1,
3353                 .dv_flow_en = 1,
3354         };
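        /*
         * Note: MLX5_ARG_UNSET marks parameters not forced via devargs;
         * their effective values are resolved later from the device
         * capabilities (e.g. mlx5_set_min_inline() for the Tx inline
         * thresholds during spawn).
         */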
3355         /* Device specific configuration. */
3356         switch (pci_dev->id.device_id) {
3357         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
3358         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
3359         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
3360         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
3361         case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
3362         case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
3363         case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
3364                 dev_config.vf = 1;
3365                 break;
3366         default:
3367                 break;
3368         }
3369         for (i = 0; i != ns; ++i) {
3370                 uint32_t restore;
3371
3372                 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
3373                                                  &list[i],
3374                                                  dev_config);
3375                 if (!list[i].eth_dev) {
3376                         if (rte_errno != EBUSY && rte_errno != EEXIST)
3377                                 break;
3378                         /* Device is disabled or already spawned. Ignore it. */
3379                         continue;
3380                 }
3381                 restore = list[i].eth_dev->data->dev_flags;
3382                 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
3383                 /* Restore non-PCI flags cleared by the above call. */
3384                 list[i].eth_dev->data->dev_flags |= restore;
3385                 mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev);
3386                 rte_eth_dev_probing_finish(list[i].eth_dev);
3387         }
3388         if (i != ns) {
3389                 DRV_LOG(ERR,
3390                         "probe of PCI device " PCI_PRI_FMT " aborted after"
3391                         " encountering an error: %s",
3392                         pci_dev->addr.domain, pci_dev->addr.bus,
3393                         pci_dev->addr.devid, pci_dev->addr.function,
3394                         strerror(rte_errno));
3395                 ret = -rte_errno;
3396                 /* Roll back. */
3397                 while (i--) {
3398                         if (!list[i].eth_dev)
3399                                 continue;
3400                         mlx5_dev_close(list[i].eth_dev);
3401                         /* mac_addrs must not be freed because it is part of dev_private. */
3402                         list[i].eth_dev->data->mac_addrs = NULL;
3403                         claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
3404                 }
3405                 /* Restore original error. */
3406                 rte_errno = -ret;
3407         } else {
3408                 ret = 0;
3409         }
3410 exit:
3411         /*
3412          * Do the routine cleanup:
3413          * - close opened Netlink sockets
3414          * - free allocated spawn data array
3415          * - free the Infiniband device list
3416          */
3417         if (nl_rdma >= 0)
3418                 close(nl_rdma);
3419         if (nl_route >= 0)
3420                 close(nl_route);
3421         if (list)
3422                 rte_free(list);
3423         MLX5_ASSERT(ibv_list);
3424         mlx5_glue->free_device_list(ibv_list);
3425         return ret;
3426 }
3427
3428 /**
3429  * Look for the Ethernet device belonging to the mlx5 driver.
3430  *
3431  * @param[in] port_id
3432  *   port_id to start looking for the device from.
3433  * @param[in] pci_dev
3434  *   Pointer to the hint PCI device. When the device is being probed,
3435  *   its siblings (master and preceding representors) might not have
3436  *   an assigned driver yet (because mlx5_pci_probe() is not completed
3437  *   yet); in this case matching on the hint PCI device may be used
3438  *   to detect a sibling device.
3439  *
3440  * @return
3441  *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
3442  */
3443 uint16_t
3444 mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
3445 {
3446         while (port_id < RTE_MAX_ETHPORTS) {
3447                 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3448
3449                 if (dev->state != RTE_ETH_DEV_UNUSED &&
3450                     dev->device &&
3451                     (dev->device == &pci_dev->device ||
3452                      (dev->device->driver &&
3453                      dev->device->driver->name &&
3454                      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
3455                         break;
3456                 port_id++;
3457         }
3458         if (port_id >= RTE_MAX_ETHPORTS)
3459                 return RTE_MAX_ETHPORTS;
3460         return port_id;
3461 }
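/*
 * Usage sketch (illustrative, not part of this file): all mlx5 ports
 * belonging to a PCI device could be enumerated with
 *   for (id = mlx5_eth_find_next(0, pci_dev);
 *        id < RTE_MAX_ETHPORTS;
 *        id = mlx5_eth_find_next(id + 1, pci_dev))
 * which matches the "start looking from port_id" contract above.
 */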
3462
3463 /**
3464  * DPDK callback to remove a PCI device.
3465  *
3466  * This function removes all Ethernet devices belonging to a given PCI device.
3467  *
3468  * @param[in] pci_dev
3469  *   Pointer to the PCI device.
3470  *
3471  * @return
3472  *   0 on success, the function cannot fail.
3473  */
3474 static int
3475 mlx5_pci_remove(struct rte_pci_device *pci_dev)
3476 {
3477         uint16_t port_id;
3478
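        /*
         * RTE_ETH_DEV_CLOSE_REMOVE was set at spawn time, so closing a
         * port below also releases it; no explicit
         * rte_eth_dev_release_port() is needed here.
         */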
3479         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
3480                 rte_eth_dev_close(port_id);
3481         return 0;
3482 }
3483
3484 static const struct rte_pci_id mlx5_pci_id_map[] = {
3485         {
3486                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3487                                PCI_DEVICE_ID_MELLANOX_CONNECTX4)
3488         },
3489         {
3490                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3491                                PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
3492         },
3493         {
3494                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3495                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
3496         },
3497         {
3498                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3499                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
3500         },
3501         {
3502                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3503                                PCI_DEVICE_ID_MELLANOX_CONNECTX5)
3504         },
3505         {
3506                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3507                                PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
3508         },
3509         {
3510                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3511                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
3512         },
3513         {
3514                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3515                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
3516         },
3517         {
3518                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3519                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
3520         },
3521         {
3522                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3523                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
3524         },
3525         {
3526                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3527                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6)
3528         },
3529         {
3530                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3531                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
3532         },
3533         {
3534                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3535                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
3536         },
3537         {
3538                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3539                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
3540         },
3541         {
3542                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3543                                 PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
3544         },
3545         {
3546                 .vendor_id = 0
3547         }
3548 };
3549
3550 static struct rte_pci_driver mlx5_driver = {
3551         .driver = {
3552                 .name = MLX5_DRIVER_NAME
3553         },
3554         .id_table = mlx5_pci_id_map,
3555         .probe = mlx5_pci_probe,
3556         .remove = mlx5_pci_remove,
3557         .dma_map = mlx5_dma_map,
3558         .dma_unmap = mlx5_dma_unmap,
3559         .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
3560                      RTE_PCI_DRV_PROBE_AGAIN,
3561 };
3562
3563 /**
3564  * Driver initialization routine.
3565  */
3566 RTE_INIT(rte_mlx5_pmd_init)
3567 {
3568         /* Initialize driver log type. */
3569         mlx5_logtype = rte_log_register("pmd.net.mlx5");
3570         if (mlx5_logtype >= 0)
3571                 rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
3572
3573         /* Build the static tables for Verbs conversion. */
3574         mlx5_set_ptype_table();
3575         mlx5_set_cksum_table();
3576         mlx5_set_swp_types_table();
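        /*
         * mlx5_glue may be NULL if the rdma-core glue library failed to
         * load at run time (dlopen-based builds); in that case the PCI
         * driver is not registered at all.
         */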
3577         if (mlx5_glue)
3578                 rte_pci_register(&mlx5_driver);
3579 }
3580
3581 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
3582 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
3583 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");