net/mlx5: reduce Netlink commands dependencies
drivers/net/mlx5/mlx5.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to the non-cached region, eliminating the extra write memory barrier.
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

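/*
 * Usage sketch (illustrative, not part of the driver): the keys above
 * are consumed as device arguments appended to the PCI address on the
 * EAL command line, e.g. with testpmd:
 *
 *   testpmd -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=128
 *
 * Unknown keys are rejected and deprecated keys are ignored or converted
 * by mlx5_args_check() at the end of this file.
 */
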
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
        uint32_t ifindex; /**< Network interface index. */
        uint32_t max_port; /**< IB device maximal port index. */
        uint32_t ibv_port; /**< IB device physical port index. */
        int pf_bond; /**< bonding device PF index. < 0 - no bonding */
        struct mlx5_switch_info info; /**< Switch information. */
        struct ibv_device *ibv_dev; /**< Associated IB device. */
        struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
        struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192

/**
 * Allocate ID pool structure.
 *
 * @param[in] max_id
 *   The maximum ID that can be allocated from the pool.
 *
 * @return
 *   Pointer to pool object, NULL otherwise.
 */
struct mlx5_flow_id_pool *
mlx5_flow_id_pool_alloc(uint32_t max_id)
{
        struct mlx5_flow_id_pool *pool;
        void *mem;

        pool = rte_zmalloc("id pool allocation", sizeof(*pool),
                           RTE_CACHE_LINE_SIZE);
        if (!pool) {
                DRV_LOG(ERR, "can't allocate id pool");
                rte_errno = ENOMEM;
                return NULL;
        }
        mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
                          RTE_CACHE_LINE_SIZE);
        if (!mem) {
                DRV_LOG(ERR, "can't allocate mem for id pool");
                rte_errno = ENOMEM;
                goto error;
        }
        pool->free_arr = mem;
        pool->curr = pool->free_arr;
        pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
        pool->base_index = 0;
        pool->max_id = max_id;
        return pool;
error:
        rte_free(pool);
        return NULL;
}

/**
 * Release ID pool structure.
 *
 * @param[in] pool
 *   Pointer to flow id pool object to free.
 */
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
        rte_free(pool->free_arr);
        rte_free(pool);
}

/**
 * Generate ID.
 *
 * @param[in] pool
 *   Pointer to flow id pool.
 * @param[out] id
 *   The generated ID.
 *
 * @return
 *   0 on success, error value otherwise.
 */
uint32_t
mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
{
        if (pool->curr == pool->free_arr) {
                if (pool->base_index == pool->max_id) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "no free id");
                        return -rte_errno;
                }
                *id = ++pool->base_index;
                return 0;
        }
        *id = *(--pool->curr);
        return 0;
}

/**
 * Release ID.
 *
 * @param[in] pool
 *   Pointer to flow id pool.
 * @param[in] id
 *   The ID to release.
 *
 * @return
 *   0 on success, error value otherwise.
 */
uint32_t
mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
{
        uint32_t size;
        uint32_t size2;
        void *mem;

        if (pool->curr == pool->last) {
                size = pool->curr - pool->free_arr;
                size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
                assert(size2 > size);
                mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
                if (!mem) {
                        DRV_LOG(ERR, "can't allocate mem for id pool");
                        rte_errno = ENOMEM;
                        return -rte_errno;
                }
                memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
                rte_free(pool->free_arr);
                pool->free_arr = mem;
                pool->curr = pool->free_arr + size;
                pool->last = pool->free_arr + size2;
        }
        *pool->curr = id;
        pool->curr++;
        return 0;
}

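#ifdef MLX5_PMD_USAGE_EXAMPLES /* Hypothetical guard, never defined. */
/*
 * Minimal usage sketch for the ID pool above: allocate a pool, draw an
 * ID, hand it back and release the pool. Error handling is reduced to
 * the bare minimum; this is illustrative code, not driver logic.
 */
static int
mlx5_flow_id_pool_example(void)
{
        struct mlx5_flow_id_pool *pool;
        uint32_t id;

        pool = mlx5_flow_id_pool_alloc(UINT32_MAX);
        if (!pool)
                return -rte_errno;
        if (mlx5_flow_id_get(pool, &id)) {
                mlx5_flow_id_pool_release(pool);
                return -rte_errno;
        }
        /* ... use "id" as a unique flow identifier ... */
        claim_zero(mlx5_flow_id_release(pool, id));
        mlx5_flow_id_pool_release(pool);
        return 0;
}
#endif /* MLX5_PMD_USAGE_EXAMPLES */
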
/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to the mlx5_ibv_shared object.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
        uint8_t i;

        TAILQ_INIT(&sh->cmng.flow_counters);
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
                TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all the resources allocated for counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

        LIST_REMOVE(mng, next);
        claim_zero(mlx5_devx_cmd_destroy(mng->dm));
        claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
        rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
        struct mlx5_counter_stats_mem_mng *mng;
        uint8_t i;
        int j;
        int retries = 1024;

        rte_errno = 0;
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
                struct mlx5_flow_counter_pool *pool;
                uint32_t batch = !!(i % 2);

                if (!sh->cmng.ccont[i].pools)
                        continue;
                pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                while (pool) {
                        if (batch) {
                                if (pool->min_dcs)
                                        claim_zero
                                        (mlx5_devx_cmd_destroy(pool->min_dcs));
                        }
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
                                if (pool->counters_raw[j].action)
                                        claim_zero
                                        (mlx5_glue->destroy_flow_action
                                               (pool->counters_raw[j].action));
                                if (!batch && pool->counters_raw[j].dcs)
                                        claim_zero(mlx5_devx_cmd_destroy
                                                  (pool->counters_raw[j].dcs));
                        }
                        TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
                                     next);
                        rte_free(pool);
                        pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                }
                rte_free(sh->cmng.ccont[i].pools);
        }
        mng = LIST_FIRST(&sh->cmng.mem_mngs);
        while (mng) {
                mlx5_flow_destroy_counter_stat_mem_mng(mng);
                mng = LIST_FIRST(&sh->cmng.mem_mngs);
        }
        memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Extract pdn of PD object using DV API.
 *
 * @param[in] pd
 *   Pointer to the verbs PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
{
        struct mlx5dv_obj obj;
        struct mlx5dv_pd pd_info;
        int ret = 0;

        obj.pd.in = pd;
        obj.pd.out = &pd_info;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
        if (ret) {
                DRV_LOG(DEBUG, "Failed to get PD object info");
                return ret;
        }
        *pdn = pd_info.pdn;
        return 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */

static int
mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
{
        char *env;
        int value;

        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Save the current value of the environment variable. */
        env = getenv(MLX5_SHUT_UP_BF);
        value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
        if (config->dbnc == MLX5_ARG_UNSET)
                setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
        else
                setenv(MLX5_SHUT_UP_BF,
                       config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
        return value;
}

static void
mlx5_restore_doorbell_mapping_env(int value)
{
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Restore the original environment variable state. */
        if (value == MLX5_ARG_UNSET)
                unsetenv(MLX5_SHUT_UP_BF);
        else
                setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
}

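#ifdef MLX5_PMD_USAGE_EXAMPLES /* Hypothetical guard, never defined. */
/*
 * Sketch of the intended pairing of the two helpers above: the
 * environment variable must be configured before the rdma-core device
 * is opened and restored right afterwards, whether or not the open
 * succeeded. A valid config and ibv_dev are assumed.
 */
static struct ibv_context *
mlx5_open_device_example(const struct mlx5_dev_config *config,
                         struct ibv_device *ibv_dev)
{
        struct ibv_context *ctx;
        int dbmap_env;

        dbmap_env = mlx5_config_doorbell_mapping_env(config);
        ctx = mlx5_glue->open_device(ibv_dev);
        mlx5_restore_doorbell_mapping_env(dbmap_env);
        return ctx;
}
#endif /* MLX5_PMD_USAGE_EXAMPLES */
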
/**
 * Allocate the shared IB device context. If there is a multiport device,
 * the master and representors will share this context; if there is a
 * single-port dedicated IB device, the context will be used by this
 * port only, for the sake of unification.
 *
 * The routine first searches the list of contexts for the specified IB
 * device name; if found, the shared context is reused and its reference
 * counter is incremented. If no context is found, a new one is created
 * and initialized with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 * @param[in] config
 *   Pointer to device configuration structure.
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
                        const struct mlx5_dev_config *config)
{
        struct mlx5_ibv_shared *sh;
        int dbmap_env;
        int err = 0;
        uint32_t i;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif

        assert(spawn);
        /* Secondary process should not create the shared context. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_ibv_list, next) {
                if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
                        sh->refcnt++;
                        goto exit;
                }
        }
        /* No device found, we have to create a new shared context. */
        assert(spawn->max_port);
        sh = rte_zmalloc("ethdev shared ib context",
                         sizeof(struct mlx5_ibv_shared) +
                         spawn->max_port *
                         sizeof(struct mlx5_ibv_shared_port),
                         RTE_CACHE_LINE_SIZE);
        if (!sh) {
                DRV_LOG(ERR, "shared context allocation failure");
                rte_errno = ENOMEM;
                goto exit;
        }
        /*
         * Configure environment variable "MLX5_SHUT_UP_BF"
         * before the device creation. The rdma-core library
         * checks the variable at device creation and
         * stores the result internally.
         */
        dbmap_env = mlx5_config_doorbell_mapping_env(config);
        /* Try to open IB device with DV first, then usual Verbs. */
        errno = 0;
        sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
        if (sh->ctx) {
                sh->devx = 1;
                DRV_LOG(DEBUG, "DevX is supported");
                /* The device is created, no need for environment. */
                mlx5_restore_doorbell_mapping_env(dbmap_env);
        } else {
                /* The environment variable is still configured. */
                sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
                err = errno ? errno : ENODEV;
                /*
                 * The environment variable is not needed anymore,
                 * all device creation attempts are completed.
                 */
                mlx5_restore_doorbell_mapping_env(dbmap_env);
                if (!sh->ctx)
                        goto error;
                DRV_LOG(DEBUG, "DevX is NOT supported");
        }
        err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
                goto error;
        }
        sh->refcnt = 1;
        sh->max_port = spawn->max_port;
        strncpy(sh->ibdev_name, sh->ctx->device->name,
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to the maximum disallowed value means
         * there is no interrupt subhandler installed for
         * the given port index i.
         */
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
        sh->pd = mlx5_glue->alloc_pd(sh->ctx);
        if (sh->pd == NULL) {
                DRV_LOG(ERR, "PD allocation failure");
                err = ENOMEM;
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (sh->devx) {
                err = mlx5_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Failed to extract pdn from PD");
                        goto error;
                }
                sh->td = mlx5_devx_cmd_create_td(sh->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
                        err = ENOMEM;
                        goto error;
                }
                tis_attr.transport_domain = sh->td->id;
                sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
                if (!sh->tis) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
                        goto error;
                }
        }
        sh->flow_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX);
        if (!sh->flow_id_pool) {
                DRV_LOG(ERR, "can't create flow id pool");
                err = ENOMEM;
                goto error;
        }
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
        /*
         * Once the device is added to the list of memory event
         * callbacks, its global MR cache table cannot be expanded
         * on the fly because of a possible deadlock. If it overflows,
         * lookup should be done by searching the MR list linearly,
         * which is slow.
         *
         * At this point the device is not added to the memory
         * event list yet, the context is just being created.
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
                                 spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
        /* Add device to memory callback list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
                         sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        return sh;
error:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        assert(sh);
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        if (sh->flow_id_pool)
                mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
        assert(err > 0);
        rte_errno = err;
        return NULL;
}

/**
 * Free shared IB device context. Decrement the reference counter and,
 * if it reaches zero, free all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
        /* Check the object presence in the list. */
        struct mlx5_ibv_shared *lctx;

        LIST_FOREACH(lctx, &mlx5_ibv_list, next)
                if (lctx == sh)
                        break;
        assert(lctx);
        if (lctx != sh) {
                DRV_LOG(ERR, "Freeing non-existing shared IB context");
                goto exit;
        }
#endif
        assert(sh);
        assert(sh->refcnt);
        /* Secondary process should not free the shared context. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
        /* Remove from memory callback device list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_REMOVE(sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         * Ensure there is no async event handler installed.
         * Only the primary process handles async device events.
         */
        mlx5_flow_counters_mng_close(sh);
        assert(!sh->intr_cnt);
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT
        if (sh->devx_intr_cnt) {
                if (sh->intr_handle_devx.fd)
                        rte_intr_callback_unregister(&sh->intr_handle_devx,
                                          mlx5_dev_interrupt_handler_devx, sh);
                if (sh->devx_comp)
                        mlx5dv_devx_destroy_cmd_comp(sh->devx_comp);
        }
#endif
        pthread_mutex_destroy(&sh->intr_mutex);
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        if (sh->flow_id_pool)
                mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}

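#ifdef MLX5_PMD_USAGE_EXAMPLES /* Hypothetical guard, never defined. */
/*
 * Reference-counting contract of the two routines above, as a sketch:
 * every successful mlx5_alloc_shared_ibctx() must be balanced by one
 * mlx5_free_shared_ibctx(). Ports of the same multiport IB device get
 * the same object back with the reference counter incremented.
 */
static void
mlx5_shared_ibctx_example(const struct mlx5_dev_spawn_data *spawn,
                          const struct mlx5_dev_config *config)
{
        struct mlx5_ibv_shared *sh = mlx5_alloc_shared_ibctx(spawn, config);

        if (!sh)
                return; /* rte_errno is set by the allocator. */
        /* ... use sh->ctx, sh->pd and the other shared resources ... */
        mlx5_free_shared_ibctx(sh);
}
#endif /* MLX5_PMD_USAGE_EXAMPLES */
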
/**
 * Destroy table hash list and all the root entries per domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_tbl_data_entry *tbl_data;
        union mlx5_flow_tbl_key table_key = {
                {
                        .table_id = 0,
                        .reserved = 0,
                        .domain = 0,
                        .direction = 0,
                }
        };
        struct mlx5_hlist_entry *pos;

        if (!sh->flow_tbls)
                return;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                assert(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        table_key.direction = 1;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                assert(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        table_key.direction = 0;
        table_key.domain = 1;
        pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
                assert(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
        mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}

/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        char s[MLX5_HLIST_NAMESIZE];
        int err = 0;

        assert(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
        if (!sh->flow_tbls) {
                DRV_LOG(ERR, "flow tables with hash creation failed.");
                err = ENOMEM;
                return err;
        }
#ifndef HAVE_MLX5DV_DR
        /*
         * If there is no DR support, the zero tables should still be
         * created because DV expects to see them even if they cannot
         * be created by rdma-core.
         */
        union mlx5_flow_tbl_key table_key = {
                {
                        .table_id = 0,
                        .reserved = 0,
                        .domain = 0,
                        .direction = 0,
                }
        };
        struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
                                                          sizeof(*tbl_data), 0);

        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        table_key.direction = 1;
        tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        table_key.direction = 0;
        table_key.domain = 1;
        tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
        if (!tbl_data) {
                err = ENOMEM;
                goto error;
        }
        tbl_data->entry.key = table_key.v64;
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
        rte_atomic32_init(&tbl_data->tbl.refcnt);
        rte_atomic32_inc(&tbl_data->tbl.refcnt);
        return err;
error:
        mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
        return err;
}

/**
 * Initialize DR related data within the private structure.
 * The routine checks the reference counter and does actual
 * resources creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh = priv->sh;
        char s[MLX5_HLIST_NAMESIZE];
        int err = 0;

        if (!sh->flow_tbls)
                err = mlx5_alloc_table_hash_list(priv);
        else
                DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
                        (void *)sh->flow_tbls);
        if (err)
                return err;
        /* Create tags hash list table. */
        snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
        sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
        if (!sh->tag_table) {
                DRV_LOG(ERR, "tags with hash creation failed.");
                err = ENOMEM;
                goto error;
        }
#ifdef HAVE_MLX5DV_DR
        void *domain;

        if (sh->dv_refcnt) {
                /* Shared DV/DR structures are already initialized. */
                sh->dv_refcnt++;
                priv->dr_shared = 1;
                return 0;
        }
        /* Reference counter is zero, we should initialize structures. */
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
        if (!domain) {
                DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        sh->rx_domain = domain;
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
        if (!domain) {
                DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        pthread_mutex_init(&sh->dv_mutex, NULL);
        sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (priv->config.dv_esw_en) {
                domain = mlx5_glue->dr_create_domain
                        (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
                if (!domain) {
                        DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
                        err = errno;
                        goto error;
                }
                sh->fdb_domain = domain;
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
#endif
        sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
#endif /* HAVE_MLX5DV_DR */
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;
error:
        /* Rollback the created objects. */
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        if (sh->tag_table) {
                /* Tags must have been destroyed with their flows beforehand. */
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
        mlx5_free_table_hash_list(priv);
        return err;
}

/**
 * Destroy DR related data within the private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
        struct mlx5_ibv_shared *sh;

        if (!priv->dr_shared)
                return;
        priv->dr_shared = 0;
        sh = priv->sh;
        assert(sh);
#ifdef HAVE_MLX5DV_DR
        assert(sh->dv_refcnt);
        if (sh->dv_refcnt && --sh->dv_refcnt)
                return;
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
#endif
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        pthread_mutex_destroy(&sh->dv_mutex);
#endif /* HAVE_MLX5DV_DR */
        if (sh->tag_table) {
                /* Tags must have been destroyed with their flows beforehand. */
                mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
                sh->tag_table = NULL;
        }
        mlx5_free_table_hash_list(priv);
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
        const struct rte_memzone *mz;
        int ret = 0;

        rte_spinlock_lock(&mlx5_shared_data_lock);
        if (mlx5_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate shared memory. */
                        mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
                                                 sizeof(*mlx5_shared_data),
                                                 SOCKET_ID_ANY, 0);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot allocate mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
                        rte_spinlock_init(&mlx5_shared_data->lock);
                } else {
                        /* Lookup allocated shared memory. */
                        mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot attach mlx5 shared data");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
                }
        }
error:
        rte_spinlock_unlock(&mlx5_shared_data_lock);
        return ret;
}

/**
 * Retrieve an integer value from an environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
        const char *val = getenv(name);

        if (val == NULL)
                return 0;
        return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided, residing inside a huge page.
 * Please note that all allocations must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
        struct mlx5_priv *priv = data;
        void *ret;
        size_t alignment = sysconf(_SC_PAGESIZE);
        unsigned int socket = SOCKET_ID_ANY;

        if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
                const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        } else if (priv->verbs_alloc_ctx.type ==
                   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
                const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        }
        assert(data != NULL);
        ret = rte_malloc_socket(__func__, size, alignment, socket);
        if (!ret && size)
                rte_errno = ENOMEM;
        return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
        assert(data != NULL);
        rte_free(ptr);
}

/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] udp_tunnel
 *   Pointer to the UDP tunnel structure.
 *
 * @return
 *   0 for valid UDP ports and tunnel types, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        assert(udp_tunnel != NULL);
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
}

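/*
 * Illustration (an assumption about typical application code, not part
 * of this driver): the check above is reached through the generic
 * ethdev API, e.g.:
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * Only the default VXLAN (4789) and VXLAN-GPE (4790) ports succeed.
 */
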
/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_proc_priv *ppriv;
        size_t ppriv_size;

        /*
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
        ppriv_size =
                sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
        ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        ppriv->uar_table_sz = ppriv_size;
        dev->process_private = ppriv;
        return 0;
}

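/*
 * Worked example of the size computation above (an illustration, the
 * exact structure size depends on the build): with 8 Tx queues on a
 * 64-bit target, ppriv_size = sizeof(struct mlx5_proc_priv) +
 * 8 * sizeof(void *) = sizeof(struct mlx5_proc_priv) + 64 bytes,
 * i.e. one BlueFlame/UAR register pointer slot per Tx queue, kept
 * per process so primary and secondary keep independent mappings.
 */
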
/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
        if (!dev->process_private)
                return;
        rte_free(dev->process_private);
        dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_dev_interrupt_handler_devx_uninstall(dev);
        mlx5_traffic_disable(dev);
        mlx5_flow_flush(dev, NULL);
        mlx5_flow_meter_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_req_stop_rxtx(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
                priv->rxqs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i)
                        mlx5_txq_release(dev, i);
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
        mlx5_proc_priv_uninit(dev);
        if (priv->mreg_cp_tbl)
                mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
        mlx5_mprq_free_mp(dev);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
        if (priv->reta_idx != NULL)
                rte_free(priv->reta_idx);
        if (priv->config.vf)
                mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
                                       dev->data->mac_addrs,
                                       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
        if (priv->nl_socket_route >= 0)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
        if (priv->vmwa_context)
                mlx5_vlan_vmwa_exit(priv->vmwa_context);
        if (priv->sh) {
                /*
                 * Free the shared context in the last turn, because the
                 * cleanup routines above may use some shared fields, like
                 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
                 * the ifindex if Netlink fails.
                 */
                mlx5_free_shared_ibctx(priv->sh);
                priv->sh = NULL;
        }
        ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_ind_table_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some indirection tables still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_flow_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
                uint16_t port_id;

                MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                        struct mlx5_priv *opriv =
                                rte_eth_devices[port_id].data->dev_private;

                        if (!opriv ||
                            opriv->domain_id != priv->domain_id ||
                            &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
                        break;
                }
                if (!c)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
        }
        memset(priv, 0, sizeof(*priv));
        priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
        /*
         * Reset mac_addrs to NULL such that it is not freed as part of
         * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
         * it is freed when dev_private is freed.
         */
        dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .read_clock = mlx5_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .reta_update = mlx5_dev_rss_reta_update,
        .reta_query = mlx5_dev_rss_reta_query,
        .rss_hash_update = mlx5_rss_hash_update,
        .rss_hash_conf_get = mlx5_rss_hash_conf_get,
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rx_queue_count = mlx5_rx_queue_count,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
        struct mlx5_dev_config *config = opaque;
        unsigned long tmp;

        /* No-op, port representors are processed in mlx5_dev_spawn(). */
        if (!strcmp(MLX5_REPRESENTOR, key))
                return 0;
        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                rte_errno = errno;
                DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
                return -rte_errno;
        }
        if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
                config->cqe_comp = !!tmp;
        } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
                config->cqe_pad = !!tmp;
        } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
                config->hw_padding = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
                config->mprq.enabled = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
                config->mprq.stride_num_n = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
                config->mprq.max_memcpy_len = tmp;
        } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
                config->mprq.min_rxqs_num = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                                 " converted to txq_inline_max", key);
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
                config->txq_inline_min = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
        } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
                if (tmp != MLX5_TXDB_CACHED &&
                    tmp != MLX5_TXDB_NCACHED &&
                    tmp != MLX5_TXDB_HEURISTIC) {
                        DRV_LOG(ERR, "invalid Tx doorbell "
                                     "mapping parameter");
                        rte_errno = EINVAL;
                        return -rte_errno;
                }
                config->dbnc = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                                 " converted to txq_inline_mpw", key);
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
1543                 DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
1544         } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
1545                 config->rx_vec_en = !!tmp;
1546         } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
1547                 config->l3_vxlan_en = !!tmp;
1548         } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
1549                 config->vf_nl_en = !!tmp;
1550         } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
1551                 config->dv_esw_en = !!tmp;
1552         } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
1553                 config->dv_flow_en = !!tmp;
1554         } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
1555                 if (tmp != MLX5_XMETA_MODE_LEGACY &&
1556                     tmp != MLX5_XMETA_MODE_META16 &&
1557                     tmp != MLX5_XMETA_MODE_META32) {
1558                         DRV_LOG(ERR, "invalid extensive "
1559                                      "metadata parameter");
1560                         rte_errno = EINVAL;
1561                         return -rte_errno;
1562                 }
1563                 config->dv_xmeta_en = tmp;
1564         } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
1565                 config->mr_ext_memseg_en = !!tmp;
1566         } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
1567                 config->max_dump_files_num = tmp;
1568         } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
1569                 config->lro.timeout = tmp;
1570         } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
1571                 DRV_LOG(DEBUG, "class argument is %s.", val);
1572         } else {
1573                 DRV_LOG(WARNING, "%s: unknown parameter", key);
1574                 rte_errno = EINVAL;
1575                 return -rte_errno;
1576         }
1577         return 0;
1578 }
1579
1580 /**
1581  * Parse device parameters.
1582  *
1583  * @param config
1584  *   Pointer to device configuration structure.
1585  * @param devargs
1586  *   Device arguments structure.
1587  *
1588  * @return
1589  *   0 on success, a negative errno value otherwise and rte_errno is set.
1590  */
1591 static int
1592 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
1593 {
1594         const char **params = (const char *[]){
1595                 MLX5_RXQ_CQE_COMP_EN,
1596                 MLX5_RXQ_CQE_PAD_EN,
1597                 MLX5_RXQ_PKT_PAD_EN,
1598                 MLX5_RX_MPRQ_EN,
1599                 MLX5_RX_MPRQ_LOG_STRIDE_NUM,
1600                 MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
1601                 MLX5_RXQS_MIN_MPRQ,
1602                 MLX5_TXQ_INLINE,
1603                 MLX5_TXQ_INLINE_MIN,
1604                 MLX5_TXQ_INLINE_MAX,
1605                 MLX5_TXQ_INLINE_MPW,
1606                 MLX5_TXQS_MIN_INLINE,
1607                 MLX5_TXQS_MAX_VEC,
1608                 MLX5_TXQ_MPW_EN,
1609                 MLX5_TXQ_MPW_HDR_DSEG_EN,
1610                 MLX5_TXQ_MAX_INLINE_LEN,
1611                 MLX5_TX_DB_NC,
1612                 MLX5_TX_VEC_EN,
1613                 MLX5_RX_VEC_EN,
1614                 MLX5_L3_VXLAN_EN,
1615                 MLX5_VF_NL_EN,
1616                 MLX5_DV_ESW_EN,
1617                 MLX5_DV_FLOW_EN,
1618                 MLX5_DV_XMETA_EN,
1619                 MLX5_MR_EXT_MEMSEG_EN,
1620                 MLX5_REPRESENTOR,
1621                 MLX5_MAX_DUMP_FILES_NUM,
1622                 MLX5_LRO_TIMEOUT_USEC,
1623                 MLX5_CLASS_ARG_NAME,
1624                 NULL,
1625         };
1626         struct rte_kvargs *kvlist;
1627         int ret = 0;
1628         int i;
1629
1630         if (devargs == NULL)
1631                 return 0;
1632         /* The ugly cast in the params declaration exists to pass checkpatch. */
1633         kvlist = rte_kvargs_parse(devargs->args, params);
1634         if (kvlist == NULL) {
1635                 rte_errno = EINVAL;
1636                 return -rte_errno;
1637         }
1638         /* Process parameters. */
1639         for (i = 0; (params[i] != NULL); ++i) {
1640                 if (rte_kvargs_count(kvlist, params[i])) {
1641                         ret = rte_kvargs_process(kvlist, params[i],
1642                                                  mlx5_args_check, config);
1643                         if (ret) {
1644                                 rte_errno = EINVAL;
1645                                 rte_kvargs_free(kvlist);
1646                                 return -rte_errno;
1647                         }
1648                 }
1649         }
1650         rte_kvargs_free(kvlist);
1651         return 0;
1652 }
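/*
 * Illustrative example (hypothetical values, not part of the upstream
 * code): with the parser above, an EAL device argument string such as
 *
 *     -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=128
 *
 * is split by rte_kvargs_parse() and each key=value pair is routed
 * through mlx5_args_check(), which fills the matching field of struct
 * mlx5_dev_config (config->cqe_comp, config->mprq.enabled and
 * config->txq_inline_max for the keys above).
 */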
1653
1654 static struct rte_pci_driver mlx5_driver;
1655
1656 /**
1657  * PMD global initialization.
1658  *
1659  * Independent of any individual device, this function initializes global
1660  * per-PMD data structures, distinguishing primary and secondary processes.
1661  * Hence, the initialization is performed once per process.
1662  *
1663  * @return
1664  *   0 on success, a negative errno value otherwise and rte_errno is set.
1665  */
1666 static int
1667 mlx5_init_once(void)
1668 {
1669         struct mlx5_shared_data *sd;
1670         struct mlx5_local_data *ld = &mlx5_local_data;
1671         int ret = 0;
1672
1673         if (mlx5_init_shared_data())
1674                 return -rte_errno;
1675         sd = mlx5_shared_data;
1676         assert(sd);
1677         rte_spinlock_lock(&sd->lock);
1678         switch (rte_eal_process_type()) {
1679         case RTE_PROC_PRIMARY:
1680                 if (sd->init_done)
1681                         break;
1682                 LIST_INIT(&sd->mem_event_cb_list);
1683                 rte_rwlock_init(&sd->mem_event_rwlock);
1684                 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
1685                                                 mlx5_mr_mem_event_cb, NULL);
1686                 ret = mlx5_mp_init_primary();
1687                 if (ret)
1688                         goto out;
1689                 sd->init_done = true;
1690                 break;
1691         case RTE_PROC_SECONDARY:
1692                 if (ld->init_done)
1693                         break;
1694                 ret = mlx5_mp_init_secondary();
1695                 if (ret)
1696                         goto out;
1697                 ++sd->secondary_cnt;
1698                 ld->init_done = true;
1699                 break;
1700         default:
1701                 break;
1702         }
1703 out:
1704         rte_spinlock_unlock(&sd->lock);
1705         return ret;
1706 }
1707
1708 /**
1709  * Configures the minimal amount of data to inline into WQE
1710  * while sending packets.
1711  *
1712  * - txq_inline_min has the highest priority if this
1713  *   key is specified in devargs;
1714  * - if DevX is enabled, the inline mode is queried from the
1715  *   device (HCA attributes and NIC vport context if needed);
1716  * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
1717  *   and none (0 bytes) for other NICs.
1718  *
1719  * @param spawn
1720  *   Verbs device parameters (name, port, switch_info) to spawn.
1721  * @param config
1722  *   Device configuration parameters.
1723  */
1724 static void
1725 mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
1726                     struct mlx5_dev_config *config)
1727 {
1728         if (config->txq_inline_min != MLX5_ARG_UNSET) {
1729                 /* Application defines size of inlined data explicitly. */
1730                 switch (spawn->pci_dev->id.device_id) {
1731                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1732                 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1733                         if (config->txq_inline_min <
1734                                        (int)MLX5_INLINE_HSIZE_L2) {
1735                                 DRV_LOG(DEBUG,
1736                                         "txq_inline_min aligned to minimal"
1737                                         " ConnectX-4 required value %d",
1738                                         (int)MLX5_INLINE_HSIZE_L2);
1739                                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1740                         }
1741                         break;
1742                 }
1743                 goto exit;
1744         }
1745         if (config->hca_attr.eth_net_offloads) {
1746                 /* We have DevX enabled, inline mode queried successfully. */
1747                 switch (config->hca_attr.wqe_inline_mode) {
1748                 case MLX5_CAP_INLINE_MODE_L2:
1749                         /* outer L2 header must be inlined. */
1750                         config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1751                         goto exit;
1752                 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1753                         /* No inline data are required by NIC. */
1754                         config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1755                         config->hw_vlan_insert =
1756                                 config->hca_attr.wqe_vlan_insert;
1757                         DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
1758                         goto exit;
1759                 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1760                         /* inline mode is defined by NIC vport context. */
1761                         if (!config->hca_attr.eth_virt)
1762                                 break;
1763                         switch (config->hca_attr.vport_inline_mode) {
1764                         case MLX5_INLINE_MODE_NONE:
1765                                 config->txq_inline_min =
1766                                         MLX5_INLINE_HSIZE_NONE;
1767                                 goto exit;
1768                         case MLX5_INLINE_MODE_L2:
1769                                 config->txq_inline_min =
1770                                         MLX5_INLINE_HSIZE_L2;
1771                                 goto exit;
1772                         case MLX5_INLINE_MODE_IP:
1773                                 config->txq_inline_min =
1774                                         MLX5_INLINE_HSIZE_L3;
1775                                 goto exit;
1776                         case MLX5_INLINE_MODE_TCP_UDP:
1777                                 config->txq_inline_min =
1778                                         MLX5_INLINE_HSIZE_L4;
1779                                 goto exit;
1780                         case MLX5_INLINE_MODE_INNER_L2:
1781                                 config->txq_inline_min =
1782                                         MLX5_INLINE_HSIZE_INNER_L2;
1783                                 goto exit;
1784                         case MLX5_INLINE_MODE_INNER_IP:
1785                                 config->txq_inline_min =
1786                                         MLX5_INLINE_HSIZE_INNER_L3;
1787                                 goto exit;
1788                         case MLX5_INLINE_MODE_INNER_TCP_UDP:
1789                                 config->txq_inline_min =
1790                                         MLX5_INLINE_HSIZE_INNER_L4;
1791                                 goto exit;
1792                         }
1793                 }
1794         }
1795         /*
1796          * We get here if we are unable to deduce the
1797          * inline data size with DevX. Fall back to the
1798          * PCI ID to recognize older NICs.
1799          */
1800         switch (spawn->pci_dev->id.device_id) {
1801         case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
1802         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1803         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
1804         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1805                 config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
1806                 config->hw_vlan_insert = 0;
1807                 break;
1808         case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
1809         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1810         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
1811         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1812                 /*
1813                  * These NICs support VLAN insertion from WQE and
1814                  * report the wqe_vlan_insert flag. However, due to a bug,
1815                  * PFC control may be broken, so the feature is disabled.
1816                  */
1817                 config->hw_vlan_insert = 0;
1818                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1819                 break;
1820         default:
1821                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1822                 break;
1823         }
1824 exit:
1825         DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
1826 }
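/*
 * Worked example (illustrative): if txq_inline_min is left unset and
 * DevX reports no eth_net_offloads, the PCI ID fallback above selects
 * MLX5_INLINE_HSIZE_L2 for ConnectX-4/4 Lx (the outer L2 header, 18
 * bytes, is copied into the WQE), while the listed ConnectX-5 devices
 * get MLX5_INLINE_HSIZE_NONE with WQE VLAN insertion kept disabled
 * because of the PFC issue noted above.
 */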
1827
1828 /**
1829  * Configures the metadata mask fields in the shared context.
1830  *
1831  * @param [in] dev
1832  *   Pointer to Ethernet device.
1833  */
1834 static void
1835 mlx5_set_metadata_mask(struct rte_eth_dev *dev)
1836 {
1837         struct mlx5_priv *priv = dev->data->dev_private;
1838         struct mlx5_ibv_shared *sh = priv->sh;
1839         uint32_t meta, mark, reg_c0;
1840
1841         reg_c0 = ~priv->vport_meta_mask;
1842         switch (priv->config.dv_xmeta_en) {
1843         case MLX5_XMETA_MODE_LEGACY:
1844                 meta = UINT32_MAX;
1845                 mark = MLX5_FLOW_MARK_MASK;
1846                 break;
1847         case MLX5_XMETA_MODE_META16:
1848                 meta = reg_c0 >> rte_bsf32(reg_c0);
1849                 mark = MLX5_FLOW_MARK_MASK;
1850                 break;
1851         case MLX5_XMETA_MODE_META32:
1852                 meta = UINT32_MAX;
1853                 mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
1854                 break;
1855         default:
1856                 meta = 0;
1857                 mark = 0;
1858                 assert(false);
1859                 break;
1860         }
1861         if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
1862                 DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
1863                                  sh->dv_mark_mask, mark);
1864         else
1865                 sh->dv_mark_mask = mark;
1866         if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
1867                 DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
1868                                  sh->dv_meta_mask, meta);
1869         else
1870                 sh->dv_meta_mask = meta;
1871         if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
1872                 DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
1873                                  sh->dv_regc0_mask, reg_c0);
1874         else
1875                 sh->dv_regc0_mask = reg_c0;
1876         DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
1877         DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
1878         DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
1879         DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
1880 }
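/*
 * Numeric sketch (illustrative values): assume priv->vport_meta_mask is
 * 0x0000FFFF, hence reg_c0 = ~mask = 0xFFFF0000. In META16 mode the
 * code above computes meta = reg_c0 >> rte_bsf32(reg_c0) =
 * 0xFFFF0000 >> 16 = 0x0000FFFF, i.e. 16 bits of REG_C_0 remain
 * available for the META item while the other half stays engaged by
 * the E-Switch vport match.
 */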
1881
1882 /**
1883  * Allocate page of door-bells and register it using DevX API.
1884  *
1885  * @param [in] dev
1886  *   Pointer to Ethernet device.
1887  *
1888  * @return
1889  *   Pointer to new page on success, NULL otherwise.
1890  */
1891 static struct mlx5_devx_dbr_page *
1892 mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
1893 {
1894         struct mlx5_priv *priv = dev->data->dev_private;
1895         struct mlx5_devx_dbr_page *page;
1896
1897         /* Allocate space for door-bell page and management data. */
1898         page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
1899                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1900         if (!page) {
1901                 DRV_LOG(ERR, "port %u cannot allocate dbr page",
1902                         dev->data->port_id);
1903                 return NULL;
1904         }
1905         /* Register allocated memory. */
1906         page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
1907                                               MLX5_DBR_PAGE_SIZE, 0);
1908         if (!page->umem) {
1909                 DRV_LOG(ERR, "port %u cannot umem reg dbr page",
1910                         dev->data->port_id);
1911                 rte_free(page);
1912                 return NULL;
1913         }
1914         return page;
1915 }
1916
1917 /**
1918  * Find the next available door-bell, allocate new page if needed.
1919  *
1920  * @param [in] dev
1921  *   Pointer to Ethernet device.
1922  * @param [out] dbr_page
1923  *   Door-bell page containing the page data.
1924  *
1925  * @return
1926  *   Door-bell address offset on success, a negative error value otherwise.
1927  */
1928 int64_t
1929 mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
1930 {
1931         struct mlx5_priv *priv = dev->data->dev_private;
1932         struct mlx5_devx_dbr_page *page = NULL;
1933         uint32_t i, j;
1934
1935         LIST_FOREACH(page, &priv->dbrpgs, next)
1936                 if (page->dbr_count < MLX5_DBR_PER_PAGE)
1937                         break;
1938         if (!page) { /* No page with free door-bell exists. */
1939                 page = mlx5_alloc_dbr_page(dev);
1940                 if (!page) /* Failed to allocate new page. */
1941                         return (-1);
1942                 LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
1943         }
1944         /* Loop to find bitmap part with clear bit. */
1945         for (i = 0;
1946              i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
1947              i++)
1948                 ; /* Empty. */
1949         assert(i < (MLX5_DBR_PER_PAGE / 64));
1950         /* Find the first clear bit. */
1951         j = rte_bsf64(~page->dbr_bitmap[i]);
1952         page->dbr_bitmap[i] |= (1ULL << j);
1953         page->dbr_count++;
1954         *dbr_page = page;
1955         return (((i * 64) + j) * sizeof(uint64_t));
1956 }
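/*
 * Usage sketch (illustrative only; the rxq fields below are
 * hypothetical, error handling simplified):
 *
 *     struct mlx5_devx_dbr_page *dbr_page;
 *     int64_t dbr_offset = mlx5_get_dbr(dev, &dbr_page);
 *
 *     if (dbr_offset < 0)
 *             return -ENOMEM;
 *     rxq->dbr_umem_id = dbr_page->umem->umem_id;
 *     rxq->dbr_offset = (uint64_t)dbr_offset;
 *     ...
 *     mlx5_release_dbr(dev, rxq->dbr_umem_id, rxq->dbr_offset);
 */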
1957
1958 /**
1959  * Release a door-bell record.
1960  *
1961  * @param [in] dev
1962  *   Pointer to Ethernet device.
1963  * @param [in] umem_id
1964  *   UMEM ID of page containing the door-bell record to release.
1965  * @param [in] offset
1966  *   Offset of door-bell record in page.
1967  *
1968  * @return
1969  *   0 on success, a negative error value otherwise.
1970  */
1971 int32_t
1972 mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
1973 {
1974         struct mlx5_priv *priv = dev->data->dev_private;
1975         struct mlx5_devx_dbr_page *page = NULL;
1976         int ret = 0;
1977
1978         LIST_FOREACH(page, &priv->dbrpgs, next)
1979                 /* Find the page this address belongs to. */
1980                 if (page->umem->umem_id == umem_id)
1981                         break;
1982         if (!page)
1983                 return -EINVAL;
1984         page->dbr_count--;
1985         if (!page->dbr_count) {
1986                 /* Page not used, free it and remove from list. */
1987                 LIST_REMOVE(page, next);
1988                 if (page->umem)
1989                         ret = -mlx5_glue->devx_umem_dereg(page->umem);
1990                 rte_free(page);
1991         } else {
1992                 /* Mark in bitmap that this door-bell is not in use. */
1993                 offset /= MLX5_DBR_SIZE;
1994                 int i = offset / 64;
1995                 int j = offset % 64;
1996
1997                 page->dbr_bitmap[i] &= ~(1ULL << j);
1998         }
1999         return ret;
2000 }
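/*
 * Index arithmetic (worked example, assuming MLX5_DBR_SIZE matches the
 * 8-byte record size implied by the sizeof(uint64_t) scaling above):
 * mlx5_get_dbr() returns ((i * 64) + j) * 8, so i = 1, j = 3 yields
 * offset (64 + 3) * 8 = 536. mlx5_release_dbr() inverts this:
 * 536 / MLX5_DBR_SIZE = record 67, hence bitmap word i = 67 / 64 = 1
 * and bit j = 67 % 64 = 3, which is cleared in page->dbr_bitmap[1].
 */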
2001
2002 /**
2003  * Check sibling device configurations.
2004  *
2005  * Sibling devices sharing the Infiniband device context
2006  * should have compatible configurations. This applies to
2007  * representors and bonding slaves.
2008  *
2009  * @param priv
2010  *   Private device descriptor.
2011  * @param config
2012  *   Configuration of the device about to be created.
2013  *
2014  * @return
2015  *   0 on success, EINVAL otherwise
2016  */
2017 static int
2018 mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
2019                               struct mlx5_dev_config *config)
2020 {
2021         struct mlx5_ibv_shared *sh = priv->sh;
2022         struct mlx5_dev_config *sh_conf = NULL;
2023         uint16_t port_id;
2024
2025         assert(sh);
2026         /* Nothing to compare for the single/first device. */
2027         if (sh->refcnt == 1)
2028                 return 0;
2029         /* Find the device with shared context. */
2030         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
2031                 struct mlx5_priv *opriv =
2032                         rte_eth_devices[port_id].data->dev_private;
2033
2034                 if (opriv && opriv != priv && opriv->sh == sh) {
2035                         sh_conf = &opriv->config;
2036                         break;
2037                 }
2038         }
2039         if (!sh_conf)
2040                 return 0;
2041         if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
2042                 DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
2043                              " for shared %s context", sh->ibdev_name);
2044                 rte_errno = EINVAL;
2045                 return rte_errno;
2046         }
2047         if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
2048                 DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
2049                              " for shared %s context", sh->ibdev_name);
2050                 rte_errno = EINVAL;
2051                 return rte_errno;
2052         }
2053         return 0;
2054 }
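/*
 * Illustrative consequence of the check above: all ports spawned over
 * the same Infiniband context (e.g. a PF and its representors) must
 * agree on dv_flow_en and dv_xmeta_en; spawning the PF with
 * dv_flow_en=1 and then a representor with dv_flow_en=0 makes the
 * second spawn fail with rte_errno set to EINVAL.
 */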
2055 /**
2056  * Spawn an Ethernet device from Verbs information.
2057  *
2058  * @param dpdk_dev
2059  *   Backing DPDK device.
2060  * @param spawn
2061  *   Verbs device parameters (name, port, switch_info) to spawn.
2062  * @param config
2063  *   Device configuration parameters.
2064  *
2065  * @return
2066  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
2067  *   is set. The following errors are defined:
2068  *
2069  *   EBUSY: device is not supposed to be spawned.
2070  *   EEXIST: device is already spawned
2071  */
2072 static struct rte_eth_dev *
2073 mlx5_dev_spawn(struct rte_device *dpdk_dev,
2074                struct mlx5_dev_spawn_data *spawn,
2075                struct mlx5_dev_config config)
2076 {
2077         const struct mlx5_switch_info *switch_info = &spawn->info;
2078         struct mlx5_ibv_shared *sh = NULL;
2079         struct ibv_port_attr port_attr;
2080         struct mlx5dv_context dv_attr = { .comp_mask = 0 };
2081         struct rte_eth_dev *eth_dev = NULL;
2082         struct mlx5_priv *priv = NULL;
2083         int err = 0;
2084         unsigned int hw_padding = 0;
2085         unsigned int mps;
2086         unsigned int cqe_comp;
2087         unsigned int cqe_pad = 0;
2088         unsigned int tunnel_en = 0;
2089         unsigned int mpls_en = 0;
2090         unsigned int swp = 0;
2091         unsigned int mprq = 0;
2092         unsigned int mprq_min_stride_size_n = 0;
2093         unsigned int mprq_max_stride_size_n = 0;
2094         unsigned int mprq_min_stride_num_n = 0;
2095         unsigned int mprq_max_stride_num_n = 0;
2096         struct rte_ether_addr mac;
2097         char name[RTE_ETH_NAME_MAX_LEN];
2098         int own_domain_id = 0;
2099         uint16_t port_id;
2100         unsigned int i;
2101 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2102         struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
2103 #endif
2104
2105         /* Determine if this port representor is supposed to be spawned. */
2106         if (switch_info->representor && dpdk_dev->devargs) {
2107                 struct rte_eth_devargs eth_da;
2108
2109                 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
2110                 if (err) {
2111                         rte_errno = -err;
2112                         DRV_LOG(ERR, "failed to process device arguments: %s",
2113                                 strerror(rte_errno));
2114                         return NULL;
2115                 }
2116                 for (i = 0; i < eth_da.nb_representor_ports; ++i)
2117                         if (eth_da.representor_ports[i] ==
2118                             (uint16_t)switch_info->port_name)
2119                                 break;
2120                 if (i == eth_da.nb_representor_ports) {
2121                         rte_errno = EBUSY;
2122                         return NULL;
2123                 }
2124         }
2125         /* Build device name. */
2126         if (spawn->pf_bond < 0) {
2127                 /* Single device. */
2128                 if (!switch_info->representor)
2129                         strlcpy(name, dpdk_dev->name, sizeof(name));
2130                 else
2131                         snprintf(name, sizeof(name), "%s_representor_%u",
2132                                  dpdk_dev->name, switch_info->port_name);
2133         } else {
2134                 /* Bonding device. */
2135                 if (!switch_info->representor)
2136                         snprintf(name, sizeof(name), "%s_%s",
2137                                  dpdk_dev->name, spawn->ibv_dev->name);
2138                 else
2139                         snprintf(name, sizeof(name), "%s_%s_representor_%u",
2140                                  dpdk_dev->name, spawn->ibv_dev->name,
2141                                  switch_info->port_name);
2142         }
2143         /* Check if the device is already spawned. */
2144         if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
2145                 rte_errno = EEXIST;
2146                 return NULL;
2147         }
2148         DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
2149         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2150                 eth_dev = rte_eth_dev_attach_secondary(name);
2151                 if (eth_dev == NULL) {
2152                         DRV_LOG(ERR, "can not attach rte ethdev");
2153                         rte_errno = ENOMEM;
2154                         return NULL;
2155                 }
2156                 eth_dev->device = dpdk_dev;
2157                 eth_dev->dev_ops = &mlx5_dev_sec_ops;
2158                 err = mlx5_proc_priv_init(eth_dev);
2159                 if (err)
2160                         return NULL;
2161                 /* Receive command fd from primary process */
2162                 err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
2163                 if (err < 0)
2164                         return NULL;
2165                 /* Remap UAR for Tx queues. */
2166                 err = mlx5_tx_uar_init_secondary(eth_dev, err);
2167                 if (err)
2168                         return NULL;
2169                 /*
2170                  * Ethdev pointer is still required as input since
2171                  * the primary device is not accessible from the
2172                  * secondary process.
2173                  */
2174                 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
2175                 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
2176                 return eth_dev;
2177         }
2178         /*
2179          * Some parameters ("tx_db_nc" in particular) are needed in
2180          * advance to create the dv/verbs device context. We process
2181          * the devargs here to obtain them, and process the devargs
2182          * again later to override some hardware settings.
2183          */
2184         err = mlx5_args(&config, dpdk_dev->devargs);
2185         if (err) {
2186                 err = rte_errno;
2187                 DRV_LOG(ERR, "failed to process device arguments: %s",
2188                         strerror(rte_errno));
2189                 goto error;
2190         }
2191         sh = mlx5_alloc_shared_ibctx(spawn, &config);
2192         if (!sh)
2193                 return NULL;
2194         config.devx = sh->devx;
2195 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
2196         config.dest_tir = 1;
2197 #endif
2198 #ifdef HAVE_IBV_MLX5_MOD_SWP
2199         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
2200 #endif
2201         /*
2202          * Multi-packet send is supported by ConnectX-4 Lx PF as well
2203          * as all ConnectX-5 devices.
2204          */
2205 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2206         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
2207 #endif
2208 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
2209         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
2210 #endif
2211         mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
2212         if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
2213                 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
2214                         DRV_LOG(DEBUG, "enhanced MPW is supported");
2215                         mps = MLX5_MPW_ENHANCED;
2216                 } else {
2217                         DRV_LOG(DEBUG, "MPW is supported");
2218                         mps = MLX5_MPW;
2219                 }
2220         } else {
2221                 DRV_LOG(DEBUG, "MPW isn't supported");
2222                 mps = MLX5_MPW_DISABLED;
2223         }
2224 #ifdef HAVE_IBV_MLX5_MOD_SWP
2225         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
2226                 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
2227         DRV_LOG(DEBUG, "SWP support: %u", swp);
2228 #endif
2229         config.swp = !!swp;
2230 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
2231         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
2232                 struct mlx5dv_striding_rq_caps mprq_caps =
2233                         dv_attr.striding_rq_caps;
2234
2235                 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
2236                         mprq_caps.min_single_stride_log_num_of_bytes);
2237                 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
2238                         mprq_caps.max_single_stride_log_num_of_bytes);
2239                 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
2240                         mprq_caps.min_single_wqe_log_num_of_strides);
2241                 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
2242                         mprq_caps.max_single_wqe_log_num_of_strides);
2243                 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
2244                         mprq_caps.supported_qpts);
2245                 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
2246                 mprq = 1;
2247                 mprq_min_stride_size_n =
2248                         mprq_caps.min_single_stride_log_num_of_bytes;
2249                 mprq_max_stride_size_n =
2250                         mprq_caps.max_single_stride_log_num_of_bytes;
2251                 mprq_min_stride_num_n =
2252                         mprq_caps.min_single_wqe_log_num_of_strides;
2253                 mprq_max_stride_num_n =
2254                         mprq_caps.max_single_wqe_log_num_of_strides;
2255                 config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
2256                                                    mprq_min_stride_num_n);
2257         }
2258 #endif
2259         if (RTE_CACHE_LINE_SIZE == 128 &&
2260             !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
2261                 cqe_comp = 0;
2262         else
2263                 cqe_comp = 1;
2264         config.cqe_comp = cqe_comp;
2265 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
2266         /* Whether device supports 128B Rx CQE padding. */
2267         cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
2268                   (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
2269 #endif
2270 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2271         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
2272                 tunnel_en = ((dv_attr.tunnel_offloads_caps &
2273                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
2274                              (dv_attr.tunnel_offloads_caps &
2275                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
2276                              (dv_attr.tunnel_offloads_caps &
2277                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
2278         }
2279         DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
2280                 tunnel_en ? "" : "not ");
2281 #else
2282         DRV_LOG(WARNING,
2283                 "tunnel offloading disabled due to old OFED/rdma-core version");
2284 #endif
2285         config.tunnel_en = tunnel_en;
2286 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2287         mpls_en = ((dv_attr.tunnel_offloads_caps &
2288                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
2289                    (dv_attr.tunnel_offloads_caps &
2290                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
2291         DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
2292                 mpls_en ? "" : "not ");
2293 #else
2294         DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
2295                 " old OFED/rdma-core version or firmware configuration");
2296 #endif
2297         config.mpls_en = mpls_en;
2298         /* Check port status. */
2299         err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
2300         if (err) {
2301                 DRV_LOG(ERR, "port query failed: %s", strerror(err));
2302                 goto error;
2303         }
2304         if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
2305                 DRV_LOG(ERR, "port is not configured in Ethernet mode");
2306                 err = EINVAL;
2307                 goto error;
2308         }
2309         if (port_attr.state != IBV_PORT_ACTIVE)
2310                 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
2311                         mlx5_glue->port_state_str(port_attr.state),
2312                         port_attr.state);
2313         /* Allocate private eth device data. */
2314         priv = rte_zmalloc("ethdev private structure",
2315                            sizeof(*priv),
2316                            RTE_CACHE_LINE_SIZE);
2317         if (priv == NULL) {
2318                 DRV_LOG(ERR, "priv allocation failure");
2319                 err = ENOMEM;
2320                 goto error;
2321         }
2322         priv->sh = sh;
2323         priv->ibv_port = spawn->ibv_port;
2324         priv->pci_dev = spawn->pci_dev;
2325         priv->mtu = RTE_ETHER_MTU;
2326 #ifndef RTE_ARCH_64
2327         /* Initialize UAR access locks for 32bit implementations. */
2328         rte_spinlock_init(&priv->uar_lock_cq);
2329         for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
2330                 rte_spinlock_init(&priv->uar_lock[i]);
2331 #endif
2332         /* Some internal functions rely on Netlink sockets, open them now. */
2333         priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
2334         priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
2335         priv->representor = !!switch_info->representor;
2336         priv->master = !!switch_info->master;
2337         priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2338         priv->vport_meta_tag = 0;
2339         priv->vport_meta_mask = 0;
2340         priv->pf_bond = spawn->pf_bond;
2341 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2342         /*
2343          * The DevX port query API is implemented. E-Switch may use
2344          * either vport or reg_c[0] metadata register to match on
2345          * vport index. The engaged part of metadata register is
2346          * defined by mask.
2347          */
2348         if (switch_info->representor || switch_info->master) {
2349                 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
2350                                       MLX5DV_DEVX_PORT_MATCH_REG_C_0;
2351                 err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port,
2352                                                  &devx_port);
2353                 if (err) {
2354                         DRV_LOG(WARNING,
2355                                 "can't query devx port %d on device %s",
2356                                 spawn->ibv_port, spawn->ibv_dev->name);
2357                         devx_port.comp_mask = 0;
2358                 }
2359         }
2360         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
2361                 priv->vport_meta_tag = devx_port.reg_c_0.value;
2362                 priv->vport_meta_mask = devx_port.reg_c_0.mask;
2363                 if (!priv->vport_meta_mask) {
2364                         DRV_LOG(ERR, "vport zero mask for port %d"
2365                                      " on bonding device %s",
2366                                      spawn->ibv_port, spawn->ibv_dev->name);
2367                         err = ENOTSUP;
2368                         goto error;
2369                 }
2370                 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
2371                         DRV_LOG(ERR, "invalid vport tag for port %d"
2372                                      " on bonding device %s",
2373                                      spawn->ibv_port, spawn->ibv_dev->name);
2374                         err = ENOTSUP;
2375                         goto error;
2376                 }
2377         }
2378         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
2379                 priv->vport_id = devx_port.vport_num;
2380         } else if (spawn->pf_bond >= 0) {
2381                 DRV_LOG(ERR, "can't deduce vport index for port %d"
2382                              " on bonding device %s",
2383                              spawn->ibv_port, spawn->ibv_dev->name);
2384                 err = ENOTSUP;
2385                 goto error;
2386         } else {
2387                 /* Deduce the vport index in the compatible way. */
2388                 priv->vport_id = switch_info->representor ?
2389                                  switch_info->port_name + 1 : -1;
2390         }
2391 #else
2392         /*
2393          * Kernel/rdma_core supports single E-Switch per PF configurations
2394          * only, and the vport_id field contains the vport index of the
2395          * associated VF, which is deduced from the representor port name.
2396          * For example, suppose IB device port 10 has an attached network
2397          * device eth0 whose port name attribute is pf0vf2; the VF number
2398          * is then deduced as 2 and the vport index is set to 3 (2 + 1).
2399          * This assignment schema should be changed if multiple E-Switch
2400          * instances per PF configurations or/and PCI subfunctions are
2401          * added.
2402          */
2403         priv->vport_id = switch_info->representor ?
2404                          switch_info->port_name + 1 : -1;
2405 #endif
2406         /* representor_id field keeps the unmodified VF index. */
2407         priv->representor_id = switch_info->representor ?
2408                                switch_info->port_name : -1;
2409         /*
2410          * Look for sibling devices in order to reuse their switch domain
2411          * if any, otherwise allocate one.
2412          */
2413         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
2414                 const struct mlx5_priv *opriv =
2415                         rte_eth_devices[port_id].data->dev_private;
2416
2417                 if (!opriv ||
2418                     opriv->sh != priv->sh ||
2419                     opriv->domain_id ==
2420                     RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
2421                         continue;
2422                 priv->domain_id = opriv->domain_id;
2423                 break;
2424         }
2425         if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
2426                 err = rte_eth_switch_domain_alloc(&priv->domain_id);
2427                 if (err) {
2428                         err = rte_errno;
2429                         DRV_LOG(ERR, "unable to allocate switch domain: %s",
2430                                 strerror(rte_errno));
2431                         goto error;
2432                 }
2433                 own_domain_id = 1;
2434         }
2435         /* Override some values set by hardware configuration. */
2436         mlx5_args(&config, dpdk_dev->devargs);
2437         err = mlx5_dev_check_sibling_config(priv, &config);
2438         if (err)
2439                 goto error;
2440         config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
2441                             IBV_DEVICE_RAW_IP_CSUM);
2442         DRV_LOG(DEBUG, "checksum offloading is %ssupported",
2443                 (config.hw_csum ? "" : "not "));
2444 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
2445         !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
2446         DRV_LOG(DEBUG, "counters are not supported");
2447 #endif
2448 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
2449         if (config.dv_flow_en) {
2450                 DRV_LOG(WARNING, "DV flow is not supported");
2451                 config.dv_flow_en = 0;
2452         }
2453 #endif
2454         config.ind_table_max_size =
2455                 sh->device_attr.rss_caps.max_rwq_indirection_table_size;
2456         /*
2457          * Remove this check once DPDK supports larger/variable
2458          * indirection tables.
2459          */
2460         if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
2461                 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
2462         DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
2463                 config.ind_table_max_size);
2464         config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
2465                                   IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
2466         DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
2467                 (config.hw_vlan_strip ? "" : "not "));
2468         config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
2469                                  IBV_RAW_PACKET_CAP_SCATTER_FCS);
2470         DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
2471                 (config.hw_fcs_strip ? "" : "not "));
2472 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
2473         hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
2474 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
2475         hw_padding = !!(sh->device_attr.device_cap_flags_ex &
2476                         IBV_DEVICE_PCI_WRITE_END_PADDING);
2477 #endif
2478         if (config.hw_padding && !hw_padding) {
2479                 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
2480                 config.hw_padding = 0;
2481         } else if (config.hw_padding) {
2482                 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
2483         }
2484         config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
2485                       (sh->device_attr.tso_caps.supported_qpts &
2486                        (1 << IBV_QPT_RAW_PACKET)));
2487         if (config.tso)
2488                 config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
2489         /*
2490          * MPW is disabled by default, while the Enhanced MPW is enabled
2491          * by default.
2492          */
2493         if (config.mps == MLX5_ARG_UNSET)
2494                 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
2495                                                           MLX5_MPW_DISABLED;
2496         else
2497                 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
2498         DRV_LOG(INFO, "%sMPS is %s",
2499                 config.mps == MLX5_MPW_ENHANCED ? "enhanced " :
2500                 config.mps == MLX5_MPW ? "legacy " : "",
2501                 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
2502         if (config.cqe_comp && !cqe_comp) {
2503                 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
2504                 config.cqe_comp = 0;
2505         }
2506         if (config.cqe_pad && !cqe_pad) {
2507                 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
2508                 config.cqe_pad = 0;
2509         } else if (config.cqe_pad) {
2510                 DRV_LOG(INFO, "Rx CQE padding is enabled");
2511         }
2512         if (config.devx) {
2513                 priv->counter_fallback = 0;
2514                 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
2515                 if (err) {
2516                         err = -err;
2517                         goto error;
2518                 }
2519                 if (!config.hca_attr.flow_counters_dump)
2520                         priv->counter_fallback = 1;
2521 #ifndef HAVE_IBV_DEVX_ASYNC
2522                 priv->counter_fallback = 1;
2523 #endif
2524                 if (priv->counter_fallback)
2525                         DRV_LOG(INFO, "Use fall-back DV counter management");
2526                 /* Check for LRO support. */
2527                 if (config.dest_tir && config.hca_attr.lro_cap &&
2528                     config.dv_flow_en) {
2529                         /* TBD check tunnel lro caps. */
2530                         config.lro.supported = config.hca_attr.lro_cap;
2531                         DRV_LOG(DEBUG, "Device supports LRO");
2532                         /*
2533                          * If LRO timeout is not configured by application,
2534                          * use the minimal supported value.
2535                          */
2536                         if (!config.lro.timeout)
2537                                 config.lro.timeout =
2538                                 config.hca_attr.lro_timer_supported_periods[0];
2539                         DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
2540                                 config.lro.timeout);
2541                 }
2542 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
2543                 if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup &&
2544                     config.dv_flow_en) {
2545                         uint8_t reg_c_mask =
2546                                 config.hca_attr.qos.flow_meter_reg_c_ids;
2547                         /*
2548                          * The meter needs two REG_C's, for color match and
2549                          * pre-suffix flow match; here get the REG_C for color
2550                          * match. REG_C_0 and REG_C_1 are reserved for metadata.
2551                          */
2552                         reg_c_mask &= 0xfc;
2553                         if (__builtin_popcount(reg_c_mask) < 1) {
2554                                 priv->mtr_en = 0;
2555                                 DRV_LOG(WARNING, "No available register for"
2556                                         " meter.");
2557                         } else {
2558                                 priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
2559                                                       REG_C_0;
2560                                 priv->mtr_en = 1;
2561                                 priv->mtr_reg_share =
2562                                       config.hca_attr.qos.flow_meter_reg_share;
2563                                 DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
2564                                         priv->mtr_color_reg);
2565                         }
2566                 }
2567 #endif
2568         }
2569         if (config.mprq.enabled && mprq) {
2570                 if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
2571                     config.mprq.stride_num_n < mprq_min_stride_num_n) {
2572                         config.mprq.stride_num_n =
2573                                 RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
2574                                         mprq_min_stride_num_n);
2575                         DRV_LOG(WARNING,
2576                                 "the number of strides"
2577                                 " for Multi-Packet RQ is out of range,"
2578                                 " setting default value (%u)",
2579                                 1 << config.mprq.stride_num_n);
2580                 }
2581                 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
2582                 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
2583         } else if (config.mprq.enabled && !mprq) {
2584                 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
2585                 config.mprq.enabled = 0;
2586         }
2587         if (config.max_dump_files_num == 0)
2588                 config.max_dump_files_num = 128;
2589         eth_dev = rte_eth_dev_allocate(name);
2590         if (eth_dev == NULL) {
2591                 DRV_LOG(ERR, "can not allocate rte ethdev");
2592                 err = ENOMEM;
2593                 goto error;
2594         }
2595         /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
2596         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2597         if (priv->representor) {
2598                 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
2599                 eth_dev->data->representor_id = priv->representor_id;
2600         }
2601         /*
2602          * Store associated network device interface index. This index
2603          * is permanent throughout the lifetime of the device, so we
2604          * may store the ifindex here and use the cached value later.
2605          */
2606         assert(spawn->ifindex);
2607         priv->if_index = spawn->ifindex;
2608         eth_dev->data->dev_private = priv;
2609         priv->dev_data = eth_dev->data;
2610         eth_dev->data->mac_addrs = priv->mac;
2611         eth_dev->device = dpdk_dev;
2612         /* Configure the first MAC address by default. */
2613         if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
2614                 DRV_LOG(ERR,
2615                         "port %u cannot get MAC address, is mlx5_en"
2616                         " loaded? (errno: %s)",
2617                         eth_dev->data->port_id, strerror(rte_errno));
2618                 err = ENODEV;
2619                 goto error;
2620         }
2621         DRV_LOG(INFO,
2622                 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
2623                 eth_dev->data->port_id,
2624                 mac.addr_bytes[0], mac.addr_bytes[1],
2625                 mac.addr_bytes[2], mac.addr_bytes[3],
2626                 mac.addr_bytes[4], mac.addr_bytes[5]);
2627 #ifndef NDEBUG
2628         {
2629                 char ifname[IF_NAMESIZE];
2630
2631                 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
2632                         DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
2633                                 eth_dev->data->port_id, ifname);
2634                 else
2635                         DRV_LOG(DEBUG, "port %u ifname is unknown",
2636                                 eth_dev->data->port_id);
2637         }
2638 #endif
2639         /* Get actual MTU if possible. */
2640         err = mlx5_get_mtu(eth_dev, &priv->mtu);
2641         if (err) {
2642                 err = rte_errno;
2643                 goto error;
2644         }
2645         DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
2646                 priv->mtu);
2647         /* Initialize burst functions to prevent crashes before link-up. */
2648         eth_dev->rx_pkt_burst = removed_rx_burst;
2649         eth_dev->tx_pkt_burst = removed_tx_burst;
2650         eth_dev->dev_ops = &mlx5_dev_ops;
2651         /* Register MAC address. */
2652         claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
2653         if (config.vf && config.vf_nl_en)
2654                 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
2655                                       mlx5_ifindex(eth_dev),
2656                                       eth_dev->data->mac_addrs,
2657                                       MLX5_MAX_MAC_ADDRESSES);
2658         TAILQ_INIT(&priv->flows);
2659         TAILQ_INIT(&priv->ctrl_flows);
2660         TAILQ_INIT(&priv->flow_meters);
2661         TAILQ_INIT(&priv->flow_meter_profiles);
2662         /* Hint libmlx5 to use PMD allocator for data plane resources. */
2663         struct mlx5dv_ctx_allocators alctr = {
2664                 .alloc = &mlx5_alloc_verbs_buf,
2665                 .free = &mlx5_free_verbs_buf,
2666                 .data = priv,
2667         };
2668         mlx5_glue->dv_set_context_attr(sh->ctx,
2669                                        MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2670                                        (void *)((uintptr_t)&alctr));
2671         /* Bring Ethernet device up. */
2672         DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
2673                 eth_dev->data->port_id);
2674         mlx5_set_link_up(eth_dev);
2675         /*
2676          * Even though the interrupt handler is not installed yet,
2677          * interrupts will still trigger on the async_fd from
2678          * Verbs context returned by ibv_open_device().
2679          */
2680         mlx5_link_update(eth_dev, 0);
2681 #ifdef HAVE_MLX5DV_DR_ESWITCH
2682         if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
2683               (switch_info->representor || switch_info->master)))
2684                 config.dv_esw_en = 0;
2685 #else
2686         config.dv_esw_en = 0;
2687 #endif
2688         /* Detect minimal data bytes to inline. */
2689         mlx5_set_min_inline(spawn, &config);
2690         /* Store device configuration on private structure. */
2691         priv->config = config;
2692         /* Create context for virtual machine VLAN workaround. */
2693         priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
2694         if (config.dv_flow_en) {
2695                 err = mlx5_alloc_shared_dr(priv);
2696                 if (err)
2697                         goto error;
2698                 /*
2699                  * RSS id is shared with meter flow id. Meter flow id can only
2700                  * use the 24 MSB of the register.
2701                  */
2702                 priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
2703                                      MLX5_MTR_COLOR_BITS);
2704                 if (!priv->qrss_id_pool) {
2705                         DRV_LOG(ERR, "can't create flow id pool");
2706                         err = ENOMEM;
2707                         goto error;
2708                 }
2709         }
2710         /* Supported Verbs flow priority number detection. */
2711         err = mlx5_flow_discover_priorities(eth_dev);
2712         if (err < 0) {
2713                 err = -err;
2714                 goto error;
2715         }
2716         priv->config.flow_prio = err;
2717         if (!priv->config.dv_esw_en &&
2718             priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2719                 DRV_LOG(WARNING, "metadata mode %u is not supported "
2720                                  "(no E-Switch)", priv->config.dv_xmeta_en);
2721                 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
2722         }
2723         mlx5_set_metadata_mask(eth_dev);
2724         if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
2725             !priv->sh->dv_regc0_mask) {
2726                 DRV_LOG(ERR, "metadata mode %u is not supported "
2727                              "(no metadata reg_c[0] is available)",
2728                              priv->config.dv_xmeta_en);
2729                 err = ENOTSUP;
2730                 goto error;
2731         }
2732         /* Query availability of metadata reg_c's. */
2733         err = mlx5_flow_discover_mreg_c(eth_dev);
2734         if (err < 0) {
2735                 err = -err;
2736                 goto error;
2737         }
2738         if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
2739                 DRV_LOG(DEBUG,
2740                         "port %u extensive metadata register is not supported",
2741                         eth_dev->data->port_id);
2742                 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2743                         DRV_LOG(ERR, "metadata mode %u is not supported "
2744                                      "(no metadata registers available)",
2745                                      priv->config.dv_xmeta_en);
2746                         err = ENOTSUP;
2747                         goto error;
2748                 }
2749         }
2750         if (priv->config.dv_flow_en &&
2751             priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
2752             mlx5_flow_ext_mreg_supported(eth_dev) &&
2753             priv->sh->dv_regc0_mask) {
2754                 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
2755                                                       MLX5_FLOW_MREG_HTABLE_SZ);
2756                 if (!priv->mreg_cp_tbl) {
2757                         err = ENOMEM;
2758                         goto error;
2759                 }
2760         }
2761         return eth_dev;
2762 error:
2763         if (priv) {
2764                 if (priv->mreg_cp_tbl)
2765                         mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
2766                 if (priv->sh)
2767                         mlx5_free_shared_dr(priv);
2768                 if (priv->nl_socket_route >= 0)
2769                         close(priv->nl_socket_route);
2770                 if (priv->nl_socket_rdma >= 0)
2771                         close(priv->nl_socket_rdma);
2772                 if (priv->vmwa_context)
2773                         mlx5_vlan_vmwa_exit(priv->vmwa_context);
2774                 if (priv->qrss_id_pool)
2775                         mlx5_flow_id_pool_release(priv->qrss_id_pool);
2776                 if (own_domain_id)
2777                         claim_zero(rte_eth_switch_domain_free(priv->domain_id));
2778                 rte_free(priv);
2779                 if (eth_dev != NULL)
2780                         eth_dev->data->dev_private = NULL;
2781         }
2782         if (eth_dev != NULL) {
2783                 /* mac_addrs must not be freed alone: it is part of dev_private. */
2784                 eth_dev->data->mac_addrs = NULL;
2785                 rte_eth_dev_release_port(eth_dev);
2786         }
2787         if (sh)
2788                 mlx5_free_shared_ibctx(sh);
2789         assert(err > 0);
2790         rte_errno = err;
2791         return NULL;
2792 }
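
/*
 * Usage sketch (illustrative only): the spawn helper above is driven
 * from mlx5_pci_probe() further below, one call per entry of the
 * sorted spawn data list:
 *
 *     list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, &list[i],
 *                                      dev_config);
 *     if (!list[i].eth_dev && rte_errno != EBUSY && rte_errno != EEXIST)
 *             ... abort probing and roll back ...
 */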
2793
2794 /**
2795  * Comparison callback to sort device data.
2796  *
2797  * This is meant to be used with qsort().
2798  *
2799  * @param[in] a
2800  *   Pointer to a pointer to the first data object.
2801  * @param[in] b
2802  *   Pointer to a pointer to the second data object.
2803  *
2804  * @return
2805  *   0 if both objects are equal, less than 0 if the first argument is less
2806  *   than the second, greater than 0 otherwise.
2807  */
2808 static int
2809 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
2810 {
2811         const struct mlx5_switch_info *si_a =
2812                 &((const struct mlx5_dev_spawn_data *)a)->info;
2813         const struct mlx5_switch_info *si_b =
2814                 &((const struct mlx5_dev_spawn_data *)b)->info;
2815         int ret;
2816
2817         /* Master device first. */
2818         ret = si_b->master - si_a->master;
2819         if (ret)
2820                 return ret;
2821         /* Then representor devices. */
2822         ret = si_b->representor - si_a->representor;
2823         if (ret)
2824                 return ret;
2825         /* Unidentified devices come last in no specific order. */
2826         if (!si_a->representor)
2827                 return 0;
2828         /* Order representors by name. */
2829         return si_a->port_name - si_b->port_name;
2830 }
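
/*
 * Usage sketch: this comparator mirrors the actual qsort() call in
 * mlx5_pci_probe() below, ordering "ns" spawn entries so the master
 * comes first, then representors by ascending port name:
 *
 *     qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
 */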
2831
2832 /**
2833  * Match PCI information for possible slaves of a bonding device.
2834  *
2835  * @param[in] ibv_dev
2836  *   Pointer to Infiniband device structure.
2837  * @param[in] pci_dev
2838  *   Pointer to PCI device structure to match PCI address.
2839  * @param[in] nl_rdma
2840  *   Netlink RDMA group socket handle.
2841  *
2842  * @return
2843  *   negative value if no bonding device is found, otherwise
2844  *   the non-negative index of the slave PF in the bonding device.
2845  */
2846 static int
2847 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
2848                            const struct rte_pci_device *pci_dev,
2849                            int nl_rdma)
2850 {
2851         char ifname[IF_NAMESIZE + 1];
2852         unsigned int ifindex;
2853         unsigned int np, i;
2854         FILE *file = NULL;
2855         int pf = -1;
2856
2857         /*
2858          * Try to get the master device name. If something goes
2859          * wrong, assume kernel support is lacking and there are
2860          * no bonding devices.
2861          */
2862         if (nl_rdma < 0)
2863                 return -1;
2864         if (!strstr(ibv_dev->name, "bond"))
2865                 return -1;
2866         np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
2867         if (!np)
2868                 return -1;
2869         /*
2870          * The master device might not be on the predefined
2871          * port (port index 1 is not guaranteed), so we have
2872          * to scan all Infiniband device ports and find the
2873          * master.
2874          */
2875         for (i = 1; i <= np; ++i) {
2876                 /* Check whether Infiniband port is populated. */
2877                 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
2878                 if (!ifindex)
2879                         continue;
2880                 if (!if_indextoname(ifindex, ifname))
2881                         continue;
2882                 /* Try to read bonding slave names from sysfs. */
2883                 MKSTR(slaves,
2884                       "/sys/class/net/%s/master/bonding/slaves", ifname);
2885                 file = fopen(slaves, "r");
2886                 if (file)
2887                         break;
2888         }
2889         if (!file)
2890                 return -1;
2891         /* Use safe format to check maximal buffer length. */
2892         assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
2893         while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
2894                 char tmp_str[IF_NAMESIZE + 32];
2895                 struct rte_pci_addr pci_addr;
2896                 struct mlx5_switch_info info;
2897
2898                 /* Process slave interface names in the loop. */
2899                 snprintf(tmp_str, sizeof(tmp_str),
2900                          "/sys/class/net/%s", ifname);
2901                 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
2902                         DRV_LOG(WARNING, "cannot get PCI address"
2903                                          " for netdev \"%s\"", ifname);
2904                         continue;
2905                 }
2906                 if (pci_dev->addr.domain != pci_addr.domain ||
2907                     pci_dev->addr.bus != pci_addr.bus ||
2908                     pci_dev->addr.devid != pci_addr.devid ||
2909                     pci_dev->addr.function != pci_addr.function)
2910                         continue;
2911                 /* Slave interface PCI address match found. */
2912                 fclose(file);
2913                 snprintf(tmp_str, sizeof(tmp_str),
2914                          "/sys/class/net/%s/phys_port_name", ifname);
2915                 file = fopen(tmp_str, "rb");
2916                 if (!file)
2917                         break;
2918                 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
2919                 if (fscanf(file, "%32s", tmp_str) == 1)
2920                         mlx5_translate_port_name(tmp_str, &info);
2921                 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
2922                     info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2923                         pf = info.port_name;
2924                 break;
2925         }
2926         if (file)
2927                 fclose(file);
2928         return pf;
2929 }
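
/*
 * Illustrative sysfs layout consulted above (interface names are
 * hypothetical), for a VF LAG bond with slave netdevs "enp4s0f0" and
 * "enp4s0f1":
 *
 *     /sys/class/net/enp4s0f0/master/bonding/slaves -> "enp4s0f0 enp4s0f1"
 *     /sys/class/net/enp4s0f0/phys_port_name        -> "p0"
 *
 * Each listed slave is matched by PCI address against pci_dev; on a
 * match its phys_port_name is translated and, for legacy/uplink name
 * types, the port name provides the slave PF index returned as "pf".
 */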
2930
2931 /**
2932  * DPDK callback to register a PCI device.
2933  *
2934  * This function spawns Ethernet devices out of a given PCI device.
2935  *
2936  * @param[in] pci_drv
2937  *   PCI driver structure (mlx5_driver).
2938  * @param[in] pci_dev
2939  *   PCI device information.
2940  *
2941  * @return
2942  *   0 on success, a negative errno value otherwise and rte_errno is set.
2943  */
2944 static int
2945 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2946                struct rte_pci_device *pci_dev)
2947 {
2948         struct ibv_device **ibv_list;
2949         /*
2950          * Number of found IB devices matching the requested PCI BDF.
2951          * nd != 1 means there are multiple IB devices over the same
2952          * PCI device, i.e. representors and a master.
2953          */
2954         unsigned int nd = 0;
2955         /*
2956          * Number of found IB device ports. nd = 1 and np = 1..n means
2957          * we have a single multiport IB device, and there may be
2958          * representors attached to some of the found ports.
2959          */
2960         unsigned int np = 0;
2961         /*
2962          * Number of DPDK Ethernet devices to spawn, either over
2963          * multiple IB devices or multiple ports of a single IB device.
2964          * In practice this is the number of spawn iterations.
2965          */
2966         unsigned int ns = 0;
2967         /*
2968          * Bonding device
2969          *   < 0 - no bonding device (single one)
2970          *  >= 0 - bonding device (value is slave PF index)
2971          */
2972         int bd = -1;
2973         struct mlx5_dev_spawn_data *list = NULL;
2974         struct mlx5_dev_config dev_config;
2975         int ret;
2976
2977         if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) {
2978                 DRV_LOG(DEBUG, "Skip probing - should be probed by another mlx5"
2979                         " driver.");
2980                 return 1;
2981         }
2982         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2983                 mlx5_pmd_socket_init();
2984         ret = mlx5_init_once();
2985         if (ret) {
2986                 DRV_LOG(ERR, "unable to init PMD global data: %s",
2987                         strerror(rte_errno));
2988                 return -rte_errno;
2989         }
2990         assert(pci_drv == &mlx5_driver);
2991         errno = 0;
2992         ibv_list = mlx5_glue->get_device_list(&ret);
2993         if (!ibv_list) {
2994                 rte_errno = errno ? errno : ENOSYS;
2995                 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
2996                 return -rte_errno;
2997         }
2998         /*
2999          * First scan the list of all Infiniband devices to find
3000          * matching ones, gathering them into the match list.
3001          */
3002         struct ibv_device *ibv_match[ret + 1];
3003         int nl_route = mlx5_nl_init(NETLINK_ROUTE);
3004         int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
3005         unsigned int i;
3006
3007         while (ret-- > 0) {
3008                 struct rte_pci_addr pci_addr;
3009
3010                 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
3011                 bd = mlx5_device_bond_pci_match
3012                                 (ibv_list[ret], pci_dev, nl_rdma);
3013                 if (bd >= 0) {
3014                         /*
3015                          * Bonding device detected. Only one match is allowed;
3016                          * bonding is supported over a multi-port IB device,
3017                          * so there should be no matches on representor PCI
3018                          * functions or non-VF-LAG bonding devices with the
3019                          * specified address.
3020                          */
3021                         if (nd) {
3022                                 DRV_LOG(ERR,
3023                                         "multiple PCI match on bonding device"
3024                                         "\"%s\" found", ibv_list[ret]->name);
3025                                 rte_errno = ENOENT;
3026                                 ret = -rte_errno;
3027                                 goto exit;
3028                         }
3029                         DRV_LOG(INFO, "PCI information matches for"
3030                                       " slave %d bonding device \"%s\"",
3031                                       bd, ibv_list[ret]->name);
3032                         ibv_match[nd++] = ibv_list[ret];
3033                         break;
3034                 }
3035                 if (mlx5_dev_to_pci_addr
3036                         (ibv_list[ret]->ibdev_path, &pci_addr))
3037                         continue;
3038                 if (pci_dev->addr.domain != pci_addr.domain ||
3039                     pci_dev->addr.bus != pci_addr.bus ||
3040                     pci_dev->addr.devid != pci_addr.devid ||
3041                     pci_dev->addr.function != pci_addr.function)
3042                         continue;
3043                 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
3044                         ibv_list[ret]->name);
3045                 ibv_match[nd++] = ibv_list[ret];
3046         }
3047         ibv_match[nd] = NULL;
3048         if (!nd) {
3049                 /* No device matches, just complain and bail out. */
3050                 DRV_LOG(WARNING,
3051                         "no Verbs device matches PCI device " PCI_PRI_FMT ","
3052                         " are kernel drivers loaded?",
3053                         pci_dev->addr.domain, pci_dev->addr.bus,
3054                         pci_dev->addr.devid, pci_dev->addr.function);
3055                 rte_errno = ENOENT;
3056                 ret = -rte_errno;
3057                 goto exit;
3058         }
3059         if (nd == 1) {
3060                 /*
3061                  * The single matching device found may have multiple ports.
3062                  * Each port may be a representor, so we have to check the
3063                  * port number and the existence of representors.
3064                  */
3065                 if (nl_rdma >= 0)
3066                         np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
3067                 if (!np)
3068                         DRV_LOG(WARNING, "cannot get the number of ports"
3069                                          " for IB device \"%s\"", ibv_match[0]->name);
3070                 if (bd >= 0 && !np) {
3071                         DRV_LOG(ERR, "cannot get ports"
3072                                      " for bonding device");
3073                         rte_errno = ENOENT;
3074                         ret = -rte_errno;
3075                         goto exit;
3076                 }
3077         }
3078 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
3079         if (bd >= 0) {
3080                 /*
3081                  * This may happen if there is VF LAG kernel support and
3082                  * the application is compiled with an older rdma-core library.
3083                  */
3084                 DRV_LOG(ERR,
3085                         "No kernel/verbs support for VF LAG bonding found.");
3086                 rte_errno = ENOTSUP;
3087                 ret = -rte_errno;
3088                 goto exit;
3089         }
3090 #endif
3091         /*
3092          * Now we can determine the maximal
3093          * number of devices to be spawned.
3094          */
3095         list = rte_zmalloc("device spawn data",
3096                          sizeof(struct mlx5_dev_spawn_data) *
3097                          (np ? np : nd),
3098                          RTE_CACHE_LINE_SIZE);
3099         if (!list) {
3100                 DRV_LOG(ERR, "spawn data array allocation failure");
3101                 rte_errno = ENOMEM;
3102                 ret = -rte_errno;
3103                 goto exit;
3104         }
3105         if (bd >= 0 || np > 1) {
3106                 /*
3107                  * A single IB device with multiple ports was found;
3108                  * it may be an E-Switch master device with representors.
3109                  * We have to perform identification through the ports.
3110                  */
3111                 assert(nl_rdma >= 0);
3112                 assert(ns == 0);
3113                 assert(nd == 1);
3114                 assert(np);
3115                 for (i = 1; i <= np; ++i) {
3116                         list[ns].max_port = np;
3117                         list[ns].ibv_port = i;
3118                         list[ns].ibv_dev = ibv_match[0];
3119                         list[ns].eth_dev = NULL;
3120                         list[ns].pci_dev = pci_dev;
3121                         list[ns].pf_bond = bd;
3122                         list[ns].ifindex = mlx5_nl_ifindex
3123                                         (nl_rdma, list[ns].ibv_dev->name, i);
3124                         if (!list[ns].ifindex) {
3125                                 /*
3126                                  * No network interface index was found for
3127                                  * the specified port, which means there is
3128                                  * no representor on this port. That is OK:
3129                                  * ports can be disabled, for example when
3130                                  * sriov_numvfs < sriov_totalvfs.
3131                                  */
3132                                 continue;
3133                         }
3134                         ret = -1;
3135                         if (nl_route >= 0)
3136                                 ret = mlx5_nl_switch_info
3137                                                (nl_route,
3138                                                 list[ns].ifindex,
3139                                                 &list[ns].info);
3140                         if (ret || (!list[ns].info.representor &&
3141                                     !list[ns].info.master)) {
3142                                 /*
3143                                  * We failed to recognize representors with
3144                                  * Netlink, let's try to perform the task
3145                                  * with sysfs.
3146                                  */
3147                                 ret = mlx5_sysfs_switch_info
3148                                                (list[ns].ifindex,
3149                                                 &list[ns].info);
3150                         }
3151                         if (!ret && bd >= 0) {
3152                                 switch (list[ns].info.name_type) {
3153                                 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
3154                                         if (list[ns].info.port_name == bd)
3155                                                 ns++;
3156                                         break;
3157                                 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
3158                                         if (list[ns].info.pf_num == bd)
3159                                                 ns++;
3160                                         break;
3161                                 default:
3162                                         break;
3163                                 }
3164                                 continue;
3165                         }
3166                         if (!ret && (list[ns].info.representor ^
3167                                      list[ns].info.master))
3168                                 ns++;
3169                 }
3170                 if (!ns) {
3171                         DRV_LOG(ERR,
3172                                 "unable to recognize master/representors"
3173                                 " on the IB device with multiple ports");
3174                         rte_errno = ENOENT;
3175                         ret = -rte_errno;
3176                         goto exit;
3177                 }
3178         } else {
3179                 /*
3180                  * The existence of several matching entries (nd > 1) means
3181                  * port representors have been instantiated. No existing Verbs
3182                  * call nor sysfs entry can tell them apart; this can only
3183                  * be done through Netlink calls assuming kernel drivers are
3184                  * recent enough to support them.
3185                  *
3186                  * In the event of identification failure through Netlink,
3187                  * try again through sysfs, then:
3188                  *
3189                  * 1. A single IB device matches (nd == 1) with single
3190                  *    port (np=0/1) and is not a representor; assume
3191                  *    no switch support.
3192                  *
3193                  * 2. Otherwise no safe assumptions can be made;
3194                  *    complain louder and bail out.
3195                  */
3196                 np = 1;
3197                 for (i = 0; i != nd; ++i) {
3198                         memset(&list[ns].info, 0, sizeof(list[ns].info));
3199                         list[ns].max_port = 1;
3200                         list[ns].ibv_port = 1;
3201                         list[ns].ibv_dev = ibv_match[i];
3202                         list[ns].eth_dev = NULL;
3203                         list[ns].pci_dev = pci_dev;
3204                         list[ns].pf_bond = -1;
3205                         list[ns].ifindex = 0;
3206                         if (nl_rdma >= 0)
3207                                 list[ns].ifindex = mlx5_nl_ifindex
3208                                         (nl_rdma, list[ns].ibv_dev->name, 1);
3209                         if (!list[ns].ifindex) {
3210                                 char ifname[IF_NAMESIZE];
3211
3212                                 /*
3213                                  * Netlink failed; this may happen with an
3214                                  * old ib_core kernel driver (before 4.16).
3215                                  * We can assume the driver is old because
3216                                  * here we are processing single-port IB
3217                                  * devices. Let's try sysfs to retrieve
3218                                  * the ifindex. This method works for the
3219                                  * master device only.
3220                                  */
3221                                 if (nd > 1) {
3222                                         /*
3223                                          * Multiple devices found; assume
3224                                          * representors. We cannot distinguish
3225                                          * master from representor, so the
3226                                          * ifindex cannot be retrieved via sysfs.
3227                                          */
3228                                         continue;
3229                                 }
3230                                 ret = mlx5_get_master_ifname
3231                                         (ibv_match[i]->ibdev_path, &ifname);
3232                                 if (!ret)
3233                                         list[ns].ifindex =
3234                                                 if_nametoindex(ifname);
3235                                 if (!list[ns].ifindex) {
3236                                         /*
3237                                          * No network interface index was found
3238                                          * for the specified device, which means
3239                                          * it is neither a representor nor a
3240                                          * master.
3241                                          */
3242                                         continue;
3243                                 }
3244                         }
3245                         ret = -1;
3246                         if (nl_route >= 0)
3247                                 ret = mlx5_nl_switch_info
3248                                                (nl_route,
3249                                                 list[ns].ifindex,
3250                                                 &list[ns].info);
3251                         if (ret || (!list[ns].info.representor &&
3252                                     !list[ns].info.master)) {
3253                                 /*
3254                                  * We failed to recognize representors with
3255                                  * Netlink, let's try to perform the task
3256                                  * with sysfs.
3257                                  */
3258                                 ret = mlx5_sysfs_switch_info
3259                                                (list[ns].ifindex,
3260                                                 &list[ns].info);
3261                         }
3262                         if (!ret && (list[ns].info.representor ^
3263                                      list[ns].info.master)) {
3264                                 ns++;
3265                         } else if ((nd == 1) &&
3266                                    !list[ns].info.representor &&
3267                                    !list[ns].info.master) {
3268                                 /*
3269                                  * A single IB device with
3270                                  * one physical port and an
3271                                  * attached network device.
3272                                  * Maybe SR-IOV is not enabled
3273                                  * or there are no representors.
3274                                  */
3275                                 DRV_LOG(INFO, "no E-Switch support detected");
3276                                 ns++;
3277                                 break;
3278                         }
3279                 }
3280                 if (!ns) {
3281                         DRV_LOG(ERR,
3282                                 "unable to recognize master/representors"
3283                                 " on the multiple IB devices");
3284                         rte_errno = ENOENT;
3285                         ret = -rte_errno;
3286                         goto exit;
3287                 }
3288         }
3289         assert(ns);
3290         /*
3291          * Sort the list to probe devices in natural order for users' convenience
3292          * (i.e. master first, then representors from lowest to highest ID).
3293          */
3294         qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
3295         /* Default configuration. */
3296         dev_config = (struct mlx5_dev_config){
3297                 .hw_padding = 0,
3298                 .mps = MLX5_ARG_UNSET,
3299                 .dbnc = MLX5_ARG_UNSET,
3300                 .rx_vec_en = 1,
3301                 .txq_inline_max = MLX5_ARG_UNSET,
3302                 .txq_inline_min = MLX5_ARG_UNSET,
3303                 .txq_inline_mpw = MLX5_ARG_UNSET,
3304                 .txqs_inline = MLX5_ARG_UNSET,
3305                 .vf_nl_en = 1,
3306                 .mr_ext_memseg_en = 1,
3307                 .mprq = {
3308                         .enabled = 0, /* Disabled by default. */
3309                         .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
3310                         .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
3311                         .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
3312                 },
3313                 .dv_esw_en = 1,
3314                 .dv_flow_en = 1,
3315         };
3316         /* Device specific configuration. */
3317         switch (pci_dev->id.device_id) {
3318         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
3319         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
3320         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
3321         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
3322         case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
3323         case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
3324         case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
3325                 dev_config.vf = 1;
3326                 break;
3327         default:
3328                 break;
3329         }
3330         for (i = 0; i != ns; ++i) {
3331                 uint32_t restore;
3332
3333                 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
3334                                                  &list[i],
3335                                                  dev_config);
3336                 if (!list[i].eth_dev) {
3337                         if (rte_errno != EBUSY && rte_errno != EEXIST)
3338                                 break;
3339                         /* Device is disabled or already spawned. Ignore it. */
3340                         continue;
3341                 }
3342                 restore = list[i].eth_dev->data->dev_flags;
3343                 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
3344                 /* Restore non-PCI flags cleared by the above call. */
3345                 list[i].eth_dev->data->dev_flags |= restore;
3346                 mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev);
3347                 rte_eth_dev_probing_finish(list[i].eth_dev);
3348         }
3349         if (i != ns) {
3350                 DRV_LOG(ERR,
3351                         "probe of PCI device " PCI_PRI_FMT " aborted after"
3352                         " encountering an error: %s",
3353                         pci_dev->addr.domain, pci_dev->addr.bus,
3354                         pci_dev->addr.devid, pci_dev->addr.function,
3355                         strerror(rte_errno));
3356                 ret = -rte_errno;
3357                 /* Roll back. */
3358                 while (i--) {
3359                         if (!list[i].eth_dev)
3360                                 continue;
3361                         mlx5_dev_close(list[i].eth_dev);
3362                         /* mac_addrs must not be freed: it is part of dev_private. */
3363                         list[i].eth_dev->data->mac_addrs = NULL;
3364                         claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
3365                 }
3366                 /* Restore original error. */
3367                 rte_errno = -ret;
3368         } else {
3369                 ret = 0;
3370         }
3371 exit:
3372         /*
3373          * Do the routine cleanup:
3374          * - close opened Netlink sockets
3375          * - free allocated spawn data array
3376          * - free the Infiniband device list
3377          */
3378         if (nl_rdma >= 0)
3379                 close(nl_rdma);
3380         if (nl_route >= 0)
3381                 close(nl_route);
3382         if (list)
3383                 rte_free(list);
3384         assert(ibv_list);
3385         mlx5_glue->free_device_list(ibv_list);
3386         return ret;
3387 }
3388
3389 /**
3390  * Look for the Ethernet device belonging to the mlx5 driver.
3391  *
3392  * @param[in] port_id
3393  *   port_id to start looking for the device from.
3394  * @param[in] pci_dev
3395  *   Pointer to the hint PCI device. While a device is being probed,
3396  *   its siblings (the master and preceding representors) might not
3397  *   have a driver assigned yet (because mlx5_pci_probe() has not
3398  *   completed), so matching on the hint PCI device may be used to
3399  *   detect a sibling device.
3400  *
3401  * @return
3402  *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
3403  */
3404 uint16_t
3405 mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
3406 {
3407         while (port_id < RTE_MAX_ETHPORTS) {
3408                 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3409
3410                 if (dev->state != RTE_ETH_DEV_UNUSED &&
3411                     dev->device &&
3412                     (dev->device == &pci_dev->device ||
3413                      (dev->device->driver &&
3414                      dev->device->driver->name &&
3415                      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
3416                         break;
3417                 port_id++;
3418         }
3419         if (port_id >= RTE_MAX_ETHPORTS)
3420                 return RTE_MAX_ETHPORTS;
3421         return port_id;
3422 }
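
/*
 * Usage sketch (hypothetical caller): walk every mlx5 port belonging
 * to a given PCI device:
 *
 *     uint16_t port_id = 0;
 *
 *     while ((port_id = mlx5_eth_find_next(port_id, pci_dev))
 *            < RTE_MAX_ETHPORTS) {
 *             ... operate on rte_eth_devices[port_id] ...
 *             port_id++;
 *     }
 */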
3423
3424 /**
3425  * DPDK callback to remove a PCI device.
3426  *
3427  * This function removes all Ethernet devices belonging to a given PCI device.
3428  *
3429  * @param[in] pci_dev
3430  *   Pointer to the PCI device.
3431  *
3432  * @return
3433  *   0 on success, the function cannot fail.
3434  */
3435 static int
3436 mlx5_pci_remove(struct rte_pci_device *pci_dev)
3437 {
3438         uint16_t port_id;
3439
3440         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
3441                 rte_eth_dev_close(port_id);
3442         return 0;
3443 }
3444
3445 static const struct rte_pci_id mlx5_pci_id_map[] = {
3446         {
3447                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3448                                PCI_DEVICE_ID_MELLANOX_CONNECTX4)
3449         },
3450         {
3451                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3452                                PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
3453         },
3454         {
3455                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3456                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
3457         },
3458         {
3459                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3460                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
3461         },
3462         {
3463                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3464                                PCI_DEVICE_ID_MELLANOX_CONNECTX5)
3465         },
3466         {
3467                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3468                                PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
3469         },
3470         {
3471                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3472                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
3473         },
3474         {
3475                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3476                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
3477         },
3478         {
3479                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3480                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
3481         },
3482         {
3483                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3484                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
3485         },
3486         {
3487                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3488                                PCI_DEVICE_ID_MELLANOX_CONNECTX6)
3489         },
3490         {
3491                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3492                                PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
3493         },
3494         {
3495                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3496                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
3497         },
3498         {
3499                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
3500                                PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
3501         },
3502         {
3503                 .vendor_id = 0
3504         }
3505 };
3506
3507 static struct rte_pci_driver mlx5_driver = {
3508         .driver = {
3509                 .name = MLX5_DRIVER_NAME
3510         },
3511         .id_table = mlx5_pci_id_map,
3512         .probe = mlx5_pci_probe,
3513         .remove = mlx5_pci_remove,
3514         .dma_map = mlx5_dma_map,
3515         .dma_unmap = mlx5_dma_unmap,
3516         .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
3517                      RTE_PCI_DRV_PROBE_AGAIN,
3518 };
3519
3520 /**
3521  * Driver initialization routine.
3522  */
3523 RTE_INIT(rte_mlx5_pmd_init)
3524 {
3525         /* Initialize driver log type. */
3526         mlx5_logtype = rte_log_register("pmd.net.mlx5");
3527         if (mlx5_logtype >= 0)
3528                 rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
3529
3530         /* Build the static tables for Verbs conversion. */
3531         mlx5_set_ptype_table();
3532         mlx5_set_cksum_table();
3533         mlx5_set_swp_types_table();
3534         if (mlx5_glue)
3535                 rte_pci_register(&mlx5_driver);
3536 }
3537
3538 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
3539 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
3540 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");