net/mlx5: support Tx hairpin queues
[dpdk.git] drivers/net/mlx5/mlx5.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"
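
/*
 * Illustrative sketch, not part of the driver: the keys above are consumed
 * as DPDK device arguments. A typical invocation (the PCI address and the
 * values are placeholders) might look like:
 *
 *   testpmd -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txqs_min_inline=8
 *
 * Each key=value pair is parsed by mlx5_args()/mlx5_args_check() below;
 * keys outside the known list make probing fail with EINVAL.
 */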

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
        uint32_t ifindex; /**< Network interface index. */
        uint32_t max_port; /**< IB device maximal port index. */
        uint32_t ibv_port; /**< IB device physical port index. */
        int pf_bond; /**< bonding device PF index. < 0 - no bonding */
        struct mlx5_switch_info info; /**< Switch information. */
        struct ibv_device *ibv_dev; /**< Associated IB device. */
        struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
        struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to the mlx5_ibv_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
        uint8_t i;

        TAILQ_INIT(&sh->cmng.flow_counters);
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
                TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all resources allocated for counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

        LIST_REMOVE(mng, next);
        claim_zero(mlx5_devx_cmd_destroy(mng->dm));
        claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
        rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
        struct mlx5_counter_stats_mem_mng *mng;
        uint8_t i;
        int j;
        int retries = 1024;

        rte_errno = 0;
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
                struct mlx5_flow_counter_pool *pool;
                uint32_t batch = !!(i % 2);

                if (!sh->cmng.ccont[i].pools)
                        continue;
                pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                while (pool) {
                        if (batch) {
                                if (pool->min_dcs)
                                        claim_zero
                                        (mlx5_devx_cmd_destroy(pool->min_dcs));
                        }
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
                                if (pool->counters_raw[j].action)
                                        claim_zero
                                        (mlx5_glue->destroy_flow_action
                                               (pool->counters_raw[j].action));
                                if (!batch && pool->counters_raw[j].dcs)
                                        claim_zero(mlx5_devx_cmd_destroy
                                                  (pool->counters_raw[j].dcs));
                        }
                        TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
                                     next);
                        rte_free(pool);
                        pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
                }
                rte_free(sh->cmng.ccont[i].pools);
        }
        mng = LIST_FIRST(&sh->cmng.mem_mngs);
        while (mng) {
                mlx5_flow_destroy_counter_stat_mem_mng(mng);
                mng = LIST_FIRST(&sh->cmng.mem_mngs);
        }
        memset(&sh->cmng, 0, sizeof(sh->cmng));
}
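
/*
 * Descriptive note on the loop above (not part of the driver): containers
 * in sh->cmng.ccont[] alternate by index parity, batch = !!(i % 2). For
 * "batch" containers the counters of a pool share one DevX object
 * (pool->min_dcs), destroyed once per pool; for non-batch containers each
 * counter owns its own DevX object (counters_raw[j].dcs), destroyed
 * individually. Flow actions are destroyed in both cases.
 */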

/**
 * Extract pdn of PD object using DV API.
 *
 * @param[in] pd
 *   Pointer to the verbs PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused)
{
        struct mlx5dv_obj obj;
        struct mlx5dv_pd pd_info;
        int ret = 0;

        obj.pd.in = pd;
        obj.pd.out = &pd_info;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
        if (ret) {
                DRV_LOG(DEBUG, "Fail to get PD object info");
                return ret;
        }
        *pdn = pd_info.pdn;
        return 0;
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
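
/*
 * A minimal sketch (assuming rdma-core with mlx5dv support; not part of
 * the driver) of the same mlx5dv_init_obj() pattern applied to another
 * object type. Fill the .in/.out pair for the object of interest, then
 * pass the matching MLX5DV_OBJ_* bit:
 *
 *   struct mlx5dv_obj obj;
 *   struct mlx5dv_cq cq_info;
 *
 *   obj.cq.in = ibv_cq;     // verbs object to translate
 *   obj.cq.out = &cq_info;  // low-level attributes filled on success
 *   ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ);
 *
 * mlx5_get_pdn() above does exactly this for a PD to learn its number.
 */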

/**
 * Allocate a shared IB device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single-port
 * dedicated IB device, the context will be used only by the given port.
 *
 * The routine first searches the list of contexts for the specified IB device
 * name; if found, the shared context is assumed and its reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
        struct mlx5_ibv_shared *sh;
        int err = 0;
        uint32_t i;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif

        assert(spawn);
        /* Secondary process should not create the shared context. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_ibv_list, next) {
                if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
                        sh->refcnt++;
                        goto exit;
                }
        }
        /* No device found, we have to create new shared context. */
        assert(spawn->max_port);
        sh = rte_zmalloc("ethdev shared ib context",
                         sizeof(struct mlx5_ibv_shared) +
                         spawn->max_port *
                         sizeof(struct mlx5_ibv_shared_port),
                         RTE_CACHE_LINE_SIZE);
        if (!sh) {
                DRV_LOG(ERR, "shared context allocation failure");
                rte_errno = ENOMEM;
                goto exit;
        }
        /* Try to open IB device with DV first, then usual Verbs. */
        errno = 0;
        sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
        if (sh->ctx) {
                sh->devx = 1;
                DRV_LOG(DEBUG, "DevX is supported");
        } else {
                sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
                if (!sh->ctx) {
                        err = errno ? errno : ENODEV;
                        goto error;
                }
                DRV_LOG(DEBUG, "DevX is NOT supported");
        }
        err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
                goto error;
        }
        sh->refcnt = 1;
        sh->max_port = spawn->max_port;
        strncpy(sh->ibdev_name, sh->ctx->device->name,
                sizeof(sh->ibdev_name));
        strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
                sizeof(sh->ibdev_path));
        pthread_mutex_init(&sh->intr_mutex, NULL);
        /*
         * Setting port_id to a disallowed value (RTE_MAX_ETHPORTS) means
         * there is no interrupt subhandler installed for
         * the given port index i.
         */
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
        sh->pd = mlx5_glue->alloc_pd(sh->ctx);
        if (sh->pd == NULL) {
                DRV_LOG(ERR, "PD allocation failure");
                err = ENOMEM;
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (sh->devx) {
                err = mlx5_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Fail to extract pdn from PD");
                        goto error;
                }
                sh->td = mlx5_devx_cmd_create_td(sh->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
                        err = ENOMEM;
                        goto error;
                }
                tis_attr.transport_domain = sh->td->id;
                sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
                if (!sh->tis) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
                        goto error;
                }
        }
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
        /*
         * Once the device is added to the list of memory event
         * callback, its global MR cache table cannot be expanded
         * on the fly because of deadlock. If it overflows, lookup
         * should be done by searching MR list linearly, which is slow.
         *
         * At this point the device is not added to the memory
         * event list yet, context is just being created.
         */
        err = mlx5_mr_btree_init(&sh->mr.cache,
                                 MLX5_MR_BTREE_CACHE_N * 2,
                                 spawn->pci_dev->device.numa_node);
        if (err) {
                err = rte_errno;
                goto error;
        }
        mlx5_flow_counters_mng_init(sh);
        /* Add device to memory callback list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
                         sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        return sh;
error:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
        assert(sh);
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        rte_free(sh);
        assert(err > 0);
        rte_errno = err;
        return NULL;
}

/**
 * Free the shared IB device context. Decrement the reference counter and,
 * if it reaches zero, free all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
        /* Check the object presence in the list. */
        struct mlx5_ibv_shared *lctx;

        LIST_FOREACH(lctx, &mlx5_ibv_list, next)
                if (lctx == sh)
                        break;
        assert(lctx);
        if (lctx != sh) {
                DRV_LOG(ERR, "Freeing non-existing shared IB context");
                goto exit;
        }
#endif
        assert(sh);
        assert(sh->refcnt);
        /* Secondary process should not free the shared context. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
        /* Release created Memory Regions. */
        mlx5_mr_release(sh);
        /* Remove from memory callback device list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_REMOVE(sh, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /*
         * Ensure there is no async event handler installed.
         * Only primary process handles async device events.
         */
        mlx5_flow_counters_mng_close(sh);
        assert(!sh->intr_cnt);
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT
        if (sh->devx_intr_cnt) {
                if (sh->intr_handle_devx.fd)
                        rte_intr_callback_unregister(&sh->intr_handle_devx,
                                          mlx5_dev_interrupt_handler_devx, sh);
                if (sh->devx_comp)
                        mlx5dv_devx_destroy_cmd_comp(sh->devx_comp);
        }
#endif
        pthread_mutex_destroy(&sh->intr_mutex);
        if (sh->pd)
                claim_zero(mlx5_glue->dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        rte_free(sh);
exit:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}
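
/*
 * Descriptive note (not part of the driver): mlx5_alloc_shared_ibctx() and
 * mlx5_free_shared_ibctx() form a reference-counted pair. The expected
 * lifecycle from a port spawn/close path, as a sketch:
 *
 *   sh = mlx5_alloc_shared_ibctx(spawn); // refcnt = 1, or ++ if cached
 *   ...                                  // port uses sh->ctx, sh->pd, ...
 *   mlx5_free_shared_ibctx(sh);          // resources freed at refcnt == 0
 *
 * Both functions take mlx5_ibv_list_mutex and assert they run in the
 * primary process only.
 */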

/**
 * Initialize DR related data within the private structure.
 * The routine checks the reference counter and performs the actual
 * resource creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
        struct mlx5_ibv_shared *sh = priv->sh;
        int err = 0;
        void *domain;

        assert(sh);
        if (sh->dv_refcnt) {
                /* Shared DV/DR structures are already initialized. */
                sh->dv_refcnt++;
                priv->dr_shared = 1;
                return 0;
        }
        /* Reference counter is zero, we should initialize structures. */
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
        if (!domain) {
                DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        sh->rx_domain = domain;
        domain = mlx5_glue->dr_create_domain(sh->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
        if (!domain) {
                DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
                err = errno;
                goto error;
        }
        pthread_mutex_init(&sh->dv_mutex, NULL);
        sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (priv->config.dv_esw_en) {
                domain = mlx5_glue->dr_create_domain
                        (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
                if (!domain) {
                        DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
                        err = errno;
                        goto error;
                }
                sh->fdb_domain = domain;
                sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
        }
#endif
        sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
        sh->dv_refcnt++;
        priv->dr_shared = 1;
        return 0;

error:
        /* Rollback the created objects. */
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        return err;
#else
        (void)priv;
        return 0;
#endif
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
        struct mlx5_ibv_shared *sh;

        if (!priv->dr_shared)
                return;
        priv->dr_shared = 0;
        sh = priv->sh;
        assert(sh);
        assert(sh->dv_refcnt);
        if (sh->dv_refcnt && --sh->dv_refcnt)
                return;
        if (sh->rx_domain) {
                mlx5_glue->dr_destroy_domain(sh->rx_domain);
                sh->rx_domain = NULL;
        }
        if (sh->tx_domain) {
                mlx5_glue->dr_destroy_domain(sh->tx_domain);
                sh->tx_domain = NULL;
        }
#ifdef HAVE_MLX5DV_DR_ESWITCH
        if (sh->fdb_domain) {
                mlx5_glue->dr_destroy_domain(sh->fdb_domain);
                sh->fdb_domain = NULL;
        }
        if (sh->esw_drop_action) {
                mlx5_glue->destroy_flow_action(sh->esw_drop_action);
                sh->esw_drop_action = NULL;
        }
#endif
        if (sh->pop_vlan_action) {
                mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
                sh->pop_vlan_action = NULL;
        }
        pthread_mutex_destroy(&sh->dv_mutex);
#else
        (void)priv;
#endif
}

/**
 * Initialize shared data between the primary and secondary processes.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to it.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
        const struct rte_memzone *mz;
        int ret = 0;

        rte_spinlock_lock(&mlx5_shared_data_lock);
        if (mlx5_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate shared memory. */
                        mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
                                                 sizeof(*mlx5_shared_data),
                                                 SOCKET_ID_ANY, 0);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot allocate mlx5 shared data\n");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
                        rte_spinlock_init(&mlx5_shared_data->lock);
                } else {
                        /* Lookup allocated shared memory. */
                        mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
                        if (mz == NULL) {
                                DRV_LOG(ERR,
                                        "Cannot attach mlx5 shared data\n");
                                ret = -rte_errno;
                                goto error;
                        }
                        mlx5_shared_data = mz->addr;
                        memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
                }
        }
error:
        rte_spinlock_unlock(&mlx5_shared_data_lock);
        return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
        const char *val = getenv(name);

        if (val == NULL)
                return 0;
        return atoi(val);
}
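
/*
 * Usage sketch (not part of the driver; variable and helper names below
 * are hypothetical):
 *
 *   if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB") > 0)
 *           enable_feature();
 *
 * Unset or non-numeric values yield 0 (atoi() semantics), so callers can
 * treat 0 as "disabled/default" without a separate existence check.
 */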

/**
 * Verbs callback to allocate memory. This function should allocate space
 * of the provided size, residing inside a huge page.
 * Please note that all allocations must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
        struct mlx5_priv *priv = data;
        void *ret;
        size_t alignment = sysconf(_SC_PAGESIZE);
        unsigned int socket = SOCKET_ID_ANY;

        if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
                const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        } else if (priv->verbs_alloc_ctx.type ==
                   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
                const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

                socket = ctrl->socket;
        }
        assert(data != NULL);
        ret = rte_malloc_socket(__func__, size, alignment, socket);
        if (!ret && size)
                rte_errno = ENOMEM;
        return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
        assert(data != NULL);
        rte_free(ptr);
}
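
/*
 * A hedged sketch (not part of the driver) of how the two callbacks above
 * are typically wired into rdma-core; the driver does this at spawn time
 * through the glue layer, the plain mlx5dv API is shown here:
 *
 *   struct mlx5dv_ctx_allocators alctr = {
 *           .alloc = &mlx5_alloc_verbs_buf,
 *           .free = &mlx5_free_verbs_buf,
 *           .data = priv,          // passed back as the data argument
 *   };
 *   mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 *                           (void *)&alctr);
 */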

/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   A pointer to the eth_dev.
 * @param[in] udp_tunnel
 *   A pointer to the UDP tunnel.
 *
 * @return
 *   0 on valid udp ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        assert(udp_tunnel != NULL);
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
}
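
/*
 * Application-side usage sketch (not part of the driver; port_id is a
 * placeholder). Only the default VXLAN/VXLAN-GPE ports are accepted, so
 * this succeeds:
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel); // 0 here
 *
 * Any other port number makes the callback above return -ENOTSUP.
 */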

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_proc_priv *ppriv;
        size_t ppriv_size;

        /*
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
        ppriv_size =
                sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
        ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        ppriv->uar_table_sz = ppriv_size;
        dev->process_private = ppriv;
        return 0;
}
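
/*
 * Descriptive note (not part of the driver): the UAR table is laid out as
 * a flexible region directly after the fixed-size header, one void * slot
 * per Tx queue. For example, with 4 Tx queues on a 64-bit build:
 *
 *   ppriv_size = sizeof(struct mlx5_proc_priv) + 4 * sizeof(void *)
 *              = sizeof(struct mlx5_proc_priv) + 32 bytes
 *
 * Each process (primary and secondary) keeps its own copy, since UAR
 * mappings are per-process.
 */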

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
        if (!dev->process_private)
                return;
        rte_free(dev->process_private);
        dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_dev_interrupt_handler_devx_uninstall(dev);
        mlx5_traffic_disable(dev);
        mlx5_flow_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_req_stop_rxtx(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
                priv->rxqs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                usleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i)
                        mlx5_txq_release(dev, i);
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
        mlx5_proc_priv_uninit(dev);
        mlx5_mprq_free_mp(dev);
        mlx5_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
        if (priv->reta_idx != NULL)
                rte_free(priv->reta_idx);
        if (priv->config.vf)
                mlx5_nl_mac_addr_flush(dev);
        if (priv->nl_socket_route >= 0)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
        if (priv->vmwa_context)
                mlx5_vlan_vmwa_exit(priv->vmwa_context);
        if (priv->sh) {
                /*
                 * Free the shared context last, because the cleanup routines
                 * above may use some shared fields, like
                 * mlx5_nl_mac_addr_flush() uses ibdev_path to retrieve the
                 * ifindex if Netlink fails.
                 */
                mlx5_free_shared_ibctx(priv->sh);
                priv->sh = NULL;
        }
        ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_ind_table_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some indirection tables still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_flow_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
                uint16_t port_id;

                MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                        struct mlx5_priv *opriv =
                                rte_eth_devices[port_id].data->dev_private;

                        if (!opriv ||
                            opriv->domain_id != priv->domain_id ||
                            &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
                        break;
                }
                if (!c)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
        }
        memset(priv, 0, sizeof(*priv));
        priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
        /*
         * Reset mac_addrs to NULL such that it is not freed as part of
         * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
         * it is freed when dev_private is freed.
         */
        dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .read_clock = mlx5_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .reta_update = mlx5_dev_rss_reta_update,
        .reta_query = mlx5_dev_rss_reta_query,
        .rss_hash_update = mlx5_rss_hash_update,
        .rss_hash_conf_get = mlx5_rss_hash_conf_get,
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rx_queue_count = mlx5_rx_queue_count,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
        struct mlx5_dev_config *config = opaque;
        unsigned long tmp;

        /* No-op, port representors are processed in mlx5_dev_spawn(). */
        if (!strcmp(MLX5_REPRESENTOR, key))
                return 0;
        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                rte_errno = errno;
                DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
                return -rte_errno;
        }
        if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
                config->cqe_comp = !!tmp;
        } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
                config->cqe_pad = !!tmp;
        } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
                config->hw_padding = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
                config->mprq.enabled = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
                config->mprq.stride_num_n = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
                config->mprq.max_memcpy_len = tmp;
        } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
                config->mprq.min_rxqs_num = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                                 " converted to txq_inline_max", key);
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
                config->txq_inline_min = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                                 " converted to txq_inline_mpw", key);
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
                config->rx_vec_en = !!tmp;
        } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
                config->l3_vxlan_en = !!tmp;
        } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
                config->vf_nl_en = !!tmp;
        } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
                config->dv_esw_en = !!tmp;
        } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
                config->dv_flow_en = !!tmp;
        } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
                config->mr_ext_memseg_en = !!tmp;
        } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
                config->max_dump_files_num = tmp;
        } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
                config->lro.timeout = tmp;
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
                rte_errno = EINVAL;
                return -rte_errno;
        }
        return 0;
}
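
/*
 * A worked example of the parse path above (not part of the driver): for
 * devargs "...,mprq_en=1", rte_kvargs_process() invokes
 * mlx5_args_check("mprq_en", "1", config); strtoul() yields tmp == 1 and
 * the !!tmp normalization stores config->mprq.enabled = 1. Keys outside
 * the params[] list in mlx5_args() below already fail rte_kvargs_parse();
 * the unknown-key branch above is a safety net.
 */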

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
        const char **params = (const char *[]){
                MLX5_RXQ_CQE_COMP_EN,
                MLX5_RXQ_CQE_PAD_EN,
                MLX5_RXQ_PKT_PAD_EN,
                MLX5_RX_MPRQ_EN,
                MLX5_RX_MPRQ_LOG_STRIDE_NUM,
                MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
                MLX5_RXQS_MIN_MPRQ,
                MLX5_TXQ_INLINE,
                MLX5_TXQ_INLINE_MIN,
                MLX5_TXQ_INLINE_MAX,
                MLX5_TXQ_INLINE_MPW,
                MLX5_TXQS_MIN_INLINE,
                MLX5_TXQS_MAX_VEC,
                MLX5_TXQ_MPW_EN,
                MLX5_TXQ_MPW_HDR_DSEG_EN,
                MLX5_TXQ_MAX_INLINE_LEN,
                MLX5_TX_VEC_EN,
                MLX5_RX_VEC_EN,
                MLX5_L3_VXLAN_EN,
                MLX5_VF_NL_EN,
                MLX5_DV_ESW_EN,
                MLX5_DV_FLOW_EN,
                MLX5_MR_EXT_MEMSEG_EN,
                MLX5_REPRESENTOR,
                MLX5_MAX_DUMP_FILES_NUM,
                MLX5_LRO_TIMEOUT_USEC,
                NULL,
        };
        struct rte_kvargs *kvlist;
        int ret = 0;
        int i;

        if (devargs == NULL)
                return 0;
        /* Following UGLY cast is done to pass checkpatch. */
        kvlist = rte_kvargs_parse(devargs->args, params);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Process parameters. */
        for (i = 0; (params[i] != NULL); ++i) {
                if (rte_kvargs_count(kvlist, params[i])) {
                        ret = rte_kvargs_process(kvlist, params[i],
                                                 mlx5_args_check, config);
                        if (ret) {
                                rte_errno = EINVAL;
                                rte_kvargs_free(kvlist);
                                return -rte_errno;
                        }
                }
        }
        rte_kvargs_free(kvlist);
        return 0;
}

static struct rte_pci_driver mlx5_driver;

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
        struct mlx5_shared_data *sd;
        struct mlx5_local_data *ld = &mlx5_local_data;
        int ret = 0;

        if (mlx5_init_shared_data())
                return -rte_errno;
        sd = mlx5_shared_data;
        assert(sd);
        rte_spinlock_lock(&sd->lock);
        switch (rte_eal_process_type()) {
        case RTE_PROC_PRIMARY:
                if (sd->init_done)
                        break;
                LIST_INIT(&sd->mem_event_cb_list);
                rte_rwlock_init(&sd->mem_event_rwlock);
                rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
                                                mlx5_mr_mem_event_cb, NULL);
                ret = mlx5_mp_init_primary();
                if (ret)
                        goto out;
                sd->init_done = true;
                break;
        case RTE_PROC_SECONDARY:
                if (ld->init_done)
                        break;
                ret = mlx5_mp_init_secondary();
                if (ret)
                        goto out;
                ++sd->secondary_cnt;
                ld->init_done = true;
                break;
        default:
                break;
        }
out:
        rte_spinlock_unlock(&sd->lock);
        return ret;
}

/**
 * Configure the minimal amount of data to inline into a WQE
 * while sending packets:
 *
 * - txq_inline_min has the highest priority if this
 *   key is specified in devargs;
 * - if DevX is enabled, the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed);
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
 *   and none (0 bytes) for other NICs.
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 */
static void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                    struct mlx5_dev_config *config)
{
        if (config->txq_inline_min != MLX5_ARG_UNSET) {
                /* Application defines size of inlined data explicitly. */
                switch (spawn->pci_dev->id.device_id) {
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
                case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
                        if (config->txq_inline_min <
                                       (int)MLX5_INLINE_HSIZE_L2) {
                                DRV_LOG(DEBUG,
                                        "txq_inline_min aligned to minimal"
                                        " ConnectX-4 required value %d",
                                        (int)MLX5_INLINE_HSIZE_L2);
                                config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
                        }
                        break;
                }
                goto exit;
        }
        if (config->hca_attr.eth_net_offloads) {
                /* We have DevX enabled, inline mode queried successfully. */
                switch (config->hca_attr.wqe_inline_mode) {
                case MLX5_CAP_INLINE_MODE_L2:
                        /* outer L2 header must be inlined. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
                        goto exit;
                case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                        /* No inline data are required by NIC. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
                        config->hw_vlan_insert =
                                config->hca_attr.wqe_vlan_insert;
                        DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
                        goto exit;
                case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                        /* inline mode is defined by NIC vport context. */
                        if (!config->hca_attr.eth_virt)
                                break;
                        switch (config->hca_attr.vport_inline_mode) {
                        case MLX5_INLINE_MODE_NONE:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_NONE;
                                goto exit;
                        case MLX5_INLINE_MODE_L2:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_L2;
                                goto exit;
                        case MLX5_INLINE_MODE_IP:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_L3;
                                goto exit;
                        case MLX5_INLINE_MODE_TCP_UDP:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_L4;
                                goto exit;
                        case MLX5_INLINE_MODE_INNER_L2:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_INNER_L2;
                                goto exit;
                        case MLX5_INLINE_MODE_INNER_IP:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_INNER_L3;
                                goto exit;
                        case MLX5_INLINE_MODE_INNER_TCP_UDP:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_INNER_L4;
                                goto exit;
                        }
                }
        }
        /*
         * We get here if we are unable to deduce
         * inline data size with DevX. Try PCI ID
         * to determine old NICs.
         */
        switch (spawn->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
                config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
                config->hw_vlan_insert = 0;
                break;
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1426                 /*
1427                  * These NICs support VLAN insertion from WQE and
1428                  * report the wqe_vlan_insert flag. But there is a bug in
1429                  * this feature and PFC control may be broken, so disable it.
1430                  */
1431                 config->hw_vlan_insert = 0;
1432                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1433                 break;
1434         default:
1435                 config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
1436                 break;
1437         }
1438 exit:
1439         DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
1440 }
1441
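/*
 * Illustrative note (a sketch of the logic above, not extra driver
 * code): the minimal Tx inline length is resolved in this order:
 *
 *   1. the txq_inline_min devarg, if set by the user (aligned up to
 *      MLX5_INLINE_HSIZE_L2 on ConnectX-4);
 *   2. the wqe_inline_mode/vport_inline_mode HCA attributes queried
 *      via DevX;
 *   3. a PCI ID based fallback table for NICs probed without DevX.
 */
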
1442 /**
1443  * Allocate page of door-bells and register it using DevX API.
1444  *
1445  * @param [in] dev
1446  *   Pointer to Ethernet device.
1447  *
1448  * @return
1449  *   Pointer to new page on success, NULL otherwise.
1450  */
1451 static struct mlx5_devx_dbr_page *
1452 mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_devx_dbr_page *page;
1456
1457         /* Allocate space for door-bell page and management data. */
1458         page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
1459                                  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1460         if (!page) {
1461                 DRV_LOG(ERR, "port %u cannot allocate dbr page",
1462                         dev->data->port_id);
1463                 return NULL;
1464         }
1465         /* Register allocated memory. */
1466         page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
1467                                               MLX5_DBR_PAGE_SIZE, 0);
1468         if (!page->umem) {
1469                 DRV_LOG(ERR, "port %u cannot umem reg dbr page",
1470                         dev->data->port_id);
1471                 rte_free(page);
1472                 return NULL;
1473         }
1474         return page;
1475 }
1476
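/*
 * Illustrative arithmetic (a sketch, assuming the definitions in
 * mlx5.h): with a MLX5_DBR_PAGE_SIZE of 4096 bytes and a door-bell
 * record size MLX5_DBR_SIZE of 8 bytes, one page holds
 * MLX5_DBR_PER_PAGE = 4096 / 8 = 512 records, tracked by a bitmap of
 * MLX5_DBR_BITMAP_SIZE = 512 / 64 = 8 64-bit words.
 */
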
1477 /**
1478  * Find the next available door-bell, allocate new page if needed.
1479  *
1480  * @param [in] dev
1481  *   Pointer to Ethernet device.
1482  * @param [out] dbr_page
1483  *   Door-bell page containing the page data.
1484  *
1485  * @return
1486  *   Door-bell address offset on success, a negative error value otherwise.
1487  */
1488 int64_t
1489 mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
1490 {
1491         struct mlx5_priv *priv = dev->data->dev_private;
1492         struct mlx5_devx_dbr_page *page = NULL;
1493         uint32_t i, j;
1494
1495         LIST_FOREACH(page, &priv->dbrpgs, next)
1496                 if (page->dbr_count < MLX5_DBR_PER_PAGE)
1497                         break;
1498         if (!page) { /* No page with free door-bell exists. */
1499                 page = mlx5_alloc_dbr_page(dev);
1500                 if (!page) /* Failed to allocate new page. */
1501                         return (-1);
1502                 LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
1503         }
1504         /* Loop to find bitmap part with clear bit. */
1505         for (i = 0;
1506              i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
1507              i++)
1508                 ; /* Empty. */
1509         /* Find the first clear bit. */
1510         assert(i < (MLX5_DBR_PER_PAGE / 64));
1511         j = rte_bsf64(~page->dbr_bitmap[i]);
1512         page->dbr_bitmap[i] |= (1ULL << j);
1513         page->dbr_count++;
1514         *dbr_page = page;
1515         return (((i * 64) + j) * sizeof(uint64_t));
1516 }
1517
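/*
 * Usage sketch (illustrative only): for the first record of a fresh
 * page i == 0 and j == 0, so the returned offset is 0; for i == 1 and
 * j == 3 it is ((1 * 64) + 3) * sizeof(uint64_t) == 536 bytes into
 * page->dbrs. A caller typically does:
 *
 *   struct mlx5_devx_dbr_page *dbr_page;
 *   int64_t offset = mlx5_get_dbr(dev, &dbr_page);
 *
 * and, if offset >= 0, programs the door-bell address from
 * dbr_page->dbrs + offset, keeping page->umem->umem_id and the offset
 * to release the record with mlx5_release_dbr() later.
 */
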
1518 /**
1519  * Release a door-bell record.
1520  *
1521  * @param [in] dev
1522  *   Pointer to Ethernet device.
1523  * @param [in] umem_id
1524  *   UMEM ID of page containing the door-bell record to release.
1525  * @param [in] offset
1526  *   Offset of door-bell record in page.
1527  *
1528  * @return
1529  *   0 on success, a negative error value otherwise.
1530  */
1531 int32_t
1532 mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
1533 {
1534         struct mlx5_priv *priv = dev->data->dev_private;
1535         struct mlx5_devx_dbr_page *page = NULL;
1536         int ret = 0;
1537
1538         LIST_FOREACH(page, &priv->dbrpgs, next)
1539                 /* Find the page this address belongs to. */
1540                 if (page->umem->umem_id == umem_id)
1541                         break;
1542         if (!page)
1543                 return -EINVAL;
1544         page->dbr_count--;
1545         if (!page->dbr_count) {
1546                 /* Page not used, free it and remove from list. */
1547                 LIST_REMOVE(page, next);
1548                 if (page->umem)
1549                         ret = -mlx5_glue->devx_umem_dereg(page->umem);
1550                 rte_free(page);
1551         } else {
1552                 /* Mark in bitmap that this door-bell is not in use. */
1553                 offset /= MLX5_DBR_SIZE;
1554                 int i = offset / 64;
1555                 int j = offset % 64;
1556
1557                 page->dbr_bitmap[i] &= ~(1ULL << j);
1558         }
1559         return ret;
1560 }
1561
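/*
 * Illustrative decode (a sketch): releasing the 536-byte offset from
 * the example above yields 536 / MLX5_DBR_SIZE == 67, i.e. bitmap
 * word i == 67 / 64 == 1 and bit j == 67 % 64 == 3, which is exactly
 * the bit set by mlx5_get_dbr().
 */
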
1562 /**
1563  * Check sibling device configurations.
1564  *
1565  * Sibling devices sharing the Infiniband device context
1566  * should have compatible configurations. This regards
1567  * representors and bonding slaves.
1568  *
1569  * @param priv
1570  *   Private device descriptor.
1571  * @param config
1572  *   Configuration of the device is going to be created.
1573  *
1574  * @return
1575  *   0 on success, EINVAL otherwise
1576  */
1577 static int
1578 mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
1579                               struct mlx5_dev_config *config)
1580 {
1581         struct mlx5_ibv_shared *sh = priv->sh;
1582         struct mlx5_dev_config *sh_conf = NULL;
1583         uint16_t port_id;
1584
1585         assert(sh);
1586         /* Nothing to compare for the single/first device. */
1587         if (sh->refcnt == 1)
1588                 return 0;
1589         /* Find the device with shared context. */
1590         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
1591                 struct mlx5_priv *opriv =
1592                         rte_eth_devices[port_id].data->dev_private;
1593
1594                 if (opriv && opriv != priv && opriv->sh == sh) {
1595                         sh_conf = &opriv->config;
1596                         break;
1597                 }
1598         }
1599         if (!sh_conf)
1600                 return 0;
1601         if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
1602                 DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
1603                              " for shared %s context", sh->ibdev_name);
1604                 rte_errno = EINVAL;
1605                 return rte_errno;
1606         }
1607         return 0;
1608 }
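
/*
 * Illustrative failure case (a sketch): probing a representor with
 * dv_flow_en=1 while its master port on the same Infiniband context
 * was probed with dv_flow_en=0 is rejected here with EINVAL.
 */
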
1609 /**
1610  * Spawn an Ethernet device from Verbs information.
1611  *
1612  * @param dpdk_dev
1613  *   Backing DPDK device.
1614  * @param spawn
1615  *   Verbs device parameters (name, port, switch_info) to spawn.
1616  * @param config
1617  *   Device configuration parameters.
1618  *
1619  * @return
1620  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
1621  *   is set. The following errors are defined:
1622  *
1623  *   EBUSY: device is not supposed to be spawned.
1624  *   EEXIST: device is already spawned
1625  */
1626 static struct rte_eth_dev *
1627 mlx5_dev_spawn(struct rte_device *dpdk_dev,
1628                struct mlx5_dev_spawn_data *spawn,
1629                struct mlx5_dev_config config)
1630 {
1631         const struct mlx5_switch_info *switch_info = &spawn->info;
1632         struct mlx5_ibv_shared *sh = NULL;
1633         struct ibv_port_attr port_attr;
1634         struct mlx5dv_context dv_attr = { .comp_mask = 0 };
1635         struct rte_eth_dev *eth_dev = NULL;
1636         struct mlx5_priv *priv = NULL;
1637         int err = 0;
1638         unsigned int hw_padding = 0;
1639         unsigned int mps;
1640         unsigned int cqe_comp;
1641         unsigned int cqe_pad = 0;
1642         unsigned int tunnel_en = 0;
1643         unsigned int mpls_en = 0;
1644         unsigned int swp = 0;
1645         unsigned int mprq = 0;
1646         unsigned int mprq_min_stride_size_n = 0;
1647         unsigned int mprq_max_stride_size_n = 0;
1648         unsigned int mprq_min_stride_num_n = 0;
1649         unsigned int mprq_max_stride_num_n = 0;
1650         struct rte_ether_addr mac;
1651         char name[RTE_ETH_NAME_MAX_LEN];
1652         int own_domain_id = 0;
1653         uint16_t port_id;
1654         unsigned int i;
1655 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1656         struct mlx5dv_devx_port devx_port;
1657 #endif
1658
1659         /* Determine if this port representor is supposed to be spawned. */
1660         if (switch_info->representor && dpdk_dev->devargs) {
1661                 struct rte_eth_devargs eth_da;
1662
1663                 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
1664                 if (err) {
1665                         rte_errno = -err;
1666                         DRV_LOG(ERR, "failed to process device arguments: %s",
1667                                 strerror(rte_errno));
1668                         return NULL;
1669                 }
1670                 for (i = 0; i < eth_da.nb_representor_ports; ++i)
1671                         if (eth_da.representor_ports[i] ==
1672                             (uint16_t)switch_info->port_name)
1673                                 break;
1674                 if (i == eth_da.nb_representor_ports) {
1675                         rte_errno = EBUSY;
1676                         return NULL;
1677                 }
1678         }
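        /*
         * Illustrative note: with devargs such as "representor=[0-2]"
         * (parsed by rte_eth_devargs_parse() above), only representors
         * for ports 0..2 are spawned; any other representor port
         * returns here with rte_errno set to EBUSY.
         */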
1679         /* Build device name. */
1680         if (spawn->pf_bond < 0) {
1681                 /* Single device. */
1682                 if (!switch_info->representor)
1683                         strlcpy(name, dpdk_dev->name, sizeof(name));
1684                 else
1685                         snprintf(name, sizeof(name), "%s_representor_%u",
1686                                  dpdk_dev->name, switch_info->port_name);
1687         } else {
1688                 /* Bonding device. */
1689                 if (!switch_info->representor)
1690                         snprintf(name, sizeof(name), "%s_%s",
1691                                  dpdk_dev->name, spawn->ibv_dev->name);
1692                 else
1693                         snprintf(name, sizeof(name), "%s_%s_representor_%u",
1694                                  dpdk_dev->name, spawn->ibv_dev->name,
1695                                  switch_info->port_name);
1696         }
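        /*
         * Resulting name examples (illustrative, assuming PCI device
         * "0000:82:00.0" and IB device "mlx5_bond_0"):
         *   "0000:82:00.0"                            single device
         *   "0000:82:00.0_representor_2"              representor of VF 2
         *   "0000:82:00.0_mlx5_bond_0"                bonding device
         *   "0000:82:00.0_mlx5_bond_0_representor_2"  bonding representor
         */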
1697         /* check if the device is already spawned */
1698         if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
1699                 rte_errno = EEXIST;
1700                 return NULL;
1701         }
1702         DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
1703         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1704                 eth_dev = rte_eth_dev_attach_secondary(name);
1705                 if (eth_dev == NULL) {
1706                         DRV_LOG(ERR, "can not attach rte ethdev");
1707                         rte_errno = ENOMEM;
1708                         return NULL;
1709                 }
1710                 eth_dev->device = dpdk_dev;
1711                 eth_dev->dev_ops = &mlx5_dev_sec_ops;
1712                 err = mlx5_proc_priv_init(eth_dev);
1713                 if (err)
1714                         return NULL;
1715                 /* Receive command fd from primary process */
1716                 err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
1717                 if (err < 0)
1718                         return NULL;
1719                 /* Remap UAR for Tx queues. */
1720                 err = mlx5_tx_uar_init_secondary(eth_dev, err);
1721                 if (err)
1722                         return NULL;
1723                 /*
1724                  * Ethdev pointer is still required as input since
1725                  * the primary device is not accessible from the
1726                  * secondary process.
1727                  */
1728                 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
1729                 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
1730                 return eth_dev;
1731         }
1732         sh = mlx5_alloc_shared_ibctx(spawn);
1733         if (!sh)
1734                 return NULL;
1735         config.devx = sh->devx;
1736 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
1737         config.dest_tir = 1;
1738 #endif
1739 #ifdef HAVE_IBV_MLX5_MOD_SWP
1740         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
1741 #endif
1742         /*
1743          * Multi-packet send is supported by ConnectX-4 Lx PF as well
1744          * as all ConnectX-5 devices.
1745          */
1746 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1747         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
1748 #endif
1749 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1750         dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
1751 #endif
1752         mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
1753         if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
1754                 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
1755                         DRV_LOG(DEBUG, "enhanced MPW is supported");
1756                         mps = MLX5_MPW_ENHANCED;
1757                 } else {
1758                         DRV_LOG(DEBUG, "MPW is supported");
1759                         mps = MLX5_MPW;
1760                 }
1761         } else {
1762                 DRV_LOG(DEBUG, "MPW isn't supported");
1763                 mps = MLX5_MPW_DISABLED;
1764         }
1765 #ifdef HAVE_IBV_MLX5_MOD_SWP
1766         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
1767                 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
1768         DRV_LOG(DEBUG, "SWP support: %u", swp);
1769 #endif
1770         config.swp = !!swp;
1771 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1772         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
1773                 struct mlx5dv_striding_rq_caps mprq_caps =
1774                         dv_attr.striding_rq_caps;
1775
1776                 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
1777                         mprq_caps.min_single_stride_log_num_of_bytes);
1778                 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
1779                         mprq_caps.max_single_stride_log_num_of_bytes);
1780                 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
1781                         mprq_caps.min_single_wqe_log_num_of_strides);
1782                 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
1783                         mprq_caps.max_single_wqe_log_num_of_strides);
1784                 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
1785                         mprq_caps.supported_qpts);
1786                 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
1787                 mprq = 1;
1788                 mprq_min_stride_size_n =
1789                         mprq_caps.min_single_stride_log_num_of_bytes;
1790                 mprq_max_stride_size_n =
1791                         mprq_caps.max_single_stride_log_num_of_bytes;
1792                 mprq_min_stride_num_n =
1793                         mprq_caps.min_single_wqe_log_num_of_strides;
1794                 mprq_max_stride_num_n =
1795                         mprq_caps.max_single_wqe_log_num_of_strides;
1796                 config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1797                                                    mprq_min_stride_num_n);
1798         }
1799 #endif
1800         if (RTE_CACHE_LINE_SIZE == 128 &&
1801             !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
1802                 cqe_comp = 0;
1803         else
1804                 cqe_comp = 1;
1805         config.cqe_comp = cqe_comp;
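        /*
         * Note (a sketch of the logic above): on 128-byte cache-line
         * hosts Rx CQE compression stays enabled only if the device
         * also reports MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP; on 64-byte
         * hosts it is assumed supported and may still be disabled
         * later by the rxq_cqe_comp_en devarg.
         */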
1806 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1807         /* Whether device supports 128B Rx CQE padding. */
1808         cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
1809                   (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
1810 #endif
1811 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1812         if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
1813                 tunnel_en = ((dv_attr.tunnel_offloads_caps &
1814                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
1815                              (dv_attr.tunnel_offloads_caps &
1816                               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
1817         }
1818         DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
1819                 tunnel_en ? "" : "not ");
1820 #else
1821         DRV_LOG(WARNING,
1822                 "tunnel offloading disabled due to old OFED/rdma-core version");
1823 #endif
1824         config.tunnel_en = tunnel_en;
1825 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1826         mpls_en = ((dv_attr.tunnel_offloads_caps &
1827                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
1828                    (dv_attr.tunnel_offloads_caps &
1829                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
1830         DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
1831                 mpls_en ? "" : "not ");
1832 #else
1833         DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
1834                 " old OFED/rdma-core version or firmware configuration");
1835 #endif
1836         config.mpls_en = mpls_en;
1837         /* Check port status. */
1838         err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
1839         if (err) {
1840                 DRV_LOG(ERR, "port query failed: %s", strerror(err));
1841                 goto error;
1842         }
1843         if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1844                 DRV_LOG(ERR, "port is not configured in Ethernet mode");
1845                 err = EINVAL;
1846                 goto error;
1847         }
1848         if (port_attr.state != IBV_PORT_ACTIVE)
1849                 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
1850                         mlx5_glue->port_state_str(port_attr.state),
1851                         port_attr.state);
1852         /* Allocate private eth device data. */
1853         priv = rte_zmalloc("ethdev private structure",
1854                            sizeof(*priv),
1855                            RTE_CACHE_LINE_SIZE);
1856         if (priv == NULL) {
1857                 DRV_LOG(ERR, "priv allocation failure");
1858                 err = ENOMEM;
1859                 goto error;
1860         }
1861         priv->sh = sh;
1862         priv->ibv_port = spawn->ibv_port;
1863         priv->pci_dev = spawn->pci_dev;
1864         priv->mtu = RTE_ETHER_MTU;
1865 #ifndef RTE_ARCH_64
1866         /* Initialize UAR access locks for 32bit implementations. */
1867         rte_spinlock_init(&priv->uar_lock_cq);
1868         for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
1869                 rte_spinlock_init(&priv->uar_lock[i]);
1870 #endif
1871         /* Some internal functions rely on Netlink sockets, open them now. */
1872         priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
1873         priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
1874         priv->nl_sn = 0;
1875         priv->representor = !!switch_info->representor;
1876         priv->master = !!switch_info->master;
1877         priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1878         priv->vport_meta_tag = 0;
1879         priv->vport_meta_mask = 0;
1880         priv->pf_bond = spawn->pf_bond;
1881 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1882         /*
1883          * The DevX port query API is implemented. E-Switch may use
1884          * either the vport or the reg_c[0] metadata register to match
1885          * on the vport index. The engaged part of the metadata
1886          * register is defined by the mask.
1887          */
1888         devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
1889                               MLX5DV_DEVX_PORT_MATCH_REG_C_0;
1890         err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port, &devx_port);
1891         if (err) {
1892                 DRV_LOG(WARNING, "can't query devx port %d on device %s",
1893                         spawn->ibv_port, spawn->ibv_dev->name);
1894                 devx_port.comp_mask = 0;
1895         }
1896         if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
1897                 priv->vport_meta_tag = devx_port.reg_c_0.value;
1898                 priv->vport_meta_mask = devx_port.reg_c_0.mask;
1899                 if (!priv->vport_meta_mask) {
1900                         DRV_LOG(ERR, "vport zero mask for port %d"
1901                                      " on bonding device %s",
1902                                      spawn->ibv_port, spawn->ibv_dev->name);
1903                         err = ENOTSUP;
1904                         goto error;
1905                 }
1906                 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1907                         DRV_LOG(ERR, "invalid vport tag for port %d"
1908                                      " on bonding device %s",
1909                                      spawn->ibv_port, spawn->ibv_dev->name);
1910                         err = ENOTSUP;
1911                         goto error;
1912                 }
1913         } else if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
1914                 priv->vport_id = devx_port.vport_num;
1915         } else if (spawn->pf_bond >= 0) {
1916                 DRV_LOG(ERR, "can't deduce vport index for port %d"
1917                              " on bonding device %s",
1918                              spawn->ibv_port, spawn->ibv_dev->name);
1919                 err = ENOTSUP;
1920                 goto error;
1921         } else {
1922                 /* Deduce the vport index in the compatible (legacy) way. */
1923                 priv->vport_id = switch_info->representor ?
1924                                  switch_info->port_name + 1 : -1;
1925         }
1926 #else
1927         /*
1928          * Kernel/rdma_core supports single E-Switch per PF configurations
1929          * only and the vport_id field contains the vport index of the
1930          * associated VF, which is deduced from the representor port name.
1931          * For example, if IB device port 10 has an attached network
1932          * device eth0 with port name attribute pf0vf2, we deduce the
1933          * VF number as 2 and set the vport index to 3 (2 + 1). This
1934          * assignment scheme must be changed if multiple E-Switch
1935          * instances per PF configurations and/or PCI subfunctions
1936          * are added.
1937          */
1938         priv->vport_id = switch_info->representor ?
1939                          switch_info->port_name + 1 : -1;
1940 #endif
1941         /* representor_id field keeps the unmodified VF index. */
1942         priv->representor_id = switch_info->representor ?
1943                                switch_info->port_name : -1;
1944         /*
1945          * Look for sibling devices in order to reuse their switch domain
1946          * if any, otherwise allocate one.
1947          */
1948         MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
1949                 const struct mlx5_priv *opriv =
1950                         rte_eth_devices[port_id].data->dev_private;
1951
1952                 if (!opriv ||
1953                     opriv->sh != priv->sh ||
1954                         opriv->domain_id ==
1955                         RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1956                         continue;
1957                 priv->domain_id = opriv->domain_id;
1958                 break;
1959         }
1960         if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1961                 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1962                 if (err) {
1963                         err = rte_errno;
1964                         DRV_LOG(ERR, "unable to allocate switch domain: %s",
1965                                 strerror(rte_errno));
1966                         goto error;
1967                 }
1968                 own_domain_id = 1;
1969         }
1970         err = mlx5_args(&config, dpdk_dev->devargs);
1971         if (err) {
1972                 err = rte_errno;
1973                 DRV_LOG(ERR, "failed to process device arguments: %s",
1974                         strerror(rte_errno));
1975                 goto error;
1976         }
1977         err = mlx5_dev_check_sibling_config(priv, &config);
1978         if (err)
1979                 goto error;
1980         config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1981                             IBV_DEVICE_RAW_IP_CSUM);
1982         DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1983                 (config.hw_csum ? "" : "not "));
1984 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1985         !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1986         DRV_LOG(DEBUG, "counters are not supported");
1987 #endif
1988 #ifndef HAVE_IBV_FLOW_DV_SUPPORT
1989         if (config.dv_flow_en) {
1990                 DRV_LOG(WARNING, "DV flow is not supported");
1991                 config.dv_flow_en = 0;
1992         }
1993 #endif
1994         config.ind_table_max_size =
1995                 sh->device_attr.rss_caps.max_rwq_indirection_table_size;
1996         /*
1997          * Remove this check once DPDK supports larger/variable
1998          * indirection tables.
1999          */
2000         if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
2001                 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
2002         DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
2003                 config.ind_table_max_size);
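        /*
         * Illustrative example: a device reporting a maximal
         * indirection table of 2048 entries is clamped here to
         * ETH_RSS_RETA_SIZE_512 (512 entries) until DPDK supports
         * larger tables.
         */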
2004         config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
2005                                   IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
2006         DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
2007                 (config.hw_vlan_strip ? "" : "not "));
2008         config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
2009                                  IBV_RAW_PACKET_CAP_SCATTER_FCS);
2010         DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
2011                 (config.hw_fcs_strip ? "" : "not "));
2012 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
2013         hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
2014 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
2015         hw_padding = !!(sh->device_attr.device_cap_flags_ex &
2016                         IBV_DEVICE_PCI_WRITE_END_PADDING);
2017 #endif
2018         if (config.hw_padding && !hw_padding) {
2019                 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
2020                 config.hw_padding = 0;
2021         } else if (config.hw_padding) {
2022                 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
2023         }
2024         config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
2025                       (sh->device_attr.tso_caps.supported_qpts &
2026                        (1 << IBV_QPT_RAW_PACKET)));
2027         if (config.tso)
2028                 config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
2029         /*
2030          * Legacy MPW is disabled by default, while Enhanced MPW is
2031          * enabled by default.
2032          */
2033         if (config.mps == MLX5_ARG_UNSET)
2034                 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
2035                                                           MLX5_MPW_DISABLED;
2036         else
2037                 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
2038         DRV_LOG(INFO, "%sMPS is %s",
2039                 config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
2040                 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
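        /*
         * Illustrative devarg interplay (a sketch): with txq_mpw_en
         * unset, only Enhanced MPW capable devices keep MPS enabled;
         * with txq_mpw_en=1 on a device reporting legacy MPW only,
         * config.mps becomes MLX5_MPW; txq_mpw_en=0 always disables.
         */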
2041         if (config.cqe_comp && !cqe_comp) {
2042                 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
2043                 config.cqe_comp = 0;
2044         }
2045         if (config.cqe_pad && !cqe_pad) {
2046                 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
2047                 config.cqe_pad = 0;
2048         } else if (config.cqe_pad) {
2049                 DRV_LOG(INFO, "Rx CQE padding is enabled");
2050         }
2051         if (config.devx) {
2052                 priv->counter_fallback = 0;
2053                 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
2054                 if (err) {
2055                         err = -err;
2056                         goto error;
2057                 }
2058                 if (!config.hca_attr.flow_counters_dump)
2059                         priv->counter_fallback = 1;
2060 #ifndef HAVE_IBV_DEVX_ASYNC
2061                 priv->counter_fallback = 1;
2062 #endif
2063                 if (priv->counter_fallback)
2064                         DRV_LOG(INFO, "Use fall-back DV counter management");
2065                 /* Check for LRO support. */
2066                 if (config.dest_tir && config.hca_attr.lro_cap &&
2067                     config.dv_flow_en) {
2068                         /* TBD check tunnel lro caps. */
2069                         config.lro.supported = config.hca_attr.lro_cap;
2070                         DRV_LOG(DEBUG, "Device supports LRO");
2071                         /*
2072                          * If LRO timeout is not configured by application,
2073                          * use the minimal supported value.
2074                          */
2075                         if (!config.lro.timeout)
2076                                 config.lro.timeout =
2077                                 config.hca_attr.lro_timer_supported_periods[0];
2078                         DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
2079                                 config.lro.timeout);
2080                 }
2081         }
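        /*
         * Illustrative LRO example (hypothetical values): if the HCA
         * reports lro_timer_supported_periods = {8, 16, 32, 1024}
         * microseconds and the application did not set a timeout,
         * the LRO session timeout above resolves to 8 usec.
         */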
2082         if (config.mprq.enabled && mprq) {
2083                 if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
2084                     config.mprq.stride_num_n < mprq_min_stride_num_n) {
2085                         config.mprq.stride_num_n =
2086                                 RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
2087                                         mprq_min_stride_num_n);
2088                         DRV_LOG(WARNING,
2089                                 "the number of strides"
2090                                 " for Multi-Packet RQ is out of range,"
2091                                 " setting default value (%u)",
2092                                 1 << config.mprq.stride_num_n);
2093                 }
2094                 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
2095                 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
2096         } else if (config.mprq.enabled && !mprq) {
2097                 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
2098                 config.mprq.enabled = 0;
2099         }
2100         if (config.max_dump_files_num == 0)
2101                 config.max_dump_files_num = 128;
2102         eth_dev = rte_eth_dev_allocate(name);
2103         if (eth_dev == NULL) {
2104                 DRV_LOG(ERR, "can not allocate rte ethdev");
2105                 err = ENOMEM;
2106                 goto error;
2107         }
2108         /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
2109         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2110         if (priv->representor) {
2111                 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
2112                 eth_dev->data->representor_id = priv->representor_id;
2113         }
2114         /*
2115          * Store the associated network device interface index. This
2116          * index is permanent throughout the lifetime of the device,
2117          * so we may cache the ifindex here and use it later.
2118          */
2119         assert(spawn->ifindex);
2120         priv->if_index = spawn->ifindex;
2121         eth_dev->data->dev_private = priv;
2122         priv->dev_data = eth_dev->data;
2123         eth_dev->data->mac_addrs = priv->mac;
2124         eth_dev->device = dpdk_dev;
2125         /* Configure the first MAC address by default. */
2126         if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
2127                 DRV_LOG(ERR,
2128                         "port %u cannot get MAC address, is mlx5_en"
2129                         " loaded? (errno: %s)",
2130                         eth_dev->data->port_id, strerror(rte_errno));
2131                 err = ENODEV;
2132                 goto error;
2133         }
2134         DRV_LOG(INFO,
2135                 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
2136                 eth_dev->data->port_id,
2137                 mac.addr_bytes[0], mac.addr_bytes[1],
2138                 mac.addr_bytes[2], mac.addr_bytes[3],
2139                 mac.addr_bytes[4], mac.addr_bytes[5]);
2140 #ifndef NDEBUG
2141         {
2142                 char ifname[IF_NAMESIZE];
2143
2144                 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
2145                         DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
2146                                 eth_dev->data->port_id, ifname);
2147                 else
2148                         DRV_LOG(DEBUG, "port %u ifname is unknown",
2149                                 eth_dev->data->port_id);
2150         }
2151 #endif
2152         /* Get actual MTU if possible. */
2153         err = mlx5_get_mtu(eth_dev, &priv->mtu);
2154         if (err) {
2155                 err = rte_errno;
2156                 goto error;
2157         }
2158         DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
2159                 priv->mtu);
2160         /* Initialize burst functions to prevent crashes before link-up. */
2161         eth_dev->rx_pkt_burst = removed_rx_burst;
2162         eth_dev->tx_pkt_burst = removed_tx_burst;
2163         eth_dev->dev_ops = &mlx5_dev_ops;
2164         /* Register MAC address. */
2165         claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
2166         if (config.vf && config.vf_nl_en)
2167                 mlx5_nl_mac_addr_sync(eth_dev);
2168         TAILQ_INIT(&priv->flows);
2169         TAILQ_INIT(&priv->ctrl_flows);
2170         /* Hint libmlx5 to use PMD allocator for data plane resources */
2171         struct mlx5dv_ctx_allocators alctr = {
2172                 .alloc = &mlx5_alloc_verbs_buf,
2173                 .free = &mlx5_free_verbs_buf,
2174                 .data = priv,
2175         };
2176         mlx5_glue->dv_set_context_attr(sh->ctx,
2177                                        MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2178                                        (void *)((uintptr_t)&alctr));
2179         /* Bring Ethernet device up. */
2180         DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
2181                 eth_dev->data->port_id);
2182         mlx5_set_link_up(eth_dev);
2183         /*
2184          * Even though the interrupt handler is not installed yet,
2185          * interrupts will still trigger on the async_fd from
2186          * Verbs context returned by ibv_open_device().
2187          */
2188         mlx5_link_update(eth_dev, 0);
2189 #ifdef HAVE_MLX5DV_DR_ESWITCH
2190         if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
2191               (switch_info->representor || switch_info->master)))
2192                 config.dv_esw_en = 0;
2193 #else
2194         config.dv_esw_en = 0;
2195 #endif
2196         /* Detect minimal data bytes to inline. */
2197         mlx5_set_min_inline(spawn, &config);
2198         /* Store device configuration on private structure. */
2199         priv->config = config;
2200         /* Create context for virtual machine VLAN workaround. */
2201         priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
2202         if (config.dv_flow_en) {
2203                 err = mlx5_alloc_shared_dr(priv);
2204                 if (err)
2205                         goto error;
2206         }
2207         /* Supported Verbs flow priority number detection. */
2208         err = mlx5_flow_discover_priorities(eth_dev);
2209         if (err < 0) {
2210                 err = -err;
2211                 goto error;
2212         }
2213         priv->config.flow_prio = err;
2214         return eth_dev;
2215 error:
2216         if (priv) {
2217                 if (priv->sh)
2218                         mlx5_free_shared_dr(priv);
2219                 if (priv->nl_socket_route >= 0)
2220                         close(priv->nl_socket_route);
2221                 if (priv->nl_socket_rdma >= 0)
2222                         close(priv->nl_socket_rdma);
2223                 if (priv->vmwa_context)
2224                         mlx5_vlan_vmwa_exit(priv->vmwa_context);
2225                 if (own_domain_id)
2226                         claim_zero(rte_eth_switch_domain_free(priv->domain_id));
2227                 rte_free(priv);
2228                 if (eth_dev != NULL)
2229                         eth_dev->data->dev_private = NULL;
2230         }
2231         if (eth_dev != NULL) {
2232                 /* mac_addrs must not be freed alone, it is part of dev_private. */
2233                 eth_dev->data->mac_addrs = NULL;
2234                 rte_eth_dev_release_port(eth_dev);
2235         }
2236         if (sh)
2237                 mlx5_free_shared_ibctx(sh);
2238         assert(err > 0);
2239         rte_errno = err;
2240         return NULL;
2241 }
2242
2243 /**
2244  * Comparison callback to sort device data.
2245  *
2246  * This is meant to be used with qsort().
2247  *
2248  * @param a[in]
2249  *   Pointer to pointer to first data object.
2250  * @param b[in]
2251  *   Pointer to pointer to second data object.
2252  *
2253  * @return
2254  *   0 if both objects are equal, less than 0 if the first argument is less
2255  *   than the second, greater than 0 otherwise.
2256  */
2257 static int
2258 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
2259 {
2260         const struct mlx5_switch_info *si_a =
2261                 &((const struct mlx5_dev_spawn_data *)a)->info;
2262         const struct mlx5_switch_info *si_b =
2263                 &((const struct mlx5_dev_spawn_data *)b)->info;
2264         int ret;
2265
2266         /* Master device first. */
2267         ret = si_b->master - si_a->master;
2268         if (ret)
2269                 return ret;
2270         /* Then representor devices. */
2271         ret = si_b->representor - si_a->representor;
2272         if (ret)
2273                 return ret;
2274         /* Unidentified devices come last in no specific order. */
2275         if (!si_a->representor)
2276                 return 0;
2277         /* Order representors by name. */
2278         return si_a->port_name - si_b->port_name;
2279 }
2280
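/*
 * Illustrative ordering (a sketch): qsort()'ing spawn entries for a
 * master port plus representors pf0vf2, pf0vf0 and pf0vf1 yields
 * master, vf0, vf1, vf2: the master first, then representors by
 * ascending port name, unidentified entries last.
 */
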
2281 /**
2282  * Match PCI information for possible slaves of bonding device.
2283  *
2284  * @param[in] ibv_dev
2285  *   Pointer to Infiniband device structure.
2286  * @param[in] pci_dev
2287  *   Pointer to PCI device structure to match PCI address.
2288  * @param[in] nl_rdma
2289  *   Netlink RDMA group socket handle.
2290  *
2291  * @return
2292  *   negative value if no bonding device found, otherwise
2293  *   positive index of slave PF in bonding.
2294  */
2295 static int
2296 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
2297                            const struct rte_pci_device *pci_dev,
2298                            int nl_rdma)
2299 {
2300         char ifname[IF_NAMESIZE + 1];
2301         unsigned int ifindex;
2302         unsigned int np, i;
2303         FILE *file = NULL;
2304         int pf = -1;
2305
2306         /*
2307          * Try to get the master device name. If something goes
2308          * wrong, assume there is no kernel support and no
2309          * bonding devices.
2310          */
2311         if (nl_rdma < 0)
2312                 return -1;
2313         if (!strstr(ibv_dev->name, "bond"))
2314                 return -1;
2315         np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
2316         if (!np)
2317                 return -1;
2318         /*
2319          * The master device might not be on the predefined
2320          * port (port index 1 is not guaranteed), so we have
2321          * to scan all Infiniband device ports and find the
2322          * master.
2323          */
2324         for (i = 1; i <= np; ++i) {
2325                 /* Check whether Infiniband port is populated. */
2326                 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
2327                 if (!ifindex)
2328                         continue;
2329                 if (!if_indextoname(ifindex, ifname))
2330                         continue;
2331                 /* Try to read bonding slave names from sysfs. */
2332                 MKSTR(slaves,
2333                       "/sys/class/net/%s/master/bonding/slaves", ifname);
2334                 file = fopen(slaves, "r");
2335                 if (file)
2336                         break;
2337         }
2338         if (!file)
2339                 return -1;
2340         /* Use safe format to check maximal buffer length. */
2341         assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
2342         while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
2343                 char tmp_str[IF_NAMESIZE + 32];
2344                 struct rte_pci_addr pci_addr;
2345                 struct mlx5_switch_info info;
2346
2347                 /* Process slave interface names in the loop. */
2348                 snprintf(tmp_str, sizeof(tmp_str),
2349                          "/sys/class/net/%s", ifname);
2350                 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
2351                         DRV_LOG(WARNING, "can not get PCI address"
2352                                          " for netdev \"%s\"", ifname);
2353                         continue;
2354                 }
2355                 if (pci_dev->addr.domain != pci_addr.domain ||
2356                     pci_dev->addr.bus != pci_addr.bus ||
2357                     pci_dev->addr.devid != pci_addr.devid ||
2358                     pci_dev->addr.function != pci_addr.function)
2359                         continue;
2360                 /* Slave interface PCI address match found. */
2361                 fclose(file);
2362                 snprintf(tmp_str, sizeof(tmp_str),
2363                          "/sys/class/net/%s/phys_port_name", ifname);
2364                 file = fopen(tmp_str, "rb");
2365                 if (!file)
2366                         break;
2367                 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
2368                 if (fscanf(file, "%32s", tmp_str) == 1)
2369                         mlx5_translate_port_name(tmp_str, &info);
2370                 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
2371                     info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2372                         pf = info.port_name;
2373                 break;
2374         }
2375         if (file)
2376                 fclose(file);
2377         return pf;
2378 }
2379
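/*
 * Illustrative walk-through (a sketch, the sysfs contents are
 * hypothetical): for a VF LAG device the slaves file may read
 * "eth2 eth3"; if /sys/class/net/eth2 resolves to the probed PCI
 * address and its phys_port_name reads "p0" (uplink naming) or the
 * legacy "0", the function returns 0 as the slave PF index.
 */
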
2380 /**
2381  * DPDK callback to register a PCI device.
2382  *
2383  * This function spawns Ethernet devices out of a given PCI device.
2384  *
2385  * @param[in] pci_drv
2386  *   PCI driver structure (mlx5_driver).
2387  * @param[in] pci_dev
2388  *   PCI device information.
2389  *
2390  * @return
2391  *   0 on success, a negative errno value otherwise and rte_errno is set.
2392  */
2393 static int
2394 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2395                struct rte_pci_device *pci_dev)
2396 {
2397         struct ibv_device **ibv_list;
2398         /*
2399          * Number of found IB devices matching the requested PCI BDF.
2400          * nd != 1 means there are multiple IB devices over the same
2401          * PCI device and we have representors and a master.
2402          */
2403         unsigned int nd = 0;
2404         /*
2405          * Number of found IB device ports. nd = 1 and np = 1..n means
2406          * we have a single multiport IB device, and there may be
2407          * representors attached to some of the found ports.
2408          */
2409         unsigned int np = 0;
2410         /*
2411          * Number of DPDK ethernet devices to spawn - either over
2412          * multiple IB devices or multiple ports of a single IB device.
2413          * Actually this is the number of iterations to spawn.
2414          */
2415         unsigned int ns = 0;
2416         /*
2417          * Bonding device
2418          *   < 0 - no bonding device (single one)
2419          *  >= 0 - bonding device (value is slave PF index)
2420          */
2421         int bd = -1;
2422         struct mlx5_dev_spawn_data *list = NULL;
2423         struct mlx5_dev_config dev_config;
2424         int ret;
2425
2426         ret = mlx5_init_once();
2427         if (ret) {
2428                 DRV_LOG(ERR, "unable to init PMD global data: %s",
2429                         strerror(rte_errno));
2430                 return -rte_errno;
2431         }
2432         assert(pci_drv == &mlx5_driver);
2433         errno = 0;
2434         ibv_list = mlx5_glue->get_device_list(&ret);
2435         if (!ibv_list) {
2436                 rte_errno = errno ? errno : ENOSYS;
2437                 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
2438                 return -rte_errno;
2439         }
2440         /*
2441          * First scan the list of all Infiniband devices to find
2442          * matching ones, gathering into the list.
2443          */
2444         struct ibv_device *ibv_match[ret + 1];
2445         int nl_route = mlx5_nl_init(NETLINK_ROUTE);
2446         int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
2447         unsigned int i;
2448
2449         while (ret-- > 0) {
2450                 struct rte_pci_addr pci_addr;
2451
2452                 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
2453                 bd = mlx5_device_bond_pci_match
2454                                 (ibv_list[ret], pci_dev, nl_rdma);
2455                 if (bd >= 0) {
2456                         /*
2457                          * Bonding device detected. Only one match is allowed;
2458                          * bonding is supported over a multi-port IB device,
2459                          * so there should be no matches on representor PCI
2460                          * functions or non-VF-LAG bonding devices with the
2461                          * specified address.
2462                          */
2463                         if (nd) {
2464                                 DRV_LOG(ERR,
2465                                         "multiple PCI matches on bonding device"
2466                                         " \"%s\" found", ibv_list[ret]->name);
2467                                 rte_errno = ENOENT;
2468                                 ret = -rte_errno;
2469                                 goto exit;
2470                         }
2471                         DRV_LOG(INFO, "PCI information matches for"
2472                                       " slave %d bonding device \"%s\"",
2473                                       bd, ibv_list[ret]->name);
2474                         ibv_match[nd++] = ibv_list[ret];
2475                         break;
2476                 }
2477                 if (mlx5_dev_to_pci_addr
2478                         (ibv_list[ret]->ibdev_path, &pci_addr))
2479                         continue;
2480                 if (pci_dev->addr.domain != pci_addr.domain ||
2481                     pci_dev->addr.bus != pci_addr.bus ||
2482                     pci_dev->addr.devid != pci_addr.devid ||
2483                     pci_dev->addr.function != pci_addr.function)
2484                         continue;
2485                 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
2486                         ibv_list[ret]->name);
2487                 ibv_match[nd++] = ibv_list[ret];
2488         }
2489         ibv_match[nd] = NULL;
2490         if (!nd) {
2491                 /* No device matches, just complain and bail out. */
2492                 DRV_LOG(WARNING,
2493                         "no Verbs device matches PCI device " PCI_PRI_FMT ","
2494                         " are kernel drivers loaded?",
2495                         pci_dev->addr.domain, pci_dev->addr.bus,
2496                         pci_dev->addr.devid, pci_dev->addr.function);
2497                 rte_errno = ENOENT;
2498                 ret = -rte_errno;
2499                 goto exit;
2500         }
2501         if (nd == 1) {
2502                 /*
2503                  * The single matching device found may have multiple ports.
2504                  * Each port may be a representor, so we have to check the
2505                  * port number and check for the representors' existence.
2506                  */
2507                 if (nl_rdma >= 0)
2508                         np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2509                 if (!np)
2510                         DRV_LOG(WARNING, "cannot get the number of ports"
2511                                          " for IB device \"%s\"", ibv_match[0]->name);
2512                 if (bd >= 0 && !np) {
2513                         DRV_LOG(ERR, "cannot get ports"
2514                                      " for bonding device");
2515                         rte_errno = ENOENT;
2516                         ret = -rte_errno;
2517                         goto exit;
2518                 }
2519         }
2520 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2521         if (bd >= 0) {
2522                 /*
2523                  * This may happen if there is VF LAG kernel support and
2524                  * application is compiled with older rdma_core library.
2525                  */
2526                 DRV_LOG(ERR,
2527                         "No kernel/verbs support for VF LAG bonding found.");
2528                 rte_errno = ENOTSUP;
2529                 ret = -rte_errno;
2530                 goto exit;
2531         }
2532 #endif
2533         /*
2534          * Now we can determine the maximal
2535          * amount of devices to be spawned.
2536          */
2537         list = rte_zmalloc("device spawn data",
2538                          sizeof(struct mlx5_dev_spawn_data) *
2539                          (np ? np : nd),
2540                          RTE_CACHE_LINE_SIZE);
2541         if (!list) {
2542                 DRV_LOG(ERR, "spawn data array allocation failure");
2543                 rte_errno = ENOMEM;
2544                 ret = -rte_errno;
2545                 goto exit;
2546         }
2547         if (bd >= 0 || np > 1) {
2548                 /*
2549                  * Single IB device with multiple ports found,
2550                  * it may be an E-Switch master device with representors.
2551                  * We have to perform identification through the ports.
2552                  */
2553                 assert(nl_rdma >= 0);
2554                 assert(ns == 0);
2555                 assert(nd == 1);
2556                 assert(np);
2557                 for (i = 1; i <= np; ++i) {
2558                         list[ns].max_port = np;
2559                         list[ns].ibv_port = i;
2560                         list[ns].ibv_dev = ibv_match[0];
2561                         list[ns].eth_dev = NULL;
2562                         list[ns].pci_dev = pci_dev;
2563                         list[ns].pf_bond = bd;
2564                         list[ns].ifindex = mlx5_nl_ifindex
2565                                         (nl_rdma, list[ns].ibv_dev->name, i);
2566                         if (!list[ns].ifindex) {
2567                                 /*
2568                                  * No network interface index found for the
2569                                  * specified port, it means there is no
2570                                  * representor on this port. It's OK,
2571                                  * there can be disabled ports, for example
2572                                  * if sriov_numvfs < sriov_totalvfs.
2573                                  */
2574                                 continue;
2575                         }
2576                         ret = -1;
2577                         if (nl_route >= 0)
2578                                 ret = mlx5_nl_switch_info
2579                                                (nl_route,
2580                                                 list[ns].ifindex,
2581                                                 &list[ns].info);
2582                         if (ret || (!list[ns].info.representor &&
2583                                     !list[ns].info.master)) {
2584                                 /*
2585                                  * We failed to recognize representors with
2586                                  * Netlink, let's try to perform the task
2587                                  * with sysfs.
2588                                  */
2589                                 ret =  mlx5_sysfs_switch_info
2590                                                 (list[ns].ifindex,
2591                                                  &list[ns].info);
2592                         }
2593                         if (!ret && bd >= 0) {
2594                                 switch (list[ns].info.name_type) {
2595                                 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2596                                         if (list[ns].info.port_name == bd)
2597                                                 ns++;
2598                                         break;
2599                                 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2600                                         if (list[ns].info.pf_num == bd)
2601                                                 ns++;
2602                                         break;
2603                                 default:
2604                                         break;
2605                                 }
2606                                 continue;
2607                         }
2608                         if (!ret && (list[ns].info.representor ^
2609                                      list[ns].info.master))
2610                                 ns++;
2611                 }
2612                 if (!ns) {
2613                         DRV_LOG(ERR,
2614                                 "unable to recognize master/representors"
2615                                 " on the IB device with multiple ports");
2616                         rte_errno = ENOENT;
2617                         ret = -rte_errno;
2618                         goto exit;
2619                 }
2620         } else {
2621                 /*
2622                  * The existence of several matching entries (nd > 1) means
2623                  * port representors have been instantiated. No existing Verbs
2624                  * call nor sysfs entries can tell them apart, this can only
2625                  * be done through Netlink calls assuming kernel drivers are
2626                  * recent enough to support them.
2627                  *
2628                  * In the event of identification failure through Netlink,
2629                  * try again through sysfs, then:
2630                  *
2631                  * 1. A single IB device matches (nd == 1) with single
2632                  *    port (np=0/1) and is not a representor, assume
2633                  *    no switch support.
2634                  *
2635                  * 2. Otherwise no safe assumptions can be made;
2636                  *    complain louder and bail out.
2637                  */
2638                 np = 1;
2639                 for (i = 0; i != nd; ++i) {
2640                         memset(&list[ns].info, 0, sizeof(list[ns].info));
2641                         list[ns].max_port = 1;
2642                         list[ns].ibv_port = 1;
2643                         list[ns].ibv_dev = ibv_match[i];
2644                         list[ns].eth_dev = NULL;
2645                         list[ns].pci_dev = pci_dev;
2646                         list[ns].pf_bond = -1;
2647                         list[ns].ifindex = 0;
2648                         if (nl_rdma >= 0)
2649                                 list[ns].ifindex = mlx5_nl_ifindex
2650                                         (nl_rdma, list[ns].ibv_dev->name, 1);
2651                         if (!list[ns].ifindex) {
2652                                 char ifname[IF_NAMESIZE];
2653
2654                                 /*
2655                                  * Netlink failed; this may happen with an
2656                                  * old ib_core kernel driver (before 4.16).
2657                                  * We can assume the driver is old because
2658                                  * we are processing single-port IB
2659                                  * devices here. Try sysfs to retrieve
2660                                  * the ifindex; this method works for
2661                                  * the master device only.
2662                                  */
2663                                 if (nd > 1) {
2664                                         /*
2665                                          * Multiple devices found; assume
2666                                          * representors. We can neither
2667                                          * distinguish master from representor
2668                                          * nor retrieve the ifindex via sysfs.
2669                                          */
2670                                         continue;
2671                                 }
2672                                 ret = mlx5_get_master_ifname
2673                                         (ibv_match[i]->ibdev_path, &ifname);
2674                                 if (!ret)
2675                                         list[ns].ifindex =
2676                                                 if_nametoindex(ifname);
2677                                 if (!list[ns].ifindex) {
2678                                         /*
2679                                          * No network interface index found
2680                                          * for the specified device, meaning
2681                                          * it is neither a representor
2682                                          * nor a master.
2683                                          */
2684                                         continue;
2685                                 }
2686                         }
2687                         ret = -1;
2688                         if (nl_route >= 0)
2689                                 ret = mlx5_nl_switch_info
2690                                                (nl_route,
2691                                                 list[ns].ifindex,
2692                                                 &list[ns].info);
2693                         if (ret || (!list[ns].info.representor &&
2694                                     !list[ns].info.master)) {
2695                                 /*
2696                                  * Netlink failed to recognize
2697                                  * representors; fall back to sysfs
2698                                  * to perform the same task.
2699                                  */
2700                                 ret = mlx5_sysfs_switch_info
2701                                                (list[ns].ifindex,
2702                                                 &list[ns].info);
2703                         }
2704                         if (!ret && (list[ns].info.representor ^
2705                                      list[ns].info.master)) {
2706                                 ns++;
2707                         } else if ((nd == 1) &&
2708                                    !list[ns].info.representor &&
2709                                    !list[ns].info.master) {
2710                                 /*
2711                                  * Single IB device with
2712                                  * one physical port and an
2713                                  * attached network device.
2714                                  * Maybe SR-IOV is not enabled,
2715                                  * or there are no representors.
2716                                  */
2717                                 DRV_LOG(INFO, "no E-Switch support detected");
2718                                 ns++;
2719                                 break;
2720                         }
2721                 }
2722                 if (!ns) {
2723                         DRV_LOG(ERR,
2724                                 "unable to recognize master/representors"
2725                                 " on the multiple IB devices");
2726                         rte_errno = ENOENT;
2727                         ret = -rte_errno;
2728                         goto exit;
2729                 }
2730         }
2731         assert(ns);
2732         /*
2733          * Sort list to probe devices in natural order for user's convenience
2734          * (i.e. master first, then representors from lowest to highest ID).
2735          */
2736         qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2737         /* Default configuration. */
2738         dev_config = (struct mlx5_dev_config){
2739                 .hw_padding = 0,
2740                 .mps = MLX5_ARG_UNSET,
2741                 .rx_vec_en = 1,
2742                 .txq_inline_max = MLX5_ARG_UNSET,
2743                 .txq_inline_min = MLX5_ARG_UNSET,
2744                 .txq_inline_mpw = MLX5_ARG_UNSET,
2745                 .txqs_inline = MLX5_ARG_UNSET,
2746                 .vf_nl_en = 1,
2747                 .mr_ext_memseg_en = 1,
2748                 .mprq = {
2749                         .enabled = 0, /* Disabled by default. */
2750                         .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
2751                         .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
2752                         .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
2753                 },
2754                 .dv_esw_en = 1,
2755         };
2756         /* Device specific configuration. */
2757         switch (pci_dev->id.device_id) {
2758         case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2759         case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2760         case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2761         case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2762         case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2763         case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2764                 dev_config.vf = 1;
2765                 break;
2766         default:
2767                 break;
2768         }
2769         for (i = 0; i != ns; ++i) {
2770                 uint32_t restore;
2771
2772                 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2773                                                  &list[i],
2774                                                  dev_config);
2775                 if (!list[i].eth_dev) {
2776                         if (rte_errno != EBUSY && rte_errno != EEXIST)
2777                                 break;
2778                         /* Device is disabled or already spawned. Ignore it. */
2779                         continue;
2780                 }
2781                 restore = list[i].eth_dev->data->dev_flags;
2782                 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2783                 /* Restore non-PCI flags cleared by the above call. */
2784                 list[i].eth_dev->data->dev_flags |= restore;
2785                 mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev);
2786                 rte_eth_dev_probing_finish(list[i].eth_dev);
2787         }
2788         if (i != ns) {
2789                 DRV_LOG(ERR,
2790                         "probe of PCI device " PCI_PRI_FMT " aborted after"
2791                         " encountering an error: %s",
2792                         pci_dev->addr.domain, pci_dev->addr.bus,
2793                         pci_dev->addr.devid, pci_dev->addr.function,
2794                         strerror(rte_errno));
2795                 ret = -rte_errno;
2796                 /* Roll back. */
2797                 while (i--) {
2798                         if (!list[i].eth_dev)
2799                                 continue;
2800                         mlx5_dev_close(list[i].eth_dev);
2801                         /* mac_addrs must not be freed; it is part of dev_private. */
2802                         list[i].eth_dev->data->mac_addrs = NULL;
2803                         claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2804                 }
2805                 /* Restore original error. */
2806                 rte_errno = -ret;
2807         } else {
2808                 ret = 0;
2809         }
2810 exit:
2811         /*
2812          * Do the routine cleanup:
2813          * - close opened Netlink sockets
2814          * - free allocated spawn data array
2815          * - free the Infiniband device list
2816          */
2817         if (nl_rdma >= 0)
2818                 close(nl_rdma);
2819         if (nl_route >= 0)
2820                 close(nl_route);
2821         if (list)
2822                 rte_free(list);
2823         assert(ibv_list);
2824         mlx5_glue->free_device_list(ibv_list);
2825         return ret;
2826 }
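
For reference, the "natural order" enforced by the qsort() call above (master first, then representors from lowest to highest port ID, unidentified devices last) can be expressed by a comparator along the following lines. This is a sketch consistent with the sorting comment, not necessarily the exact body of mlx5_dev_spawn_data_cmp():

static int
example_spawn_data_cmp(const void *a, const void *b)
{
	const struct mlx5_dev_spawn_data *si_a = a;
	const struct mlx5_dev_spawn_data *si_b = b;
	int ret;

	/* Master device first. */
	ret = si_b->info.master - si_a->info.master;
	if (ret)
		return ret;
	/* Then representor devices. */
	ret = si_b->info.representor - si_a->info.representor;
	if (ret)
		return ret;
	/* Unidentified devices come last in no specific order. */
	if (!si_a->info.representor)
		return 0;
	/* Order representors by port name/ID. */
	return si_a->info.port_name - si_b->info.port_name;
}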
2827
2828 /**
2829  * Look for the Ethernet device belonging to the mlx5 driver.
2830  *
2831  * @param[in] port_id
2832  *   port_id to start looking for the device from.
2833  * @param[in] pci_dev
2834  *   Pointer to the hint PCI device. While a device is being probed,
2835  *   its siblings (master and preceding representors) might not have
2836  *   a driver assigned yet, because mlx5_pci_probe() has not
2837  *   completed; in this case matching on the hint PCI device can be
2838  *   used to detect a sibling device.
2839  *
2840  * @return
2841  *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
2842  */
2843 uint16_t
2844 mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
2845 {
2846         while (port_id < RTE_MAX_ETHPORTS) {
2847                 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2848
2849                 if (dev->state != RTE_ETH_DEV_UNUSED &&
2850                     dev->device &&
2851                     (dev->device == &pci_dev->device ||
2852                      (dev->device->driver &&
2853                      dev->device->driver->name &&
2854                      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
2855                         break;
2856                 port_id++;
2857         }
2858         if (port_id >= RTE_MAX_ETHPORTS)
2859                 return RTE_MAX_ETHPORTS;
2860         return port_id;
2861 }
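
A typical use of mlx5_eth_find_next() is to walk every port spawned from the same PCI device, e.g. while probing or removing siblings. The helper below is an illustrative sketch built only on the contract documented above; its name is hypothetical:

/* Visit all mlx5 ports belonging to @p pci_dev. */
static void
example_visit_sibling_ports(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;

	for (port_id = mlx5_eth_find_next(0, pci_dev);
	     port_id < RTE_MAX_ETHPORTS;
	     port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
		DRV_LOG(DEBUG, "found sibling port %u", port_id);
}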
2862
2863 /**
2864  * DPDK callback to remove a PCI device.
2865  *
2866  * This function removes all Ethernet devices belonging to a given PCI device.
2867  *
2868  * @param[in] pci_dev
2869  *   Pointer to the PCI device.
2870  *
2871  * @return
2872  *   0 on success, the function cannot fail.
2873  */
2874 static int
2875 mlx5_pci_remove(struct rte_pci_device *pci_dev)
2876 {
2877         uint16_t port_id;
2878
2879         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2880                 rte_eth_dev_close(port_id);
2881         return 0;
2882 }
2883
2884 static const struct rte_pci_id mlx5_pci_id_map[] = {
2885         {
2886                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2887                                PCI_DEVICE_ID_MELLANOX_CONNECTX4)
2888         },
2889         {
2890                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2891                                PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
2892         },
2893         {
2894                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2895                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
2896         },
2897         {
2898                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2899                                PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
2900         },
2901         {
2902                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2903                                PCI_DEVICE_ID_MELLANOX_CONNECTX5)
2904         },
2905         {
2906                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2907                                PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
2908         },
2909         {
2910                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2911                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
2912         },
2913         {
2914                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2915                                PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
2916         },
2917         {
2918                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2919                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
2920         },
2921         {
2922                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2923                                PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
2924         },
2925         {
2926                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2927                                PCI_DEVICE_ID_MELLANOX_CONNECTX6)
2928         },
2929         {
2930                 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2931                                PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
2932         },
2933         {
2934                 .vendor_id = 0
2935         }
2936 };
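
The table above is terminated by a zeroed sentinel entry; the PCI bus layer walks it until it hits .vendor_id == 0. A reduced sketch of that matching loop (illustrative only, not the rte_bus_pci implementation, which also checks subsystem and class IDs):

static int
example_id_match(const struct rte_pci_id *table,
		 uint16_t vendor_id, uint16_t device_id)
{
	/* The sentinel entry has .vendor_id == 0. */
	for (; table->vendor_id != 0; ++table)
		if (table->vendor_id == vendor_id &&
		    table->device_id == device_id)
			return 1;
	return 0;
}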
2937
2938 static struct rte_pci_driver mlx5_driver = {
2939         .driver = {
2940                 .name = MLX5_DRIVER_NAME
2941         },
2942         .id_table = mlx5_pci_id_map,
2943         .probe = mlx5_pci_probe,
2944         .remove = mlx5_pci_remove,
2945         .dma_map = mlx5_dma_map,
2946         .dma_unmap = mlx5_dma_unmap,
2947         .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
2948                      RTE_PCI_DRV_PROBE_AGAIN,
2949 };
2950
2951 #ifdef RTE_IBVERBS_LINK_DLOPEN
2952
2953 /**
2954  * Suffix RTE_EAL_PMD_PATH with "-glue".
2955  *
2956  * This function performs a sanity check on RTE_EAL_PMD_PATH before
2957  * suffixing its last component.
2958  *
2959  * @param[out] buf
2960  *   Output buffer; it should be large enough, otherwise NULL is returned.
2961  * @param size
2962  *   Size of @p buf.
2963  *
2964  * @return
2965  *   Pointer to @p buf, or NULL in case the suffix cannot be appended.
2966  */
2967 static char *
2968 mlx5_glue_path(char *buf, size_t size)
2969 {
2970         static const char *const bad[] = { "/", ".", "..", NULL };
2971         const char *path = RTE_EAL_PMD_PATH;
2972         size_t len = strlen(path);
2973         size_t off;
2974         int i;
2975
2976         while (len && path[len - 1] == '/')
2977                 --len;
2978         for (off = len; off && path[off - 1] != '/'; --off)
2979                 ;
2980         for (i = 0; bad[i]; ++i)
2981                 if (!strncmp(path + off, bad[i], (int)(len - off)))
2982                         goto error;
2983         i = snprintf(buf, size, "%.*s-glue", (int)len, path);
2984         if (i == -1 || (size_t)i >= size)
2985                 goto error;
2986         return buf;
2987 error:
2988         DRV_LOG(ERR,
2989                 "unable to append \"-glue\" to last component of"
2990                 " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
2991                 " please re-configure DPDK");
2992         return NULL;
2993 }
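
As a usage sketch (illustrative only): with RTE_EAL_PMD_PATH set to "/usr/lib/dpdk/pmds", mlx5_glue_path() is expected to yield "/usr/lib/dpdk/pmds-glue". The buffer sizing matches the declaration used by mlx5_glue_init() below:

static void
example_print_glue_path(void)
{
	/* Room for RTE_EAL_PMD_PATH plus the "-glue" suffix. */
	char buf[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];

	if (mlx5_glue_path(buf, sizeof(buf)))
		DRV_LOG(DEBUG, "glue library path: \"%s\"", buf);
}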
2994
2995 /**
2996  * Initialization routine for run-time dependency on rdma-core.
2997  */
2998 static int
2999 mlx5_glue_init(void)
3000 {
3001         char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
3002         const char *path[] = {
3003                 /*
3004                  * A basic security check is necessary before trusting
3005                  * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
3006                  */
3007                 (geteuid() == getuid() && getegid() == getgid() ?
3008                  getenv("MLX5_GLUE_PATH") : NULL),
3009                 /*
3010                  * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
3011                  * variant, otherwise let dlopen() look up libraries on its
3012                  * own.
3013                  */
3014                 (*RTE_EAL_PMD_PATH ?
3015                  mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
3016         };
3017         unsigned int i = 0;
3018         void *handle = NULL;
3019         void **sym;
3020         const char *dlmsg;
3021
3022         while (!handle && i != RTE_DIM(path)) {
3023                 const char *end;
3024                 size_t len;
3025                 int ret;
3026
3027                 if (!path[i]) {
3028                         ++i;
3029                         continue;
3030                 }
3031                 end = strpbrk(path[i], ":;");
3032                 if (!end)
3033                         end = path[i] + strlen(path[i]);
3034                 len = end - path[i];
3035                 ret = 0;
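                        /*
                         * Subtle idiom below: "name" is a VLA sized by the
                         * previous snprintf() return value. The first pass
                         * (ret == 0) only measures the required length; the
                         * loop then re-enters with the array re-declared at
                         * the right size and performs the real formatting.
                         */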
3036                 do {
3037                         char name[ret + 1];
3038
3039                         ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
3040                                        (int)len, path[i],
3041                                        (!len || *(end - 1) == '/') ? "" : "/");
3042                         if (ret == -1)
3043                                 break;
3044                         if (sizeof(name) != (size_t)ret + 1)
3045                                 continue;
3046                         DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
3047                                 name);
3048                         handle = dlopen(name, RTLD_LAZY);
3049                         break;
3050                 } while (1);
3051                 path[i] = end + 1;
3052                 if (!*end)
3053                         ++i;
3054         }
3055         if (!handle) {
3056                 rte_errno = EINVAL;
3057                 dlmsg = dlerror();
3058                 if (dlmsg)
3059                         DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
3060                 goto glue_error;
3061         }
3062         sym = dlsym(handle, "mlx5_glue");
3063         if (!sym || !*sym) {
3064                 rte_errno = EINVAL;
3065                 dlmsg = dlerror();
3066                 if (dlmsg)
3067                         DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
3068                 goto glue_error;
3069         }
3070         mlx5_glue = *sym;
3071         return 0;
3072 glue_error:
3073         if (handle)
3074                 dlclose(handle);
3075         DRV_LOG(WARNING,
3076                 "cannot initialize PMD due to missing run-time dependency on"
3077                 " rdma-core libraries (libibverbs, libmlx5)");
3078         return -rte_errno;
3079 }
3080
3081 #endif
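
The essential run-time linking pattern used by mlx5_glue_init() above, reduced to its core: the glue shared object exports a single symbol, "mlx5_glue", holding a pointer to a table of function pointers that the PMD resolves with dlopen()/dlsym(). A minimal standalone sketch (function name illustrative):

#include <dlfcn.h>
#include <stddef.h>

static const void *
example_load_glue_table(const char *so_name)
{
	void *handle = dlopen(so_name, RTLD_LAZY);
	void **sym;

	if (!handle)
		return NULL;
	/* The glue object exports one symbol: a pointer to its table. */
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		dlclose(handle);
		return NULL;
	}
	return *sym;
}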
3082
3083 /**
3084  * Driver initialization routine.
3085  */
3086 RTE_INIT(rte_mlx5_pmd_init)
3087 {
3088         /* Initialize driver log type. */
3089         mlx5_logtype = rte_log_register("pmd.net.mlx5");
3090         if (mlx5_logtype >= 0)
3091                 rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
3092
3093         /* Build the static tables for Verbs conversion. */
3094         mlx5_set_ptype_table();
3095         mlx5_set_cksum_table();
3096         mlx5_set_swp_types_table();
3097         /*
3098          * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
3099          * huge pages. Calling ibv_fork_init() during init allows
3100          * applications to use fork() safely for purposes other than
3101          * using this PMD, which is not supported in forked processes.
3102          */
3103         setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
3104         /* Match the size of Rx completion entry to the size of a cacheline. */
3105         if (RTE_CACHE_LINE_SIZE == 128)
3106                 setenv("MLX5_CQE_SIZE", "128", 0);
3107         /*
3108          * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
3109          * clean up all the Verbs resources even when the device was removed.
3110          */
3111         setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
3112 #ifdef RTE_IBVERBS_LINK_DLOPEN
3113         if (mlx5_glue_init())
3114                 return;
3115         assert(mlx5_glue);
3116 #endif
3117 #ifndef NDEBUG
3118         /* Glue structure must not contain any NULL pointers. */
3119         {
3120                 unsigned int i;
3121
3122                 for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
3123                         assert(((const void *const *)mlx5_glue)[i]);
3124         }
3125 #endif
3126         if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
3127                 DRV_LOG(ERR,
3128                         "rdma-core glue \"%s\" mismatch: \"%s\" is required",
3129                         mlx5_glue->version, MLX5_GLUE_VERSION);
3130                 return;
3131         }
3132         mlx5_glue->fork_init();
3133         rte_pci_register(&mlx5_driver);
3134 }
3135
3136 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
3137 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
3138 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");