net/mlx5: fix DevX Rx queue type
[dpdk.git] drivers/net/mlx5/mlx5_rxq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #include <stddef.h>
7 #include <assert.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <fcntl.h>
12 #include <sys/queue.h>
13
14 /* Verbs header. */
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic ignored "-Wpedantic"
18 #endif
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
21 #ifdef PEDANTIC
22 #pragma GCC diagnostic error "-Wpedantic"
23 #endif
24
25 #include <rte_mbuf.h>
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
31 #include <rte_io.h>
32
33 #include "mlx5.h"
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
39
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42         0x2c, 0xc6, 0x81, 0xd1,
43         0x5b, 0xdb, 0xf4, 0xf7,
44         0xfc, 0xa2, 0x83, 0x19,
45         0xdb, 0x1a, 0x3e, 0x94,
46         0x6b, 0x9e, 0x38, 0xd9,
47         0x2c, 0x9c, 0x03, 0xd1,
48         0xad, 0x99, 0x44, 0xa7,
49         0xd9, 0x56, 0x3d, 0x59,
50         0x06, 0x3c, 0x25, 0xf3,
51         0xfc, 0x1f, 0xdc, 0x2a,
52 };
53
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56               (unsigned int)sizeof(rss_hash_default_key),
57               "wrong RSS default key size.");
58
59 /**
60  * Check whether Multi-Packet RQ can be enabled for the device.
61  *
62  * @param dev
63  *   Pointer to Ethernet device.
64  *
65  * @return
66  *   1 if supported, negative errno value if not.
67  */
68 inline int
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
70 {
71         struct mlx5_priv *priv = dev->data->dev_private;
72
73         if (priv->config.mprq.enabled &&
74             priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
75                 return 1;
76         return -ENOTSUP;
77 }
78
79 /**
80  * Check whether Multi-Packet RQ is enabled for the Rx queue.
81  *
82  * @param rxq
83  *   Pointer to receive queue structure.
84  *
85  * @return
86  *   0 if disabled, otherwise enabled.
87  */
88 inline int
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
90 {
91         return rxq->strd_num_n > 0;
92 }
93
94 /**
95  * Check whether Multi-Packet RQ is enabled for the device.
96  * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
97  *
98  * @param dev
99  *   Pointer to Ethernet device.
100  *
101  * @return
102  *   0 if disabled, otherwise enabled.
103  */
104 inline int
105 mlx5_mprq_enabled(struct rte_eth_dev *dev)
106 {
107         struct mlx5_priv *priv = dev->data->dev_private;
108         uint16_t i;
109         uint16_t n = 0;
110
111         if (mlx5_check_mprq_support(dev) < 0)
112                 return 0;
113         /* All the configured queues should be enabled. */
114         for (i = 0; i < priv->rxqs_n; ++i) {
115                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
116
117                 if (!rxq)
118                         continue;
119                 if (mlx5_rxq_mprq_enabled(rxq))
120                         ++n;
121         }
122         /* Multi-Packet RQ can't be partially configured. */
123         assert(n == 0 || n == priv->rxqs_n);
124         return n == priv->rxqs_n;
125 }
126
127 /**
128  * Check whether LRO is supported and enabled for the device.
129  *
130  * @param dev
131  *   Pointer to Ethernet device.
132  *
133  * @return
134  *   0 if disabled, 1 if enabled.
135  */
136 inline int
137 mlx5_lro_on(struct rte_eth_dev *dev)
138 {
139         return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
140 }
141
142 /**
143  * Allocate RX queue elements for Multi-Packet RQ.
144  *
145  * @param rxq_ctrl
146  *   Pointer to RX queue structure.
147  *
148  * @return
149  *   0 on success, a negative errno value otherwise and rte_errno is set.
150  */
151 static int
152 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
153 {
154         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
155         unsigned int wqe_n = 1 << rxq->elts_n;
156         unsigned int i;
157         int err;
158
159         /* Iterate on segments. */
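                /*
                 * Note: one extra iteration (i == wqe_n) allocates a spare
                 * buffer that is parked in rxq->mprq_repl for replenishment.
                 */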
160         for (i = 0; i <= wqe_n; ++i) {
161                 struct mlx5_mprq_buf *buf;
162
163                 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
164                         DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
165                         rte_errno = ENOMEM;
166                         goto error;
167                 }
168                 if (i < wqe_n)
169                         (*rxq->mprq_bufs)[i] = buf;
170                 else
171                         rxq->mprq_repl = buf;
172         }
173         DRV_LOG(DEBUG,
174                 "port %u Rx queue %u allocated and configured %u segments",
175                 rxq->port_id, rxq->idx, wqe_n);
176         return 0;
177 error:
178         err = rte_errno; /* Save rte_errno before cleanup. */
179         wqe_n = i;
180         for (i = 0; (i != wqe_n); ++i) {
181                 if ((*rxq->mprq_bufs)[i] != NULL)
182                         rte_mempool_put(rxq->mprq_mp,
183                                         (*rxq->mprq_bufs)[i]);
184                 (*rxq->mprq_bufs)[i] = NULL;
185         }
186         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
187                 rxq->port_id, rxq->idx);
188         rte_errno = err; /* Restore rte_errno. */
189         return -rte_errno;
190 }
191
192 /**
193  * Allocate RX queue elements for Single-Packet RQ.
194  *
195  * @param rxq_ctrl
196  *   Pointer to RX queue structure.
197  *
198  * @return
199  *   0 on success, errno value on failure.
200  */
201 static int
202 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
203 {
204         const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
205         unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
206         unsigned int i;
207         int err;
208
209         /* Iterate on segments. */
210         for (i = 0; (i != elts_n); ++i) {
211                 struct rte_mbuf *buf;
212
213                 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
214                 if (buf == NULL) {
215                         DRV_LOG(ERR, "port %u empty mbuf pool",
216                                 PORT_ID(rxq_ctrl->priv));
217                         rte_errno = ENOMEM;
218                         goto error;
219                 }
220                 /* Headroom is reserved by rte_pktmbuf_alloc(). */
221                 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
222                 /* Buffer is supposed to be empty. */
223                 assert(rte_pktmbuf_data_len(buf) == 0);
224                 assert(rte_pktmbuf_pkt_len(buf) == 0);
225                 assert(!buf->next);
226                 /* Only the first segment keeps headroom. */
227                 if (i % sges_n)
228                         SET_DATA_OFF(buf, 0);
229                 PORT(buf) = rxq_ctrl->rxq.port_id;
230                 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
231                 PKT_LEN(buf) = DATA_LEN(buf);
232                 NB_SEGS(buf) = 1;
233                 (*rxq_ctrl->rxq.elts)[i] = buf;
234         }
235         /* If Rx vector is activated. */
236         if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
237                 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
238                 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
239                 int j;
240
241                 /* Initialize default rearm_data for vPMD. */
242                 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
243                 rte_mbuf_refcnt_set(mbuf_init, 1);
244                 mbuf_init->nb_segs = 1;
245                 mbuf_init->port = rxq->port_id;
246                 /*
247                  * prevent compiler reordering:
248                  * rearm_data covers previous fields.
249                  */
250                 rte_compiler_barrier();
251                 rxq->mbuf_initializer =
252                         *(uint64_t *)&mbuf_init->rearm_data;
253                 /* Padding with a fake mbuf for vectorized Rx. */
254                 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
255                         (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
256         }
257         DRV_LOG(DEBUG,
258                 "port %u Rx queue %u allocated and configured %u segments"
259                 " (max %u packets)",
260                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
261                 elts_n / (1 << rxq_ctrl->rxq.sges_n));
262         return 0;
263 error:
264         err = rte_errno; /* Save rte_errno before cleanup. */
265         elts_n = i;
266         for (i = 0; (i != elts_n); ++i) {
267                 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
268                         rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
269                 (*rxq_ctrl->rxq.elts)[i] = NULL;
270         }
271         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
272                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
273         rte_errno = err; /* Restore rte_errno. */
274         return -rte_errno;
275 }
276
277 /**
278  * Allocate RX queue elements.
279  *
280  * @param rxq_ctrl
281  *   Pointer to RX queue structure.
282  *
283  * @return
284  *   0 on success, errno value on failure.
285  */
286 int
287 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
288 {
289         return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
290                rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
291 }
292
293 /**
294  * Free RX queue elements for Multi-Packet RQ.
295  *
296  * @param rxq_ctrl
297  *   Pointer to RX queue structure.
298  */
299 static void
300 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
301 {
302         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
303         uint16_t i;
304
305         DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
306                 rxq->port_id, rxq->idx);
307         if (rxq->mprq_bufs == NULL)
308                 return;
309         assert(mlx5_rxq_check_vec_support(rxq) < 0);
310         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
311                 if ((*rxq->mprq_bufs)[i] != NULL)
312                         mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
313                 (*rxq->mprq_bufs)[i] = NULL;
314         }
315         if (rxq->mprq_repl != NULL) {
316                 mlx5_mprq_buf_free(rxq->mprq_repl);
317                 rxq->mprq_repl = NULL;
318         }
319 }
320
321 /**
322  * Free RX queue elements for Single-Packet RQ.
323  *
324  * @param rxq_ctrl
325  *   Pointer to RX queue structure.
326  */
327 static void
328 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
329 {
330         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
331         const uint16_t q_n = (1 << rxq->elts_n);
332         const uint16_t q_mask = q_n - 1;
333         uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
334         uint16_t i;
335
336         DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
337                 PORT_ID(rxq_ctrl->priv), rxq->idx);
338         if (rxq->elts == NULL)
339                 return;
340         /*
341          * Some mbufs in the ring belong to the application; they cannot be
342          * freed.
343          */
344         if (mlx5_rxq_check_vec_support(rxq) > 0) {
345                 for (i = 0; i < used; ++i)
346                         (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
347                 rxq->rq_pi = rxq->rq_ci;
348         }
349         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
350                 if ((*rxq->elts)[i] != NULL)
351                         rte_pktmbuf_free_seg((*rxq->elts)[i]);
352                 (*rxq->elts)[i] = NULL;
353         }
354 }
355
356 /**
357  * Free RX queue elements.
358  *
359  * @param rxq_ctrl
360  *   Pointer to RX queue structure.
361  */
362 static void
363 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
364 {
365         if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
366                 rxq_free_elts_mprq(rxq_ctrl);
367         else
368                 rxq_free_elts_sprq(rxq_ctrl);
369 }
370
371 /**
372  * Returns the per-queue supported offloads.
373  *
374  * @param dev
375  *   Pointer to Ethernet device.
376  *
377  * @return
378  *   Supported Rx offloads.
379  */
380 uint64_t
381 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
382 {
383         struct mlx5_priv *priv = dev->data->dev_private;
384         struct mlx5_dev_config *config = &priv->config;
385         uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
386                              DEV_RX_OFFLOAD_TIMESTAMP |
387                              DEV_RX_OFFLOAD_JUMBO_FRAME);
388
389         if (config->hw_fcs_strip)
390                 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
391
392         if (config->hw_csum)
393                 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
394                              DEV_RX_OFFLOAD_UDP_CKSUM |
395                              DEV_RX_OFFLOAD_TCP_CKSUM);
396         if (config->hw_vlan_strip)
397                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
398         return offloads;
399 }
400
401
402 /**
403  * Returns the per-port supported offloads.
404  *
405  * @param dev
406  *   Pointer to Ethernet device.
407  *
408  * @return
409  *   Supported Rx offloads.
410  */
411 uint64_t
412 mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
413 {
414         uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
415
416         if (MLX5_LRO_SUPPORTED(dev))
417                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
418         return offloads;
419 }
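/*
 * Illustrative sketch (not part of this file): the two helpers above surface
 * to applications through rte_eth_dev_info_get(); the names below come from
 * the generic ethdev API, not from this driver.
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *             conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 */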
420
421 /**
422  * Verify if the queue can be released.
423  *
424  * @param dev
425  *   Pointer to Ethernet device.
426  * @param idx
427  *   RX queue index.
428  *
429  * @return
430  *   1 if the queue can be released,
431  *   0 if the queue cannot be released because there are references to it,
432  *   negative errno otherwise; rte_errno is set if the queue doesn't exist.
433  */
434 static int
435 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
436 {
437         struct mlx5_priv *priv = dev->data->dev_private;
438         struct mlx5_rxq_ctrl *rxq_ctrl;
439
440         if (!(*priv->rxqs)[idx]) {
441                 rte_errno = EINVAL;
442                 return -rte_errno;
443         }
444         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
445         return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
446 }
447
448 /**
449  * DPDK callback to configure a RX queue.
450  * @param dev
451  *   Pointer to Ethernet device structure.
452  * @param idx
453  *   RX queue index.
454  * @param desc
455  *   Number of descriptors to configure in queue.
456  * @param socket
457  *   NUMA socket on which memory must be allocated.
458  * @param[in] conf
459  *   Thresholds parameters.
460  * @param mp
461  *   Memory pool for buffer allocations.
462  *
463  * @return
464  *   0 on success, a negative errno value otherwise and rte_errno is set.
465  */
466 int
467 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
468                     unsigned int socket, const struct rte_eth_rxconf *conf,
469                     struct rte_mempool *mp)
470 {
471         struct mlx5_priv *priv = dev->data->dev_private;
472         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
473         struct mlx5_rxq_ctrl *rxq_ctrl =
474                 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
475
476         if (!rte_is_power_of_2(desc)) {
477                 desc = 1 << log2above(desc);
478                 DRV_LOG(WARNING,
479                         "port %u increased number of descriptors in Rx queue %u"
480                         " to the next power of two (%d)",
481                         dev->data->port_id, idx, desc);
482         }
483         DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
484                 dev->data->port_id, idx, desc);
485         if (idx >= priv->rxqs_n) {
486                 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
487                         dev->data->port_id, idx, priv->rxqs_n);
488                 rte_errno = EOVERFLOW;
489                 return -rte_errno;
490         }
491         if (!mlx5_rxq_releasable(dev, idx)) {
492                 DRV_LOG(ERR, "port %u unable to release queue index %u",
493                         dev->data->port_id, idx);
494                 rte_errno = EBUSY;
495                 return -rte_errno;
496         }
497         mlx5_rxq_release(dev, idx);
498         rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
499         if (!rxq_ctrl) {
500                 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
501                         dev->data->port_id, idx);
502                 rte_errno = ENOMEM;
503                 return -rte_errno;
504         }
505         DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
506                 dev->data->port_id, idx);
507         (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
508         return 0;
509 }
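/*
 * Illustrative sketch (not part of this file): this callback is reached via
 * the generic ethdev API; the pool size and descriptor count are hypothetical.
 *
 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *                                     0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                     rte_socket_id());
 *
 *     // Dispatches to mlx5_rx_queue_setup(); a non-power-of-two desc is
 *     // rounded up by this PMD as shown above.
 *     rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 */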
510
511 /**
512  * DPDK callback to release a RX queue.
513  *
514  * @param dpdk_rxq
515  *   Generic RX queue pointer.
516  */
517 void
518 mlx5_rx_queue_release(void *dpdk_rxq)
519 {
520         struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
521         struct mlx5_rxq_ctrl *rxq_ctrl;
522         struct mlx5_priv *priv;
523
524         if (rxq == NULL)
525                 return;
526         rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
527         priv = rxq_ctrl->priv;
528         if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
529                 rte_panic("port %u Rx queue %u is still used by a flow and"
530                           " cannot be removed\n",
531                           PORT_ID(priv), rxq->idx);
532         mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
533 }
534
535 /**
536  * Get an Rx queue Verbs/DevX object.
537  *
538  * @param dev
539  *   Pointer to Ethernet device.
540  * @param idx
541  *   Queue index in DPDK Rx queue array
542  *
543  * @return
544  *   The Verbs/DevX object if it exists.
545  */
546 static struct mlx5_rxq_obj *
547 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
548 {
549         struct mlx5_priv *priv = dev->data->dev_private;
550         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
551         struct mlx5_rxq_ctrl *rxq_ctrl;
552
553         if (idx >= priv->rxqs_n)
554                 return NULL;
555         if (!rxq_data)
556                 return NULL;
557         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
558         if (rxq_ctrl->obj)
559                 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
560         return rxq_ctrl->obj;
561 }
562
563 /**
564  * Release the resources allocated for an RQ DevX object.
565  *
566  * @param rxq_ctrl
567  *   DevX Rx queue object.
568  */
569 static void
570 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
571 {
572         if (rxq_ctrl->rxq.wqes) {
573                 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
574                 rxq_ctrl->rxq.wqes = NULL;
575         }
576         if (rxq_ctrl->wq_umem) {
577                 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
578                 rxq_ctrl->wq_umem = NULL;
579         }
580 }
581
582 /**
583  * Release an Rx verbs/DevX queue object.
584  *
585  * @param rxq_obj
586  *   Verbs/DevX Rx queue object.
587  *
588  * @return
589  *   1 while a reference on it exists, 0 when freed.
590  */
591 static int
592 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
593 {
594         assert(rxq_obj);
595         if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
596                 assert(rxq_obj->wq);
597         assert(rxq_obj->cq);
598         if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
599                 rxq_free_elts(rxq_obj->rxq_ctrl);
600                 if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
601                         claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
602                 } else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
603                         claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
604                         rxq_release_rq_resources(rxq_obj->rxq_ctrl);
605                 }
606                 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
607                 if (rxq_obj->channel)
608                         claim_zero(mlx5_glue->destroy_comp_channel
609                                    (rxq_obj->channel));
610                 LIST_REMOVE(rxq_obj, next);
611                 rte_free(rxq_obj);
612                 return 0;
613         }
614         return 1;
615 }
616
617 /**
618  * Allocate queue vector and fill epoll fd list for Rx interrupts.
619  *
620  * @param dev
621  *   Pointer to Ethernet device.
622  *
623  * @return
624  *   0 on success, a negative errno value otherwise and rte_errno is set.
625  */
626 int
627 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
628 {
629         struct mlx5_priv *priv = dev->data->dev_private;
630         unsigned int i;
631         unsigned int rxqs_n = priv->rxqs_n;
632         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
633         unsigned int count = 0;
634         struct rte_intr_handle *intr_handle = dev->intr_handle;
635
636         if (!dev->data->dev_conf.intr_conf.rxq)
637                 return 0;
638         mlx5_rx_intr_vec_disable(dev);
639         intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
640         if (intr_handle->intr_vec == NULL) {
641                 DRV_LOG(ERR,
642                         "port %u failed to allocate memory for interrupt"
643                         " vector, Rx interrupts will not be supported",
644                         dev->data->port_id);
645                 rte_errno = ENOMEM;
646                 return -rte_errno;
647         }
648         intr_handle->type = RTE_INTR_HANDLE_EXT;
649         for (i = 0; i != n; ++i) {
650                 /* This rxq obj must not be released in this function. */
651                 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
652                 int fd;
653                 int flags;
654                 int rc;
655
656                 /* Skip queues that cannot request interrupts. */
657                 if (!rxq_obj || !rxq_obj->channel) {
658                         /* Use invalid intr_vec[] index to disable entry. */
659                         intr_handle->intr_vec[i] =
660                                 RTE_INTR_VEC_RXTX_OFFSET +
661                                 RTE_MAX_RXTX_INTR_VEC_ID;
662                         continue;
663                 }
664                 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
665                         DRV_LOG(ERR,
666                                 "port %u too many Rx queues for interrupt"
667                                 " vector size (%d), Rx interrupts cannot be"
668                                 " enabled",
669                                 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
670                         mlx5_rx_intr_vec_disable(dev);
671                         rte_errno = ENOMEM;
672                         return -rte_errno;
673                 }
674                 fd = rxq_obj->channel->fd;
675                 flags = fcntl(fd, F_GETFL);
676                 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
677                 if (rc < 0) {
678                         rte_errno = errno;
679                         DRV_LOG(ERR,
680                                 "port %u failed to make Rx interrupt file"
681                                 " descriptor %d non-blocking for queue index"
682                                 " %d",
683                                 dev->data->port_id, fd, i);
684                         mlx5_rx_intr_vec_disable(dev);
685                         return -rte_errno;
686                 }
687                 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
688                 intr_handle->efds[count] = fd;
689                 count++;
690         }
691         if (!count)
692                 mlx5_rx_intr_vec_disable(dev);
693         else
694                 intr_handle->nb_efd = count;
695         return 0;
696 }
697
698 /**
699  * Clean up Rx interrupts handler.
700  *
701  * @param dev
702  *   Pointer to Ethernet device.
703  */
704 void
705 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
706 {
707         struct mlx5_priv *priv = dev->data->dev_private;
708         struct rte_intr_handle *intr_handle = dev->intr_handle;
709         unsigned int i;
710         unsigned int rxqs_n = priv->rxqs_n;
711         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
712
713         if (!dev->data->dev_conf.intr_conf.rxq)
714                 return;
715         if (!intr_handle->intr_vec)
716                 goto free;
717         for (i = 0; i != n; ++i) {
718                 struct mlx5_rxq_ctrl *rxq_ctrl;
719                 struct mlx5_rxq_data *rxq_data;
720
721                 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
722                     RTE_MAX_RXTX_INTR_VEC_ID)
723                         continue;
724                 /*
725                  * Need to access the queue directly to release the reference
726                  * kept in mlx5_rx_intr_vec_enable().
727                  */
728                 rxq_data = (*priv->rxqs)[i];
729                 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
730                 if (rxq_ctrl->obj)
731                         mlx5_rxq_obj_release(rxq_ctrl->obj);
732         }
733 free:
734         rte_intr_free_epoll_fd(intr_handle);
735         if (intr_handle->intr_vec)
736                 free(intr_handle->intr_vec);
737         intr_handle->nb_efd = 0;
738         intr_handle->intr_vec = NULL;
739 }
740
741 /**
742  * MLX5 CQ notification.
743  *
744  * @param rxq
745  *   Pointer to receive queue structure.
746  * @param sq_n_rxq
747  *   Sequence number per receive queue.
748  */
749 static inline void
750 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
751 {
752         int sq_n = 0;
753         uint32_t doorbell_hi;
754         uint64_t doorbell;
755         void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
756
757         sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
758         doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
759         doorbell = (uint64_t)doorbell_hi << 32;
760         doorbell |=  rxq->cqn;
761         rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
762         mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
763                          cq_db_reg, rxq->uar_lock_cq);
764 }
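/*
 * Worked example (values hypothetical): arming with sq_n_rxq == 2 and
 * cq_ci == 0x15 packs the masked sequence number into the high bits of
 * doorbell_hi and the masked consumer index into its low bits; the 64-bit
 * doorbell then carries doorbell_hi in its upper half and the CQ number in
 * its lower half, written big-endian to the UAR register.
 */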
765
766 /**
767  * DPDK callback for Rx queue interrupt enable.
768  *
769  * @param dev
770  *   Pointer to Ethernet device structure.
771  * @param rx_queue_id
772  *   Rx queue number.
773  *
774  * @return
775  *   0 on success, a negative errno value otherwise and rte_errno is set.
776  */
777 int
778 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
779 {
780         struct mlx5_priv *priv = dev->data->dev_private;
781         struct mlx5_rxq_data *rxq_data;
782         struct mlx5_rxq_ctrl *rxq_ctrl;
783
784         rxq_data = (*priv->rxqs)[rx_queue_id];
785         if (!rxq_data) {
786                 rte_errno = EINVAL;
787                 return -rte_errno;
788         }
789         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
790         if (rxq_ctrl->irq) {
791                 struct mlx5_rxq_obj *rxq_obj;
792
793                 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
794                 if (!rxq_obj) {
795                         rte_errno = EINVAL;
796                         return -rte_errno;
797                 }
798                 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
799                 mlx5_rxq_obj_release(rxq_obj);
800         }
801         return 0;
802 }
803
804 /**
805  * DPDK callback for Rx queue interrupt disable.
806  *
807  * @param dev
808  *   Pointer to Ethernet device structure.
809  * @param rx_queue_id
810  *   Rx queue number.
811  *
812  * @return
813  *   0 on success, a negative errno value otherwise and rte_errno is set.
814  */
815 int
816 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
817 {
818         struct mlx5_priv *priv = dev->data->dev_private;
819         struct mlx5_rxq_data *rxq_data;
820         struct mlx5_rxq_ctrl *rxq_ctrl;
821         struct mlx5_rxq_obj *rxq_obj = NULL;
822         struct ibv_cq *ev_cq;
823         void *ev_ctx;
824         int ret;
825
826         rxq_data = (*priv->rxqs)[rx_queue_id];
827         if (!rxq_data) {
828                 rte_errno = EINVAL;
829                 return -rte_errno;
830         }
831         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
832         if (!rxq_ctrl->irq)
833                 return 0;
834         rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
835         if (!rxq_obj) {
836                 rte_errno = EINVAL;
837                 return -rte_errno;
838         }
839         ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
840         if (ret || ev_cq != rxq_obj->cq) {
841                 rte_errno = EINVAL;
842                 goto exit;
843         }
844         rxq_data->cq_arm_sn++;
845         mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
846         mlx5_rxq_obj_release(rxq_obj);
847         return 0;
848 exit:
849         ret = rte_errno; /* Save rte_errno before cleanup. */
850         if (rxq_obj)
851                 mlx5_rxq_obj_release(rxq_obj);
852         DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
853                 dev->data->port_id, rx_queue_id);
854         rte_errno = ret; /* Restore rte_errno. */
855         return -rte_errno;
856 }
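/*
 * Illustrative usage sketch (application side, hypothetical): the two
 * callbacks above are reached through the ethdev Rx interrupt API.
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);  // arms the CQ
 *     // ... wait on the epoll fd set up by mlx5_rx_intr_vec_enable() ...
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id); // acks the CQ event
 */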
857
858 /**
859  * Create a CQ Verbs object.
860  *
861  * @param dev
862  *   Pointer to Ethernet device.
863  * @param priv
864  *   Pointer to device private data.
865  * @param rxq_data
866  *   Pointer to Rx queue data.
867  * @param cqe_n
868  *   Number of CQEs in CQ.
869  * @param rxq_obj
870  *   Pointer to Rx queue object data.
871  *
872  * @return
873  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
874  */
875 static struct ibv_cq *
876 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
877                 struct mlx5_rxq_data *rxq_data,
878                 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
879 {
880         struct {
881                 struct ibv_cq_init_attr_ex ibv;
882                 struct mlx5dv_cq_init_attr mlx5;
883         } cq_attr;
884
885         cq_attr.ibv = (struct ibv_cq_init_attr_ex){
886                 .cqe = cqe_n,
887                 .channel = rxq_obj->channel,
888                 .comp_mask = 0,
889         };
890         cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
891                 .comp_mask = 0,
892         };
893         if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
894                 cq_attr.mlx5.comp_mask |=
895                                 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
896 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
897                 cq_attr.mlx5.cqe_comp_res_format =
898                                 mlx5_rxq_mprq_enabled(rxq_data) ?
899                                 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
900                                 MLX5DV_CQE_RES_FORMAT_HASH;
901 #else
902                 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
903 #endif
904                 /*
905                  * For vectorized Rx the CQE count must not be doubled,
906                  * so that cq_ci and rq_ci stay aligned.
907                  */
908                 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
909                         cq_attr.ibv.cqe *= 2;
910         } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
911                 DRV_LOG(DEBUG,
912                         "port %u Rx CQE compression is disabled for HW"
913                         " timestamp",
914                         dev->data->port_id);
915         }
916 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
917         if (priv->config.cqe_pad) {
918                 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
919                 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
920         }
921 #endif
922         return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
923                                                               &cq_attr.ibv,
924                                                               &cq_attr.mlx5));
925 }
926
927 /**
928  * Create a WQ Verbs object.
929  *
930  * @param dev
931  *   Pointer to Ethernet device.
932  * @param priv
933  *   Pointer to device private data.
934  * @param rxq_data
935  *   Pointer to Rx queue data.
936  * @param idx
937  *   Queue index in DPDK Rx queue array
938  * @param wqe_n
939  *   Number of WQEs in WQ.
940  * @param rxq_obj
941  *   Pointer to Rx queue object data.
942  *
943  * @return
944  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
945  */
946 static struct ibv_wq *
947 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
948                 struct mlx5_rxq_data *rxq_data, uint16_t idx,
949                 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
950 {
951         struct {
952                 struct ibv_wq_init_attr ibv;
953 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
954                 struct mlx5dv_wq_init_attr mlx5;
955 #endif
956         } wq_attr;
957
958         wq_attr.ibv = (struct ibv_wq_init_attr){
959                 .wq_context = NULL, /* Could be useful in the future. */
960                 .wq_type = IBV_WQT_RQ,
961                 /* Max number of outstanding WRs. */
962                 .max_wr = wqe_n >> rxq_data->sges_n,
963                 /* Max number of scatter/gather elements in a WR. */
964                 .max_sge = 1 << rxq_data->sges_n,
965                 .pd = priv->sh->pd,
966                 .cq = rxq_obj->cq,
967                 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
968                 .create_flags = (rxq_data->vlan_strip ?
969                                  IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
970         };
971         /* By default, FCS (CRC) is stripped by hardware. */
972         if (rxq_data->crc_present) {
973                 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
974                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
975         }
976         if (priv->config.hw_padding) {
977 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
978                 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
979                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
980 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
981                 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
982                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
983 #endif
984         }
985 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
986         wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
987                 .comp_mask = 0,
988         };
989         if (mlx5_rxq_mprq_enabled(rxq_data)) {
990                 struct mlx5dv_striding_rq_init_attr *mprq_attr =
991                                                 &wq_attr.mlx5.striding_rq_attrs;
992
993                 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
994                 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
995                         .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
996                         .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
997                         .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
998                 };
999         }
1000         rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1001                                               &wq_attr.mlx5);
1002 #else
1003         rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1004 #endif
1005         if (rxq_obj->wq) {
1006                 /*
1007                  * Make sure the number of WRs*SGEs matches expectations since a queue
1008                  * cannot allocate more than "desc" buffers.
1009                  */
1010                 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1011                     wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1012                         DRV_LOG(ERR,
1013                                 "port %u Rx queue %u requested %u*%u but got"
1014                                 " %u*%u WRs*SGEs",
1015                                 dev->data->port_id, idx,
1016                                 wqe_n >> rxq_data->sges_n,
1017                                 (1 << rxq_data->sges_n),
1018                                 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1019                         claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1020                         rxq_obj->wq = NULL;
1021                         rte_errno = EINVAL;
1022                 }
1023         }
1024         return rxq_obj->wq;
1025 }
1026
1027 /**
1028  * Fill common fields of create RQ attributes structure.
1029  *
1030  * @param rxq_data
1031  *   Pointer to Rx queue data.
1032  * @param cqn
1033  *   CQ number to use with this RQ.
1034  * @param rq_attr
1035  *   RQ attributes structure to fill.
1036  */
1037 static void
1038 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1039                               struct mlx5_devx_create_rq_attr *rq_attr)
1040 {
1041         rq_attr->state = MLX5_RQC_STATE_RST;
1042         rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1043         rq_attr->cqn = cqn;
1044         rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1045 }
1046
1047 /**
1048  * Fill common fields of DevX WQ attributes structure.
1049  *
1050  * @param priv
1051  *   Pointer to device private data.
1052  * @param rxq_ctrl
1053  *   Pointer to Rx queue control structure.
1054  * @param wq_attr
1055  *   WQ attributes structure to fill.
1056  */
1057 static void
1058 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1059                        struct mlx5_devx_wq_attr *wq_attr)
1060 {
1061         wq_attr->end_padding_mode = priv->config.cqe_pad ?
1062                                         MLX5_WQ_END_PAD_MODE_ALIGN :
1063                                         MLX5_WQ_END_PAD_MODE_NONE;
1064         wq_attr->pd = priv->sh->pdn;
1065         wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1066         wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1067         wq_attr->dbr_umem_valid = 1;
1068         wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1069         wq_attr->wq_umem_valid = 1;
1070 }
1071
1072 /**
1073  * Create a RQ object using DevX.
1074  *
1075  * @param dev
1076  *   Pointer to Ethernet device.
1077  * @param idx
1078  *   Queue index in DPDK Rx queue array
1079  * @param cqn
1080  *   CQ number to use with this RQ.
1081  *
1082  * @return
1083  *   The DevX object initialised, NULL otherwise and rte_errno is set.
1084  */
1085 static struct mlx5_devx_obj *
1086 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1087 {
1088         struct mlx5_priv *priv = dev->data->dev_private;
1089         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1090         struct mlx5_rxq_ctrl *rxq_ctrl =
1091                 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1092         struct mlx5_devx_create_rq_attr rq_attr;
1093         uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1094         uint32_t wq_size = 0;
1095         uint32_t wqe_size = 0;
1096         uint32_t log_wqe_size = 0;
1097         void *buf = NULL;
1098         struct mlx5_devx_obj *rq;
1099
1100         memset(&rq_attr, 0, sizeof(rq_attr));
1101         /* Fill RQ attributes. */
1102         rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1103         rq_attr.flush_in_error_en = 1;
1104         mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1105         /* Fill WQ attributes for this RQ. */
1106         if (mlx5_rxq_mprq_enabled(rxq_data)) {
1107                 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1108                 /*
1109                  * Number of strides in each WQE:
1110                  * 512*2^single_wqe_log_num_of_strides.
1111                  */
1112                 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1113                                 rxq_data->strd_num_n -
1114                                 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1115                 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1116                 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1117                                 rxq_data->strd_sz_n -
1118                                 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
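                /*
                 * Worked example (hypothetical values): strd_num_n == 9 gives
                 * 2^9 == 512 strides per WQE, and strd_sz_n == 11 gives a
                 * 2^11 == 2048B stride.
                 */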
1119                 wqe_size = sizeof(struct mlx5_wqe_mprq);
1120         } else {
1121                 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1122                 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1123         }
1124         log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1125         rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1126         rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1127         /* Calculate and allocate WQ memory space. */
1128         wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1129         wq_size = wqe_n * wqe_size;
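        /*
         * Worked example (hypothetical values): for a Single-Packet RQ with
         * elts_n == 10 and sges_n == 0, wqe_size rounds up to 16B
         * (sizeof(struct mlx5_wqe_data_seg)), so wq_size == 1024 * 16B == 16KiB.
         */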
1130         buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
1131                                 rxq_ctrl->socket);
1132         if (!buf)
1133                 return NULL;
1134         rxq_data->wqes = buf;
1135         rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1136                                                      buf, wq_size, 0);
1137         if (!rxq_ctrl->wq_umem) {
1138                 rte_free(buf);
1139                 return NULL;
1140         }
1141         mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1142         rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1143         if (!rq)
1144                 rxq_release_rq_resources(rxq_ctrl);
1145         return rq;
1146 }
1147
1148 /**
1149  * Create the Rx queue Verbs/DevX object.
1150  *
1151  * @param dev
1152  *   Pointer to Ethernet device.
1153  * @param idx
1154  *   Queue index in DPDK Rx queue array
1155  * @param type
1156  *   Type of Rx queue object to create.
1157  *
1158  * @return
1159  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1160  */
1161 struct mlx5_rxq_obj *
1162 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1163                  enum mlx5_rxq_obj_type type)
1164 {
1165         struct mlx5_priv *priv = dev->data->dev_private;
1166         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1167         struct mlx5_rxq_ctrl *rxq_ctrl =
1168                 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1169         struct ibv_wq_attr mod;
1170         unsigned int cqe_n;
1171         unsigned int wqe_n = 1 << rxq_data->elts_n;
1172         struct mlx5_rxq_obj *tmpl = NULL;
1173         struct mlx5dv_cq cq_info;
1174         struct mlx5dv_rwq rwq;
1175         int ret = 0;
1176         struct mlx5dv_obj obj;
1177
1178         assert(rxq_data);
1179         assert(!rxq_ctrl->obj);
1180         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1181         priv->verbs_alloc_ctx.obj = rxq_ctrl;
1182         tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1183                                  rxq_ctrl->socket);
1184         if (!tmpl) {
1185                 DRV_LOG(ERR,
1186                         "port %u Rx queue %u cannot allocate verbs resources",
1187                         dev->data->port_id, rxq_data->idx);
1188                 rte_errno = ENOMEM;
1189                 goto error;
1190         }
1191         tmpl->type = type;
1192         tmpl->rxq_ctrl = rxq_ctrl;
1193         if (rxq_ctrl->irq) {
1194                 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1195                 if (!tmpl->channel) {
1196                         DRV_LOG(ERR, "port %u: comp channel creation failure",
1197                                 dev->data->port_id);
1198                         rte_errno = ENOMEM;
1199                         goto error;
1200                 }
1201         }
1202         if (mlx5_rxq_mprq_enabled(rxq_data))
1203                 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1204         else
1205                 cqe_n = wqe_n - 1;
1206         tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1207         if (!tmpl->cq) {
1208                 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1209                         dev->data->port_id, idx);
1210                 rte_errno = ENOMEM;
1211                 goto error;
1212         }
1213         obj.cq.in = tmpl->cq;
1214         obj.cq.out = &cq_info;
1215         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1216         if (ret) {
1217                 rte_errno = ret;
1218                 goto error;
1219         }
1220         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1221                 DRV_LOG(ERR,
1222                         "port %u wrong MLX5_CQE_SIZE environment variable"
1223                         " value: it should be set to %u",
1224                         dev->data->port_id, RTE_CACHE_LINE_SIZE);
1225                 rte_errno = EINVAL;
1226                 goto error;
1227         }
1228         DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1229                 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1230         DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1231                 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1232         /* Allocate door-bell for types created with DevX. */
1233         if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1234                 struct mlx5_devx_dbr_page *dbr_page;
1235                 int64_t dbr_offset;
1236
1237                 dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1238                 if (dbr_offset < 0)
1239                         goto error;
1240                 rxq_ctrl->dbr_offset = dbr_offset;
1241                 rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1242                 rxq_ctrl->dbr_umem_id_valid = 1;
1243                 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1244                                                (uintptr_t)rxq_ctrl->dbr_offset);
1245         }
1246         if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1247                 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1248                                            tmpl);
1249                 if (!tmpl->wq) {
1250                         DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1251                                 dev->data->port_id, idx);
1252                         rte_errno = ENOMEM;
1253                         goto error;
1254                 }
1255                 /* Change queue state to ready. */
1256                 mod = (struct ibv_wq_attr){
1257                         .attr_mask = IBV_WQ_ATTR_STATE,
1258                         .wq_state = IBV_WQS_RDY,
1259                 };
1260                 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1261                 if (ret) {
1262                         DRV_LOG(ERR,
1263                                 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1264                                 " failed", dev->data->port_id, idx);
1265                         rte_errno = ret;
1266                         goto error;
1267                 }
1268                 obj.rwq.in = tmpl->wq;
1269                 obj.rwq.out = &rwq;
1270                 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1271                 if (ret) {
1272                         rte_errno = ret;
1273                         goto error;
1274                 }
1275                 rxq_data->wqes = rwq.buf;
1276                 rxq_data->rq_db = rwq.dbrec;
1277         } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1278                 struct mlx5_devx_modify_rq_attr rq_attr;
1279
1280                 memset(&rq_attr, 0, sizeof(rq_attr));
1281                 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1282                 if (!tmpl->rq) {
1283                         DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1284                                 dev->data->port_id, idx);
1285                         rte_errno = ENOMEM;
1286                         goto error;
1287                 }
1288                 /* Change queue state to ready. */
1289                 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1290                 rq_attr.state = MLX5_RQC_STATE_RDY;
1291                 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1292                 if (ret)
1293                         goto error;
1294         }
1295         /* Fill the rings. */
1296         rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1297         rxq_data->cq_db = cq_info.dbrec;
1298         rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1299         rxq_data->cq_uar = cq_info.cq_uar;
1300         rxq_data->cqn = cq_info.cqn;
1301         rxq_data->cq_arm_sn = 0;
1302         mlx5_rxq_initialize(rxq_data);
1303         rxq_data->cq_ci = 0;
1304         DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1305                 idx, (void *)&tmpl);
1306         rte_atomic32_inc(&tmpl->refcnt);
1307         LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1308         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1309         return tmpl;
1310 error:
1311         if (tmpl) {
1312                 ret = rte_errno; /* Save rte_errno before cleanup. */
1313                 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1314                         claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1315                 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1316                         claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1317                 if (tmpl->cq)
1318                         claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1319                 if (tmpl->channel)
1320                         claim_zero(mlx5_glue->destroy_comp_channel
1321                                                         (tmpl->channel));
1322                 rte_free(tmpl);
1323                 rte_errno = ret; /* Restore rte_errno. */
1324         }
1325         if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1326                 rxq_release_rq_resources(rxq_ctrl);
1327         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1328         return NULL;
1329 }
1330
1331 /**
1332  * Verify the Rx queue objects list is empty.
1333  *
1334  * @param dev
1335  *   Pointer to Ethernet device.
1336  *
1337  * @return
1338  *   The number of objects not released.
1339  */
1340 int
1341 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1342 {
1343         struct mlx5_priv *priv = dev->data->dev_private;
1344         int ret = 0;
1345         struct mlx5_rxq_obj *rxq_obj;
1346
1347         LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1348                 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1349                         dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1350                 ++ret;
1351         }
1352         return ret;
1353 }
1354
1355 /**
1356  * Callback function to initialize mbufs for Multi-Packet RQ.
1357  */
1358 static inline void
1359 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1360                     void *_m, unsigned int i __rte_unused)
1361 {
1362         struct mlx5_mprq_buf *buf = _m;
1363         struct rte_mbuf_ext_shared_info *shinfo;
1364         unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1365         unsigned int j;
1366
1367         memset(_m, 0, sizeof(*buf));
1368         buf->mp = mp;
1369         rte_atomic16_set(&buf->refcnt, 1);
1370         for (j = 0; j != strd_n; ++j) {
1371                 shinfo = &buf->shinfos[j];
1372                 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1373                 shinfo->fcb_opaque = buf;
1374         }
1375 }
1376
1377 /**
1378  * Free mempool of Multi-Packet RQ.
1379  *
1380  * @param dev
1381  *   Pointer to Ethernet device.
1382  *
1383  * @return
1384  *   0 on success, negative errno value on failure.
1385  */
1386 int
1387 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1388 {
1389         struct mlx5_priv *priv = dev->data->dev_private;
1390         struct rte_mempool *mp = priv->mprq_mp;
1391         unsigned int i;
1392
1393         if (mp == NULL)
1394                 return 0;
1395         DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1396                 dev->data->port_id, mp->name);
1397         /*
1398          * If a buffer in the pool has been externally attached to an mbuf
1399          * and is still in use by the application, destroying the Rx queue
1400          * can spoil the packet. This is unlikely but possible if queues are
1401          * dynamically created and destroyed while Rx packets are held.
1402          *
1403          * TODO: It is unavoidable for now because the mempool for Multi-Packet
1404          * RQ isn't provided by the application but managed by the PMD.
1405          */
1406         if (!rte_mempool_full(mp)) {
1407                 DRV_LOG(ERR,
1408                         "port %u mempool for Multi-Packet RQ is still in use",
1409                         dev->data->port_id);
1410                 rte_errno = EBUSY;
1411                 return -rte_errno;
1412         }
1413         rte_mempool_free(mp);
1414         /* Unset mempool for each Rx queue. */
1415         for (i = 0; i != priv->rxqs_n; ++i) {
1416                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1417
1418                 if (rxq == NULL)
1419                         continue;
1420                 rxq->mprq_mp = NULL;
1421         }
1422         priv->mprq_mp = NULL;
1423         return 0;
1424 }
1425
1426 /**
1427  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1428  * mempool. If already allocated, reuse it if there are enough elements.
1429  * Otherwise, resize it.
1430  *
1431  * @param dev
1432  *   Pointer to Ethernet device.
1433  *
1434  * @return
1435  *   0 on success, negative errno value on failure.
1436  */
1437 int
1438 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1439 {
1440         struct mlx5_priv *priv = dev->data->dev_private;
1441         struct rte_mempool *mp = priv->mprq_mp;
1442         char name[RTE_MEMPOOL_NAMESIZE];
1443         unsigned int desc = 0;
1444         unsigned int buf_len;
1445         unsigned int obj_num;
1446         unsigned int obj_size;
1447         unsigned int strd_num_n = 0;
1448         unsigned int strd_sz_n = 0;
1449         unsigned int i;
1450
1451         if (!mlx5_mprq_enabled(dev))
1452                 return 0;
1453         /* Count the total number of descriptors configured. */
1454         for (i = 0; i != priv->rxqs_n; ++i) {
1455                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1456
1457                 if (rxq == NULL)
1458                         continue;
1459                 desc += 1 << rxq->elts_n;
1460                 /* Get the max number of strides. */
1461                 if (strd_num_n < rxq->strd_num_n)
1462                         strd_num_n = rxq->strd_num_n;
1463                 /* Get the max size of a stride. */
1464                 if (strd_sz_n < rxq->strd_sz_n)
1465                         strd_sz_n = rxq->strd_sz_n;
1466         }
1467         assert(strd_num_n && strd_sz_n);
1468         buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1469         obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1470                 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1471         /*
1472          * Received packets can be either memcpy'd or externally referenced. In
1473          * case the packet is attached to an mbuf as an external buffer, it
1474          * isn't possible to predict how the buffers will be queued by the
1475          * application, so there is no way to pre-allocate exactly the needed
1476          * buffers; instead, enough buffers must be prepared speculatively.
1477          *
1478          * In the data path, if this Mempool is depleted, PMD will try to memcpy
1479          * received packets to buffers provided by application (rxq->mp) until
1480          * this Mempool gets available again.
1481          */
1482         desc *= 4;
1483         obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1484         /*
1485          * rte_mempool_create_empty() has a sanity check that refuses a cache
1486          * size too large compared to the number of elements.
1487          * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the
1488          * constant 2 is used here instead.
1489          */
1490         obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
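        /*
         * Sizing example (illustrative, assuming MLX5_MPRQ_MP_CACHE_SZ == 32):
         * four Rx queues of 1024 descriptors give desc = 4096 * 4 = 16384
         * after quadrupling, hence obj_num = 16384 + 32 * 4 = 16512.
         */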
1491         /* Check whether a mempool is already allocated and if it can be reused. */
1492         if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1493                 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1494                         dev->data->port_id, mp->name);
1495                 /* Reuse. */
1496                 goto exit;
1497         } else if (mp != NULL) {
1498                 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1499                         dev->data->port_id, mp->name);
1500                 /*
1501                  * If freeing fails, the mempool may still be in use and there
1502                  * is no choice but to keep using the existing one. On buffer
1503                  * underrun, packets will be memcpy'd instead of attached as
1504                  * external buffers.
1505                  */
1506                 if (mlx5_mprq_free_mp(dev)) {
1507                         if (mp->elt_size >= obj_size)
1508                                 goto exit;
1509                         else
1510                                 return -rte_errno;
1511                 }
1512         }
1513         snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1514         mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1515                                 0, NULL, NULL, mlx5_mprq_buf_init,
1516                                 (void *)(uintptr_t)(1 << strd_num_n),
1517                                 dev->device->numa_node, 0);
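        /*
         * Note: the per-buffer stride count is passed to mlx5_mprq_buf_init()
         * through the opaque obj_init argument of rte_mempool_create().
         */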
1518         if (mp == NULL) {
1519                 DRV_LOG(ERR,
1520                         "port %u failed to allocate a mempool for"
1521                         " Multi-Packet RQ, count=%u, size=%u",
1522                         dev->data->port_id, obj_num, obj_size);
1523                 rte_errno = ENOMEM;
1524                 return -rte_errno;
1525         }
1526         priv->mprq_mp = mp;
1527 exit:
1528         /* Set mempool for each Rx queue. */
1529         for (i = 0; i != priv->rxqs_n; ++i) {
1530                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1531
1532                 if (rxq == NULL)
1533                         continue;
1534                 rxq->mprq_mp = mp;
1535         }
1536         DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1537                 dev->data->port_id);
1538         return 0;
1539 }
1540
1541 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
1542 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1543                                         sizeof(struct rte_vlan_hdr) * 2 + \
1544                                         sizeof(struct rte_ipv6_hdr)))
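/*
 * MLX5_MAX_TCP_HDR_OFFSET is the worst-case offset of the TCP header:
 * an Ethernet header (14 B), two VLAN tags (2 * 4 B) and an IPv6 header
 * (40 B), i.e. 62 bytes in total.
 */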
1545 /**
1546  * Adjust the maximum LRO message size.
1547  *
1548  * @param dev
1549  *   Pointer to Ethernet device.
1550  * @param max_lro_size
1551  *   The maximum size of an LRO packet.
1552  */
1553 static void
1554 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
1555 {
1556         struct mlx5_priv *priv = dev->data->dev_private;
1557
1558         if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1559             MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1560             MLX5_MAX_TCP_HDR_OFFSET)
1561                 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1562         max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1563         assert(max_lro_size >= 256u);
1564         max_lro_size /= 256u;
1565         if (priv->max_lro_msg_size)
1566                 priv->max_lro_msg_size =
1567                         RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1568         else
1569                 priv->max_lro_msg_size = max_lro_size;
1570 }
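/*
 * Example (illustrative): the MLX5_MAX_LRO_SIZE cap of 65280 bytes is stored
 * as 65280 / 256 = 255, since the maximum LRO message size is programmed in
 * units of 256 bytes.
 */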
1571
1572 /**
1573  * Create a DPDK Rx queue.
1574  *
1575  * @param dev
1576  *   Pointer to Ethernet device.
1577  * @param idx
1578  *   RX queue index.
1579  * @param desc
1580  *   Number of descriptors to configure in queue.
1581  * @param socket
1582  *   NUMA socket on which memory must be allocated.
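 * @param conf
 *   Thresholds and offloads configuration parameters for the queue.
 * @param mp
 *   Memory pool from which mbufs are allocated for this queue.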
1583  *
1584  * @return
1585  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1586  */
1587 struct mlx5_rxq_ctrl *
1588 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1589              unsigned int socket, const struct rte_eth_rxconf *conf,
1590              struct rte_mempool *mp)
1591 {
1592         struct mlx5_priv *priv = dev->data->dev_private;
1593         struct mlx5_rxq_ctrl *tmpl;
1594         unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1595         unsigned int mprq_stride_size;
1596         struct mlx5_dev_config *config = &priv->config;
1597         unsigned int strd_headroom_en;
1598         /*
1599          * Always allocate extra slots, even if eventually
1600          * the vector Rx will not be used.
1601          */
1602         uint16_t desc_n =
1603                 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1604         uint64_t offloads = conf->offloads |
1605                            dev->data->dev_conf.rxmode.offloads;
1606         const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1607         unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1608         unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1609                                                         RTE_PKTMBUF_HEADROOM;
1610
1611         if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1612                                                     DEV_RX_OFFLOAD_SCATTER)) {
1613                 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1614                         " configured and not enough mbuf space (%u) to contain "
1615                         "the maximum Rx packet length (%u) with head-room (%u)",
1616                         dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1617                         RTE_PKTMBUF_HEADROOM);
1618                 rte_errno = ENOSPC;
1619                 return NULL;
1620         }
1621         tmpl = rte_calloc_socket("RXQ", 1,
1622                                  sizeof(*tmpl) +
1623                                  desc_n * sizeof(struct rte_mbuf *),
1624                                  0, socket);
1625         if (!tmpl) {
1626                 rte_errno = ENOMEM;
1627                 return NULL;
1628         }
1629         if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1630                                MLX5_MR_BTREE_CACHE_N, socket)) {
1631                 /* rte_errno is already set. */
1632                 goto error;
1633         }
1634         tmpl->socket = socket;
1635         if (dev->data->dev_conf.intr_conf.rxq)
1636                 tmpl->irq = 1;
1637         /*
1638          * An LRO packet may consume all the stride memory, hence we cannot
1639          * guarantee head-room near the packet memory in the stride.
1640          * In this case scatter is surely enabled and an empty mbuf may be
1641          * prepended at the start to provide the head-room.
1642          */
1643         if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
1644             non_scatter_min_mbuf_size > mb_len) {
1645                 strd_headroom_en = 0;
1646                 mprq_stride_size = RTE_MIN(max_rx_pkt_len,
1647                                         1u << config->mprq.max_stride_size_n);
1648         } else {
1649                 strd_headroom_en = 1;
1650                 mprq_stride_size = non_scatter_min_mbuf_size;
1651         }
1652         /*
1653          * This Rx queue can be configured as a Multi-Packet RQ if all of the
1654          * following conditions are met:
1655          *  - MPRQ is enabled.
1656          *  - The number of descs is more than the number of strides.
1657          *  - max_rx_pkt_len plus overhead is less than the max size of a
1658          *    stride.
1659          *  Otherwise, enable Rx scatter if necessary.
1660          */
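        /*
         * Example (illustrative, assuming device caps of stride_num_n = 6,
         * i.e. 64 strides, and max_stride_size_n = 13, i.e. 8 KB strides):
         * with desc = 512 and a 2048 B stride size requirement, MPRQ is
         * chosen since 512 > 64 and 2048 <= 8192.
         */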
1661         if (mprq_en &&
1662             desc > (1U << config->mprq.stride_num_n) &&
1663             mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1664                 /* TODO: Rx scatter isn't supported yet. */
1665                 tmpl->rxq.sges_n = 0;
1666                 /* Trim the number of descs needed. */
1667                 desc >>= config->mprq.stride_num_n;
1668                 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1669                 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1670                                               config->mprq.min_stride_size_n);
1671                 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1672                 tmpl->rxq.strd_headroom_en = strd_headroom_en;
1673                 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
1674                             RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
1675                 mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
1676                    (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
1677                 DRV_LOG(DEBUG,
1678                         "port %u Rx queue %u: Multi-Packet RQ is enabled"
1679                         " strd_num_n = %u, strd_sz_n = %u",
1680                         dev->data->port_id, idx,
1681                         tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1682         } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
1683                 tmpl->rxq.sges_n = 0;
1684         } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1685                 unsigned int size = non_scatter_min_mbuf_size;
1686                 unsigned int sges_n;
1687
1688                 /*
1689                  * Determine the number of SGEs needed for a full packet
1690                  * and round it to the next power of two.
1691                  */
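                /*
                 * Example (illustrative, assuming the default 128 B
                 * RTE_PKTMBUF_HEADROOM): max_rx_pkt_len = 9000 and
                 * mb_len = 2048 give size = 9128, so sges_n =
                 * log2above(4 + 1) = 3, i.e. 8 segments, which provide
                 * 8 * 2048 - 128 = 16256 bytes >= 9000.
                 */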
1692                 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1693                 tmpl->rxq.sges_n = sges_n;
1694                 /* Make sure rxq.sges_n did not overflow. */
1695                 size = mb_len * (1 << tmpl->rxq.sges_n);
1696                 size -= RTE_PKTMBUF_HEADROOM;
1697                 if (size < max_rx_pkt_len) {
1698                         DRV_LOG(ERR,
1699                                 "port %u too many SGEs (%u) needed to handle"
1700                                 " requested maximum packet size %u",
1701                                 dev->data->port_id,
1702                                 1 << sges_n,
1703                                 max_rx_pkt_len);
1704                         rte_errno = EOVERFLOW;
1705                         goto error;
1706                 }
1707         }
1708         if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1709                 DRV_LOG(WARNING,
1710                         "port %u MPRQ is requested but cannot be enabled"
1711                         " (requested: desc = %u, stride_sz = %u,"
1712                         " supported: min_stride_num = %u, max_stride_sz = %u).",
1713                         dev->data->port_id, desc, mprq_stride_size,
1714                         (1 << config->mprq.stride_num_n),
1715                         (1 << config->mprq.max_stride_size_n));
1716         DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1717                 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1718         if (desc % (1 << tmpl->rxq.sges_n)) {
1719                 DRV_LOG(ERR,
1720                         "port %u number of Rx queue descriptors (%u) is not a"
1721                         " multiple of SGEs per packet (%u)",
1722                         dev->data->port_id,
1723                         desc,
1724                         1 << tmpl->rxq.sges_n);
1725                 rte_errno = EINVAL;
1726                 goto error;
1727         }
1728         /* Toggle RX checksum offload if hardware supports it. */
1729         tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1730         tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1731         /* Configure VLAN stripping. */
1732         tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1733         /* By default, FCS (CRC) is stripped by hardware. */
1734         tmpl->rxq.crc_present = 0;
1735         if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1736                 if (config->hw_fcs_strip) {
1737                         /*
1738                          * RQs used for LRO-enabled TIRs should not be
1739                          * configured to scatter the FCS.
1740                          */
1741                         if (mlx5_lro_on(dev))
1742                                 DRV_LOG(WARNING,
1743                                         "port %u CRC stripping has been "
1744                                         "disabled but will still be performed "
1745                                         "by hardware, because LRO is enabled",
1746                                         dev->data->port_id);
1747                         else
1748                                 tmpl->rxq.crc_present = 1;
1749                 } else {
1750                         DRV_LOG(WARNING,
1751                                 "port %u CRC stripping has been disabled but will"
1752                                 " still be performed by hardware, make sure MLNX_OFED"
1753                                 " and firmware are up to date",
1754                                 dev->data->port_id);
1755                 }
1756         }
1757         DRV_LOG(DEBUG,
1758                 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1759                 " incoming frames to hide it",
1760                 dev->data->port_id,
1761                 tmpl->rxq.crc_present ? "disabled" : "enabled",
1762                 tmpl->rxq.crc_present << 2);
1763         tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1764                 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1765         /* Save port ID. */
1766         tmpl->rxq.port_id = dev->data->port_id;
1767         tmpl->priv = priv;
1768         tmpl->rxq.mp = mp;
1769         tmpl->rxq.elts_n = log2above(desc);
1770         tmpl->rxq.rq_repl_thresh =
1771                 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1772         tmpl->rxq.elts =
1773                 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1774 #ifndef RTE_ARCH_64
1775         tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1776 #endif
1777         tmpl->rxq.idx = idx;
1778         rte_atomic32_inc(&tmpl->refcnt);
1779         LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1780         return tmpl;
1781 error:
1782         rte_free(tmpl);
1783         return NULL;
1784 }
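/*
 * Illustrative call sequence (a sketch, not driver code; "dev", "idx", "desc",
 * "conf" and "mp" stand for caller-provided arguments):
 *
 *     struct mlx5_rxq_ctrl *ctrl;
 *
 *     ctrl = mlx5_rxq_new(dev, idx, desc, rte_socket_id(), conf, mp);
 *     if (ctrl == NULL)
 *             return -rte_errno;
 */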
1785
1786 /**
1787  * Get a Rx queue.
1788  *
1789  * @param dev
1790  *   Pointer to Ethernet device.
1791  * @param idx
1792  *   RX queue index.
1793  *
1794  * @return
1795  *   A pointer to the queue if it exists, NULL otherwise.
1796  */
1797 struct mlx5_rxq_ctrl *
1798 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1799 {
1800         struct mlx5_priv *priv = dev->data->dev_private;
1801         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1802
1803         if ((*priv->rxqs)[idx]) {
1804                 rxq_ctrl = container_of((*priv->rxqs)[idx],
1805                                         struct mlx5_rxq_ctrl,
1806                                         rxq);
1807                 mlx5_rxq_obj_get(dev, idx);
1808                 rte_atomic32_inc(&rxq_ctrl->refcnt);
1809         }
1810         return rxq_ctrl;
1811 }
1812
1813 /**
1814  * Release a Rx queue.
1815  *
1816  * @param dev
1817  *   Pointer to Ethernet device.
1818  * @param idx
1819  *   RX queue index.
1820  *
1821  * @return
1822  *   1 while a reference on it exists, 0 when freed.
1823  */
1824 int
1825 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1826 {
1827         struct mlx5_priv *priv = dev->data->dev_private;
1828         struct mlx5_rxq_ctrl *rxq_ctrl;
1829
1830         if (!(*priv->rxqs)[idx])
1831                 return 0;
1832         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1833         assert(rxq_ctrl->priv);
1834         if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
1835                 rxq_ctrl->obj = NULL;
1836         if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1837                 if (rxq_ctrl->dbr_umem_id_valid)
1838                         claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
1839                                                     rxq_ctrl->dbr_offset));
1840                 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1841                 LIST_REMOVE(rxq_ctrl, next);
1842                 rte_free(rxq_ctrl);
1843                 (*priv->rxqs)[idx] = NULL;
1844                 return 0;
1845         }
1846         return 1;
1847 }
1848
1849 /**
1850  * Verify that the Rx queue list is empty.
1851  *
1852  * @param dev
1853  *   Pointer to Ethernet device.
1854  *
1855  * @return
1856  *   The number of objects not released.
1857  */
1858 int
1859 mlx5_rxq_verify(struct rte_eth_dev *dev)
1860 {
1861         struct mlx5_priv *priv = dev->data->dev_private;
1862         struct mlx5_rxq_ctrl *rxq_ctrl;
1863         int ret = 0;
1864
1865         LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1866                 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1867                         dev->data->port_id, rxq_ctrl->rxq.idx);
1868                 ++ret;
1869         }
1870         return ret;
1871 }
1872
1873 /**
1874  * Create an indirection table.
1875  *
1876  * @param dev
1877  *   Pointer to Ethernet device.
1878  * @param queues
1879  *   Queues entering the indirection table.
1880  * @param queues_n
1881  *   Number of queues in the array.
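 * @param type
 *   Indirection table type, Verbs (MLX5_IND_TBL_TYPE_IBV) or DevX
 *   (MLX5_IND_TBL_TYPE_DEVX).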
1882  *
1883  * @return
1884  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1885  */
1886 static struct mlx5_ind_table_obj *
1887 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1888                        uint32_t queues_n, enum mlx5_ind_tbl_type type)
1889 {
1890         struct mlx5_priv *priv = dev->data->dev_private;
1891         struct mlx5_ind_table_obj *ind_tbl;
1892         unsigned int i = 0, j = 0, k = 0;
1893
1894         ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1895                              queues_n * sizeof(uint16_t), 0);
1896         if (!ind_tbl) {
1897                 rte_errno = ENOMEM;
1898                 return NULL;
1899         }
1900         ind_tbl->type = type;
1901         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1902                 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1903                         log2above(queues_n) :
1904                         log2above(priv->config.ind_table_max_size);
1905                 struct ibv_wq *wq[1 << wq_n];
1906
1907                 for (i = 0; i != queues_n; ++i) {
1908                         struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1909                                                                  queues[i]);
1910                         if (!rxq)
1911                                 goto error;
1912                         wq[i] = rxq->obj->wq;
1913                         ind_tbl->queues[i] = queues[i];
1914                 }
1915                 ind_tbl->queues_n = queues_n;
1916                 /* Fill the remaining slots by wrapping around the configured queues. */
1917                 k = i; /* Retain value of i for use in error case. */
1918                 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
1919                         wq[k] = wq[j];
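                /*
                 * Example (illustrative, assuming ind_table_max_size = 512):
                 * 3 queues are not a power of two, so wq_n = 9 and slots
                 * 3..511 are filled by cycling through the 3 WQs, keeping the
                 * RSS spread nearly even.
                 */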
1920                 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1921                         (priv->sh->ctx,
1922                          &(struct ibv_rwq_ind_table_init_attr){
1923                                 .log_ind_tbl_size = wq_n,
1924                                 .ind_tbl = wq,
1925                                 .comp_mask = 0,
1926                         });
1927                 if (!ind_tbl->ind_table) {
1928                         rte_errno = errno;
1929                         goto error;
1930                 }
1931         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
1932                 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
1933
1934                 rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
1935                                       queues_n * sizeof(uint16_t), 0);
1936                 if (!rqt_attr) {
1937                         DRV_LOG(ERR, "port %u cannot allocate RQT resources",
1938                                 dev->data->port_id);
1939                         rte_errno = ENOMEM;
1940                         goto error;
1941                 }
1942                 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
1943                 rqt_attr->rqt_actual_size = queues_n;
1944                 for (i = 0; i != queues_n; ++i) {
1945                         struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1946                                                                  queues[i]);
1947                         if (!rxq)
1948                                 goto error;
1949                         rqt_attr->rq_list[i] = rxq->obj->rq->id;
1950                         ind_tbl->queues[i] = queues[i];
1951                 }
1952                 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
1953                                                         rqt_attr);
1954                 rte_free(rqt_attr);
1955                 if (!ind_tbl->rqt) {
1956                         DRV_LOG(ERR, "port %u cannot create DevX RQT",
1957                                 dev->data->port_id);
1958                         rte_errno = errno;
1959                         goto error;
1960                 }
1961                 ind_tbl->queues_n = queues_n;
1962         }
1963         rte_atomic32_inc(&ind_tbl->refcnt);
1964         LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1965         return ind_tbl;
1966 error:
1967         for (j = 0; j < i; j++)
1968                 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1969         rte_free(ind_tbl);
1970         DEBUG("port %u cannot create indirection table", dev->data->port_id);
1971         return NULL;
1972 }
1973
1974 /**
1975  * Get an indirection table.
1976  *
1977  * @param dev
1978  *   Pointer to Ethernet device.
1979  *   Queues entering the indirection table.
1980  *   Queues entering in the indirection table.
1981  * @param queues_n
1982  *   Number of queues in the array.
1983  *
1984  * @return
1985  *   An indirection table if found, NULL otherwise.
1986  */
1987 static struct mlx5_ind_table_obj *
1988 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1989                        uint32_t queues_n)
1990 {
1991         struct mlx5_priv *priv = dev->data->dev_private;
1992         struct mlx5_ind_table_obj *ind_tbl;
1993
1994         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1995                 if ((ind_tbl->queues_n == queues_n) &&
1996                     (memcmp(ind_tbl->queues, queues,
1997                             ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1998                      == 0))
1999                         break;
2000         }
2001         if (ind_tbl) {
2002                 unsigned int i;
2003
2004                 rte_atomic32_inc(&ind_tbl->refcnt);
2005                 for (i = 0; i != ind_tbl->queues_n; ++i)
2006                         mlx5_rxq_get(dev, ind_tbl->queues[i]);
2007         }
2008         return ind_tbl;
2009 }
2010
2011 /**
2012  * Release an indirection table.
2013  *
2014  * @param dev
2015  *   Pointer to Ethernet device.
2016  * @param ind_table
2017  *   Indirection table to release.
2018  *
2019  * @return
2020  *   1 while a reference on it exists, 0 when freed.
2021  */
2022 static int
2023 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2024                            struct mlx5_ind_table_obj *ind_tbl)
2025 {
2026         unsigned int i;
2027
2028         if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2029                 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2030                         claim_zero(mlx5_glue->destroy_rwq_ind_table
2031                                                         (ind_tbl->ind_table));
2032                 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2033                         claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2034         }
2035         for (i = 0; i != ind_tbl->queues_n; ++i)
2036                 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2037         if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2038                 LIST_REMOVE(ind_tbl, next);
2039                 rte_free(ind_tbl);
2040                 return 0;
2041         }
2042         return 1;
2043 }
2044
2045 /**
2046  * Verify that the indirection table list is empty.
2047  *
2048  * @param dev
2049  *   Pointer to Ethernet device.
2050  *
2051  * @return
2052  *   The number of objects not released.
2053  */
2054 int
2055 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2056 {
2057         struct mlx5_priv *priv = dev->data->dev_private;
2058         struct mlx5_ind_table_obj *ind_tbl;
2059         int ret = 0;
2060
2061         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2062                 DRV_LOG(DEBUG,
2063                         "port %u indirection table obj %p still referenced",
2064                         dev->data->port_id, (void *)ind_tbl);
2065                 ++ret;
2066         }
2067         return ret;
2068 }
2069
2070 /**
2071  * Create an Rx Hash queue.
2072  *
2073  * @param dev
2074  *   Pointer to Ethernet device.
2075  * @param rss_key
2076  *   RSS key for the Rx hash queue.
2077  * @param rss_key_len
2078  *   RSS key length.
2079  * @param hash_fields
2080  *   Verbs protocol hash field to make the RSS on.
2081  * @param queues
2082  *   Queues entering the hash queue. If hash_fields is empty, only the
2083  *   first queue index is used for the indirection table.
2084  * @param queues_n
2085  *   Number of queues.
2086  * @param tunnel
2087  *   Tunnel type.
2088  * @param lro
2089  *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
2090  *
2091  * @return
2092  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2093  */
2094 struct mlx5_hrxq *
2095 mlx5_hrxq_new(struct rte_eth_dev *dev,
2096               const uint8_t *rss_key, uint32_t rss_key_len,
2097               uint64_t hash_fields,
2098               const uint16_t *queues, uint32_t queues_n,
2099               int tunnel __rte_unused, int lro)
2100 {
2101         struct mlx5_priv *priv = dev->data->dev_private;
2102         struct mlx5_hrxq *hrxq;
2103         struct ibv_qp *qp = NULL;
2104         struct mlx5_ind_table_obj *ind_tbl;
2105         int err;
2106         struct mlx5_devx_obj *tir = NULL;
2107
2108         queues_n = hash_fields ? queues_n : 1;
2109         ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2110         if (!ind_tbl) {
2111                 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2112                 struct mlx5_rxq_ctrl *rxq_ctrl =
2113                         container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2114                 enum mlx5_ind_tbl_type type;
2115
2116                 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2117                                 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2118                 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2119         }
2120         if (!ind_tbl) {
2121                 rte_errno = ENOMEM;
2122                 return NULL;
2123         }
2124         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2125 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2126                 struct mlx5dv_qp_init_attr qp_init_attr;
2127
2128                 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2129                 if (tunnel) {
2130                         qp_init_attr.comp_mask =
2131                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2132                         qp_init_attr.create_flags =
2133                                 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2134                 }
2135 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2136                 if (dev->data->dev_conf.lpbk_mode) {
2137                         /*
2138                          * Allow packets sent from the NIC to loop back
2139                          * without the source MAC check.
2140                          */
2141                         qp_init_attr.comp_mask |=
2142                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2143                         qp_init_attr.create_flags |=
2144                                 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2145                 }
2146 #endif
2147                 qp = mlx5_glue->dv_create_qp
2148                         (priv->sh->ctx,
2149                          &(struct ibv_qp_init_attr_ex){
2150                                 .qp_type = IBV_QPT_RAW_PACKET,
2151                                 .comp_mask =
2152                                         IBV_QP_INIT_ATTR_PD |
2153                                         IBV_QP_INIT_ATTR_IND_TABLE |
2154                                         IBV_QP_INIT_ATTR_RX_HASH,
2155                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
2156                                         .rx_hash_function =
2157                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
2158                                         .rx_hash_key_len = rss_key_len,
2159                                         .rx_hash_key =
2160                                                 (void *)(uintptr_t)rss_key,
2161                                         .rx_hash_fields_mask = hash_fields,
2162                                 },
2163                                 .rwq_ind_tbl = ind_tbl->ind_table,
2164                                 .pd = priv->sh->pd,
2165                           },
2166                           &qp_init_attr);
2167 #else
2168                 qp = mlx5_glue->create_qp_ex
2169                         (priv->sh->ctx,
2170                          &(struct ibv_qp_init_attr_ex){
2171                                 .qp_type = IBV_QPT_RAW_PACKET,
2172                                 .comp_mask =
2173                                         IBV_QP_INIT_ATTR_PD |
2174                                         IBV_QP_INIT_ATTR_IND_TABLE |
2175                                         IBV_QP_INIT_ATTR_RX_HASH,
2176                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
2177                                         .rx_hash_function =
2178                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
2179                                         .rx_hash_key_len = rss_key_len,
2180                                         .rx_hash_key =
2181                                                 (void *)(uintptr_t)rss_key,
2182                                         .rx_hash_fields_mask = hash_fields,
2183                                 },
2184                                 .rwq_ind_tbl = ind_tbl->ind_table,
2185                                 .pd = priv->sh->pd,
2186                          });
2187 #endif
2188                 if (!qp) {
2189                         rte_errno = errno;
2190                         goto error;
2191                 }
2192         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2193                 struct mlx5_devx_tir_attr tir_attr;
2194
2195                 memset(&tir_attr, 0, sizeof(tir_attr));
2196                 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2197                 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2198                 memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
2199                        sizeof(uint64_t));
2200                 tir_attr.transport_domain = priv->sh->tdn;
2201                 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2202                 tir_attr.indirect_table = ind_tbl->rqt->id;
2203                 if (dev->data->dev_conf.lpbk_mode)
2204                         tir_attr.self_lb_block =
2205                                         MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2206                 if (lro) {
2207                         tir_attr.lro_timeout_period_usecs =
2208                                         priv->config.lro.timeout;
2209                         tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2210                         tir_attr.lro_enable_mask = lro;
2211                 }
2212                 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2213                 if (!tir) {
2214                         DRV_LOG(ERR, "port %u cannot create DevX TIR",
2215                                 dev->data->port_id);
2216                         rte_errno = errno;
2217                         goto error;
2218                 }
2219         }
2220         hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2221         if (!hrxq)
2222                 goto error;
2223         hrxq->ind_table = ind_tbl;
2224         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2225                 hrxq->qp = qp;
2226 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2227                 hrxq->action =
2228                         mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2229                 if (!hrxq->action) {
2230                         rte_errno = errno;
2231                         goto error;
2232                 }
2233 #endif
2234         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2235                 hrxq->tir = tir;
2236 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2237                 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2238                                                         (hrxq->tir->obj);
2239                 if (!hrxq->action) {
2240                         rte_errno = errno;
2241                         goto error;
2242                 }
2243 #endif
2244         }
2245         hrxq->rss_key_len = rss_key_len;
2246         hrxq->hash_fields = hash_fields;
2247         memcpy(hrxq->rss_key, rss_key, rss_key_len);
2248         rte_atomic32_inc(&hrxq->refcnt);
2249         LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2250         return hrxq;
2251 error:
2252         err = rte_errno; /* Save rte_errno before cleanup. */
2253         mlx5_ind_table_obj_release(dev, ind_tbl);
2254         if (qp)
2255                 claim_zero(mlx5_glue->destroy_qp(qp));
2256         else if (tir)
2257                 claim_zero(mlx5_devx_cmd_destroy(tir));
2258         rte_errno = err; /* Restore rte_errno. */
2259         return NULL;
2260 }
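/*
 * Illustrative usage (a sketch; "queues" and "queues_n" are caller-provided):
 *
 *     struct mlx5_hrxq *hrxq;
 *
 *     hrxq = mlx5_hrxq_new(dev, rss_hash_default_key, MLX5_RSS_HASH_KEY_LEN,
 *                          IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
 *                          queues, queues_n, 0, 0);
 *     if (hrxq == NULL)
 *             return -rte_errno;
 */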
2261
2262 /**
2263  * Get an Rx Hash queue.
2264  *
2265  * @param dev
2266  *   Pointer to Ethernet device.
2267  * @param rss_conf
2268  *   RSS configuration for the Rx hash queue.
2269  * @param queues
2270  *   Queues entering the hash queue. If hash_fields is empty, only the
2271  *   first queue index is used for the indirection table.
2272  * @param queues_n
2273  *   Number of queues.
2274  *
2275  * @return
2276  *   A hash Rx queue on success, NULL if not found.
2277  */
2278 struct mlx5_hrxq *
2279 mlx5_hrxq_get(struct rte_eth_dev *dev,
2280               const uint8_t *rss_key, uint32_t rss_key_len,
2281               uint64_t hash_fields,
2282               const uint16_t *queues, uint32_t queues_n)
2283 {
2284         struct mlx5_priv *priv = dev->data->dev_private;
2285         struct mlx5_hrxq *hrxq;
2286
2287         queues_n = hash_fields ? queues_n : 1;
2288         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2289                 struct mlx5_ind_table_obj *ind_tbl;
2290
2291                 if (hrxq->rss_key_len != rss_key_len)
2292                         continue;
2293                 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2294                         continue;
2295                 if (hrxq->hash_fields != hash_fields)
2296                         continue;
2297                 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2298                 if (!ind_tbl)
2299                         continue;
2300                 if (ind_tbl != hrxq->ind_table) {
2301                         mlx5_ind_table_obj_release(dev, ind_tbl);
2302                         continue;
2303                 }
2304                 rte_atomic32_inc(&hrxq->refcnt);
2305                 return hrxq;
2306         }
2307         return NULL;
2308 }
2309
2310 /**
2311  * Release the hash Rx queue.
2312  *
2313  * @param dev
2314  *   Pointer to Ethernet device.
2315  * @param hrxq
2316  *   Pointer to Hash Rx queue to release.
2317  *
2318  * @return
2319  *   1 while a reference on it exists, 0 when freed.
2320  */
2321 int
2322 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2323 {
2324         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2325 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2326                 mlx5_glue->destroy_flow_action(hrxq->action);
2327 #endif
2328                 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2329                         claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2330                 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2331                         claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2332                 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2333                 LIST_REMOVE(hrxq, next);
2334                 rte_free(hrxq);
2335                 return 0;
2336         }
2337         claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2338         return 1;
2339 }
2340
2341 /**
2342  * Verify that the hash Rx queue list is empty.
2343  *
2344  * @param dev
2345  *   Pointer to Ethernet device.
2346  *
2347  * @return
2348  *   The number of objects not released.
2349  */
2350 int
2351 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2352 {
2353         struct mlx5_priv *priv = dev->data->dev_private;
2354         struct mlx5_hrxq *hrxq;
2355         int ret = 0;
2356
2357         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2358                 DRV_LOG(DEBUG,
2359                         "port %u hash Rx queue %p still referenced",
2360                         dev->data->port_id, (void *)hrxq);
2361                 ++ret;
2362         }
2363         return ret;
2364 }
2365
2366 /**
2367  * Create a drop Rx queue Verbs/DevX object.
2368  *
2369  * @param dev
2370  *   Pointer to Ethernet device.
2371  *
2372  * @return
2373  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2374  */
2375 static struct mlx5_rxq_obj *
2376 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2377 {
2378         struct mlx5_priv *priv = dev->data->dev_private;
2379         struct ibv_context *ctx = priv->sh->ctx;
2380         struct ibv_cq *cq;
2381         struct ibv_wq *wq = NULL;
2382         struct mlx5_rxq_obj *rxq;
2383
2384         if (priv->drop_queue.rxq)
2385                 return priv->drop_queue.rxq;
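        /*
         * A minimal CQ and a single-WR WQ are sufficient here: no receive
         * buffers are ever posted, so packets steered to this queue are
         * silently dropped by the hardware.
         */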
2386         cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2387         if (!cq) {
2388                 DEBUG("port %u cannot allocate CQ for drop queue",
2389                       dev->data->port_id);
2390                 rte_errno = errno;
2391                 goto error;
2392         }
2393         wq = mlx5_glue->create_wq(ctx,
2394                  &(struct ibv_wq_init_attr){
2395                         .wq_type = IBV_WQT_RQ,
2396                         .max_wr = 1,
2397                         .max_sge = 1,
2398                         .pd = priv->sh->pd,
2399                         .cq = cq,
2400                  });
2401         if (!wq) {
2402                 DEBUG("port %u cannot allocate WQ for drop queue",
2403                       dev->data->port_id);
2404                 rte_errno = errno;
2405                 goto error;
2406         }
2407         rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2408         if (!rxq) {
2409                 DEBUG("port %u cannot allocate drop Rx queue memory",
2410                       dev->data->port_id);
2411                 rte_errno = ENOMEM;
2412                 goto error;
2413         }
2414         rxq->cq = cq;
2415         rxq->wq = wq;
2416         priv->drop_queue.rxq = rxq;
2417         return rxq;
2418 error:
2419         if (wq)
2420                 claim_zero(mlx5_glue->destroy_wq(wq));
2421         if (cq)
2422                 claim_zero(mlx5_glue->destroy_cq(cq));
2423         return NULL;
2424 }
2425
2426 /**
2427  * Release a drop Rx queue Verbs/DevX object.
2428  *
2429  * @param dev
2430  *   Pointer to Ethernet device.
2434  */
2435 static void
2436 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2437 {
2438         struct mlx5_priv *priv = dev->data->dev_private;
2439         struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2440
2441         if (rxq->wq)
2442                 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2443         if (rxq->cq)
2444                 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2445         rte_free(rxq);
2446         priv->drop_queue.rxq = NULL;
2447 }
2448
2449 /**
2450  * Create a drop indirection table.
2451  *
2452  * @param dev
2453  *   Pointer to Ethernet device.
2454  *
2455  * @return
2456  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2457  */
2458 static struct mlx5_ind_table_obj *
2459 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2460 {
2461         struct mlx5_priv *priv = dev->data->dev_private;
2462         struct mlx5_ind_table_obj *ind_tbl;
2463         struct mlx5_rxq_obj *rxq;
2464         struct mlx5_ind_table_obj tmpl;
2465
2466         rxq = mlx5_rxq_obj_drop_new(dev);
2467         if (!rxq)
2468                 return NULL;
2469         tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2470                 (priv->sh->ctx,
2471                  &(struct ibv_rwq_ind_table_init_attr){
2472                         .log_ind_tbl_size = 0,
2473                         .ind_tbl = &rxq->wq,
2474                         .comp_mask = 0,
2475                  });
2476         if (!tmpl.ind_table) {
2477                 DEBUG("port %u cannot allocate indirection table for drop"
2478                       " queue",
2479                       dev->data->port_id);
2480                 rte_errno = errno;
2481                 goto error;
2482         }
2483         ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2484         if (!ind_tbl) {
2485                 rte_errno = ENOMEM;
2486                 goto error;
2487         }
2488         ind_tbl->ind_table = tmpl.ind_table;
2489         return ind_tbl;
2490 error:
2491         mlx5_rxq_obj_drop_release(dev);
2492         return NULL;
2493 }
2494
2495 /**
2496  * Release a drop indirection table.
2497  *
2498  * @param dev
2499  *   Pointer to Ethernet device.
2500  */
2501 static void
2502 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2503 {
2504         struct mlx5_priv *priv = dev->data->dev_private;
2505         struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2506
2507         claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2508         mlx5_rxq_obj_drop_release(dev);
2509         rte_free(ind_tbl);
2510         priv->drop_queue.hrxq->ind_table = NULL;
2511 }
2512
2513 /**
2514  * Create a drop Rx Hash queue.
2515  *
2516  * @param dev
2517  *   Pointer to Ethernet device.
2518  *
2519  * @return
2520  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2521  */
2522 struct mlx5_hrxq *
2523 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2524 {
2525         struct mlx5_priv *priv = dev->data->dev_private;
2526         struct mlx5_ind_table_obj *ind_tbl;
2527         struct ibv_qp *qp;
2528         struct mlx5_hrxq *hrxq;
2529
2530         if (priv->drop_queue.hrxq) {
2531                 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2532                 return priv->drop_queue.hrxq;
2533         }
2534         ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2535         if (!ind_tbl)
2536                 return NULL;
2537         qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2538                  &(struct ibv_qp_init_attr_ex){
2539                         .qp_type = IBV_QPT_RAW_PACKET,
2540                         .comp_mask =
2541                                 IBV_QP_INIT_ATTR_PD |
2542                                 IBV_QP_INIT_ATTR_IND_TABLE |
2543                                 IBV_QP_INIT_ATTR_RX_HASH,
2544                         .rx_hash_conf = (struct ibv_rx_hash_conf){
2545                                 .rx_hash_function =
2546                                         IBV_RX_HASH_FUNC_TOEPLITZ,
2547                                 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2548                                 .rx_hash_key = rss_hash_default_key,
2549                                 .rx_hash_fields_mask = 0,
2550                                 },
2551                         .rwq_ind_tbl = ind_tbl->ind_table,
2552                         .pd = priv->sh->pd
2553                  });
2554         if (!qp) {
2555                 DEBUG("port %u cannot allocate QP for drop queue",
2556                       dev->data->port_id);
2557                 rte_errno = errno;
2558                 goto error;
2559         }
2560         hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2561         if (!hrxq) {
2562                 DRV_LOG(WARNING,
2563                         "port %u cannot allocate memory for drop queue",
2564                         dev->data->port_id);
2565                 rte_errno = ENOMEM;
2566                 goto error;
2567         }
2568         hrxq->ind_table = ind_tbl;
2569         hrxq->qp = qp;
2570 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2571         hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2572         if (!hrxq->action) {
2573                 rte_errno = errno;
2574                 goto error;
2575         }
2576 #endif
2577         priv->drop_queue.hrxq = hrxq;
2578         rte_atomic32_set(&hrxq->refcnt, 1);
2579         return hrxq;
2580 error:
2581         if (ind_tbl)
2582                 mlx5_ind_table_obj_drop_release(dev);
2583         return NULL;
2584 }
2585
2586 /**
2587  * Release a drop hash Rx queue.
2588  *
2589  * @param dev
2590  *   Pointer to Ethernet device.
2591  */
2592 void
2593 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2594 {
2595         struct mlx5_priv *priv = dev->data->dev_private;
2596         struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2597
2598         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2599 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2600                 mlx5_glue->destroy_flow_action(hrxq->action);
2601 #endif
2602                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2603                 mlx5_ind_table_obj_drop_release(dev);
2604                 rte_free(hrxq);
2605                 priv->drop_queue.hrxq = NULL;
2606         }
2607 }