net/mlx5: separate Rx queue object creations
[dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <fcntl.h>
11 #include <sys/queue.h>
12
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
19 #include <rte_io.h>
20 #include <rte_eal_paging.h>
21
22 #include <mlx5_glue.h>
23 #include <mlx5_devx_cmds.h>
24 #include <mlx5_malloc.h>
25
26 #include "mlx5_defs.h"
27 #include "mlx5.h"
28 #include "mlx5_common_os.h"
29 #include "mlx5_rxtx.h"
30 #include "mlx5_utils.h"
31 #include "mlx5_autoconf.h"
32 #include "mlx5_flow.h"
33
34
35 /* Default RSS hash key also used for ConnectX-3. */
36 uint8_t rss_hash_default_key[] = {
37         0x2c, 0xc6, 0x81, 0xd1,
38         0x5b, 0xdb, 0xf4, 0xf7,
39         0xfc, 0xa2, 0x83, 0x19,
40         0xdb, 0x1a, 0x3e, 0x94,
41         0x6b, 0x9e, 0x38, 0xd9,
42         0x2c, 0x9c, 0x03, 0xd1,
43         0xad, 0x99, 0x44, 0xa7,
44         0xd9, 0x56, 0x3d, 0x59,
45         0x06, 0x3c, 0x25, 0xf3,
46         0xfc, 0x1f, 0xdc, 0x2a,
47 };
48
49 /* Length of the default RSS hash key. */
50 static_assert(MLX5_RSS_HASH_KEY_LEN ==
51               (unsigned int)sizeof(rss_hash_default_key),
52               "wrong RSS default key size.");
53
54 /**
55  * Check whether Multi-Packet RQ can be enabled for the device.
56  *
57  * @param dev
58  *   Pointer to Ethernet device.
59  *
60  * @return
61  *   1 if supported, negative errno value if not.
62  */
63 inline int
64 mlx5_check_mprq_support(struct rte_eth_dev *dev)
65 {
66         struct mlx5_priv *priv = dev->data->dev_private;
67
68         if (priv->config.mprq.enabled &&
69             priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
70                 return 1;
71         return -ENOTSUP;
72 }
73
74 /**
75  * Check whether Multi-Packet RQ is enabled for the Rx queue.
76  *
77  * @param rxq
78  *   Pointer to receive queue structure.
79  *
80  * @return
81  *   0 if disabled, otherwise enabled.
82  */
83 inline int
84 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
85 {
86         return rxq->strd_num_n > 0;
87 }
88
89 /**
90  * Check whether Multi-Packet RQ is enabled for the device.
91  *
92  * @param dev
93  *   Pointer to Ethernet device.
94  *
95  * @return
96  *   0 if disabled, otherwise enabled.
97  */
98 inline int
99 mlx5_mprq_enabled(struct rte_eth_dev *dev)
100 {
101         struct mlx5_priv *priv = dev->data->dev_private;
102         uint32_t i;
103         uint16_t n = 0;
104         uint16_t n_ibv = 0;
105
106         if (mlx5_check_mprq_support(dev) < 0)
107                 return 0;
108         /* All the configured queues should be enabled. */
109         for (i = 0; i < priv->rxqs_n; ++i) {
110                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
111                 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
112                         (rxq, struct mlx5_rxq_ctrl, rxq);
113
114                 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
115                         continue;
116                 n_ibv++;
117                 if (mlx5_rxq_mprq_enabled(rxq))
118                         ++n;
119         }
120         /* Multi-Packet RQ can't be partially configured. */
121         MLX5_ASSERT(n == 0 || n == n_ibv);
122         return n == n_ibv;
123 }
124
125 /**
126  * Allocate RX queue elements for Multi-Packet RQ.
127  *
128  * @param rxq_ctrl
129  *   Pointer to RX queue structure.
130  *
131  * @return
132  *   0 on success, a negative errno value otherwise and rte_errno is set.
133  */
134 static int
135 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
136 {
137         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
138         unsigned int wqe_n = 1 << rxq->elts_n;
139         unsigned int i;
140         int err;
141
142         /* Iterate on segments. */
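        /*
         * Note: the loop below runs wqe_n + 1 times; the extra buffer is
         * not placed in the ring but kept aside in mprq_repl as a spare
         * for replenishment on the datapath.
         */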
143         for (i = 0; i <= wqe_n; ++i) {
144                 struct mlx5_mprq_buf *buf;
145
146                 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
147                         DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
148                         rte_errno = ENOMEM;
149                         goto error;
150                 }
151                 if (i < wqe_n)
152                         (*rxq->mprq_bufs)[i] = buf;
153                 else
154                         rxq->mprq_repl = buf;
155         }
156         DRV_LOG(DEBUG,
157                 "port %u Rx queue %u allocated and configured %u segments",
158                 rxq->port_id, rxq->idx, wqe_n);
159         return 0;
160 error:
161         err = rte_errno; /* Save rte_errno before cleanup. */
162         wqe_n = i;
163         for (i = 0; (i != wqe_n); ++i) {
164                 if ((*rxq->mprq_bufs)[i] != NULL)
165                         rte_mempool_put(rxq->mprq_mp,
166                                         (*rxq->mprq_bufs)[i]);
167                 (*rxq->mprq_bufs)[i] = NULL;
168         }
169         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
170                 rxq->port_id, rxq->idx);
171         rte_errno = err; /* Restore rte_errno. */
172         return -rte_errno;
173 }
174
175 /**
176  * Allocate RX queue elements for Single-Packet RQ.
177  *
178  * @param rxq_ctrl
179  *   Pointer to RX queue structure.
180  *
181  * @return
182  *   0 on success, errno value on failure.
183  */
184 static int
185 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
186 {
187         const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
188         unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
189         unsigned int i;
190         int err;
191
192         /* Iterate on segments. */
193         for (i = 0; (i != elts_n); ++i) {
194                 struct rte_mbuf *buf;
195
196                 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
197                 if (buf == NULL) {
198                         DRV_LOG(ERR, "port %u empty mbuf pool",
199                                 PORT_ID(rxq_ctrl->priv));
200                         rte_errno = ENOMEM;
201                         goto error;
202                 }
203                 /* Headroom is reserved by rte_pktmbuf_alloc(). */
204                 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
205                 /* Buffer is supposed to be empty. */
206                 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
207                 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
208                 MLX5_ASSERT(!buf->next);
209                 /* Only the first segment keeps headroom. */
210                 if (i % sges_n)
211                         SET_DATA_OFF(buf, 0);
212                 PORT(buf) = rxq_ctrl->rxq.port_id;
213                 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
214                 PKT_LEN(buf) = DATA_LEN(buf);
215                 NB_SEGS(buf) = 1;
216                 (*rxq_ctrl->rxq.elts)[i] = buf;
217         }
218         /* If Rx vector is activated. */
219         if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
220                 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
221                 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
222                 struct rte_pktmbuf_pool_private *priv =
223                         (struct rte_pktmbuf_pool_private *)
224                                 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
225                 int j;
226
227                 /* Initialize default rearm_data for vPMD. */
228                 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
229                 rte_mbuf_refcnt_set(mbuf_init, 1);
230                 mbuf_init->nb_segs = 1;
231                 mbuf_init->port = rxq->port_id;
232                 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
233                         mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
234                 /*
235                  * prevent compiler reordering:
236                  * rearm_data covers previous fields.
237                  */
238                 rte_compiler_barrier();
239                 rxq->mbuf_initializer =
240                         *(rte_xmm_t *)&mbuf_init->rearm_data;
241                 /* Padding with a fake mbuf for vectorized Rx. */
242                 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
243                         (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
244         }
245         DRV_LOG(DEBUG,
246                 "port %u Rx queue %u allocated and configured %u segments"
247                 " (max %u packets)",
248                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
249                 elts_n / (1 << rxq_ctrl->rxq.sges_n));
250         return 0;
251 error:
252         err = rte_errno; /* Save rte_errno before cleanup. */
253         elts_n = i;
254         for (i = 0; (i != elts_n); ++i) {
255                 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
256                         rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
257                 (*rxq_ctrl->rxq.elts)[i] = NULL;
258         }
259         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
260                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
261         rte_errno = err; /* Restore rte_errno. */
262         return -rte_errno;
263 }
264
265 /**
266  * Allocate RX queue elements.
267  *
268  * @param rxq_ctrl
269  *   Pointer to RX queue structure.
270  *
271  * @return
272  *   0 on success, a negative errno value otherwise and rte_errno is set.
273  */
274 int
275 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
276 {
277         return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
278                rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
279 }
280
281 /**
282  * Free RX queue elements for Multi-Packet RQ.
283  *
284  * @param rxq_ctrl
285  *   Pointer to RX queue structure.
286  */
287 static void
288 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
289 {
290         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
291         uint16_t i;
292
293         DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
294                 rxq->port_id, rxq->idx);
295         if (rxq->mprq_bufs == NULL)
296                 return;
297         MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
298         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
299                 if ((*rxq->mprq_bufs)[i] != NULL)
300                         mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
301                 (*rxq->mprq_bufs)[i] = NULL;
302         }
303         if (rxq->mprq_repl != NULL) {
304                 mlx5_mprq_buf_free(rxq->mprq_repl);
305                 rxq->mprq_repl = NULL;
306         }
307 }
308
309 /**
310  * Free RX queue elements for Single-Packet RQ.
311  *
312  * @param rxq_ctrl
313  *   Pointer to RX queue structure.
314  */
315 static void
316 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
317 {
318         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
319         const uint16_t q_n = (1 << rxq->elts_n);
320         const uint16_t q_mask = q_n - 1;
321         uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
322         uint16_t i;
323
324         DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
325                 PORT_ID(rxq_ctrl->priv), rxq->idx);
326         if (rxq->elts == NULL)
327                 return;
328         /**
329          * Some mbufs in the ring belong to the application; they cannot be
330          * freed.
331          */
332         if (mlx5_rxq_check_vec_support(rxq) > 0) {
333                 for (i = 0; i < used; ++i)
334                         (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
335                 rxq->rq_pi = rxq->rq_ci;
336         }
337         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
338                 if ((*rxq->elts)[i] != NULL)
339                         rte_pktmbuf_free_seg((*rxq->elts)[i]);
340                 (*rxq->elts)[i] = NULL;
341         }
342 }
343
344 /**
345  * Free RX queue elements.
346  *
347  * @param rxq_ctrl
348  *   Pointer to RX queue structure.
349  */
350 void
351 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
352 {
353         if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
354                 rxq_free_elts_mprq(rxq_ctrl);
355         else
356                 rxq_free_elts_sprq(rxq_ctrl);
357 }
358
359 /**
360  * Returns the per-queue supported offloads.
361  *
362  * @param dev
363  *   Pointer to Ethernet device.
364  *
365  * @return
366  *   Supported Rx offloads.
367  */
368 uint64_t
369 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
370 {
371         struct mlx5_priv *priv = dev->data->dev_private;
372         struct mlx5_dev_config *config = &priv->config;
373         uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
374                              DEV_RX_OFFLOAD_TIMESTAMP |
375                              DEV_RX_OFFLOAD_JUMBO_FRAME |
376                              DEV_RX_OFFLOAD_RSS_HASH);
377
378         if (config->hw_fcs_strip)
379                 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
380
381         if (config->hw_csum)
382                 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
383                              DEV_RX_OFFLOAD_UDP_CKSUM |
384                              DEV_RX_OFFLOAD_TCP_CKSUM);
385         if (config->hw_vlan_strip)
386                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
387         if (MLX5_LRO_SUPPORTED(dev))
388                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
389         return offloads;
390 }
391
392
393 /**
394  * Returns the per-port supported offloads.
395  *
396  * @return
397  *   Supported Rx offloads.
398  */
399 uint64_t
400 mlx5_get_rx_port_offloads(void)
401 {
402         uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
403
404         return offloads;
405 }
406
407 /**
408  * Verify if the queue can be released.
409  *
410  * @param dev
411  *   Pointer to Ethernet device.
412  * @param idx
413  *   RX queue index.
414  *
415  * @return
416  *   1 if the queue can be released,
417  *   0 if the queue cannot be released because there are references to it,
418  *   a negative errno value and rte_errno set if the queue doesn't exist.
419  */
420 static int
421 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
422 {
423         struct mlx5_priv *priv = dev->data->dev_private;
424         struct mlx5_rxq_ctrl *rxq_ctrl;
425
426         if (!(*priv->rxqs)[idx]) {
427                 rte_errno = EINVAL;
428                 return -rte_errno;
429         }
430         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
431         return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
432 }
433
434 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
435 static void
436 rxq_sync_cq(struct mlx5_rxq_data *rxq)
437 {
438         const uint16_t cqe_n = 1 << rxq->cqe_n;
439         const uint16_t cqe_mask = cqe_n - 1;
440         volatile struct mlx5_cqe *cqe;
441         int ret, i;
442
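        /*
         * Walk the CQ starting from the current consumer index: error CQEs
         * are skipped one by one, regular SW-owned CQEs advance cq_ci by
         * one, and compressed sessions advance it by the number of
         * mini-CQEs recorded in byte_cnt, until a HW-owned CQE is found
         * or the whole CQ has been scanned.
         */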
443         i = cqe_n;
444         do {
445                 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
446                 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
447                 if (ret == MLX5_CQE_STATUS_HW_OWN)
448                         break;
449                 if (ret == MLX5_CQE_STATUS_ERR) {
450                         rxq->cq_ci++;
451                         continue;
452                 }
453                 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
454                 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
455                         rxq->cq_ci++;
456                         continue;
457                 }
458                 /* Compute the next non-compressed CQE. */
459                 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
460
461         } while (--i);
462         /* Move all CQEs to HW ownership, including possible MiniCQEs. */
463         for (i = 0; i < cqe_n; i++) {
464                 cqe = &(*rxq->cqes)[i];
465                 cqe->op_own = MLX5_CQE_INVALIDATE;
466         }
467         /* Resync CQE and WQE (WQ in RESET state). */
468         rte_cio_wmb();
469         *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
470         rte_cio_wmb();
471         *rxq->rq_db = rte_cpu_to_be_32(0);
472         rte_cio_wmb();
473 }
474
475 /**
476  * Rx queue stop. Device queue goes to the RESET state,
477  * all involved mbufs are freed from WQ.
478  *
479  * @param dev
480  *   Pointer to Ethernet device structure.
481  * @param idx
482  *   RX queue index.
483  *
484  * @return
485  *   0 on success, a negative errno value otherwise and rte_errno is set.
486  */
487 int
488 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
489 {
490         struct mlx5_priv *priv = dev->data->dev_private;
491         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
492         struct mlx5_rxq_ctrl *rxq_ctrl =
493                         container_of(rxq, struct mlx5_rxq_ctrl, rxq);
494         int ret;
495
496         MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
497         if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
498                 struct ibv_wq_attr mod = {
499                         .attr_mask = IBV_WQ_ATTR_STATE,
500                         .wq_state = IBV_WQS_RESET,
501                 };
502
503                 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
504         } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
505                 struct mlx5_devx_modify_rq_attr rq_attr;
506
507                 memset(&rq_attr, 0, sizeof(rq_attr));
508                 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
509                 rq_attr.state = MLX5_RQC_STATE_RST;
510                 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
511         }
512         if (ret) {
513                 DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
514                         strerror(errno));
515                 rte_errno = errno;
516                 return ret;
517         }
518         /* Remove all processed CQEs. */
519         rxq_sync_cq(rxq);
520         /* Free all involved mbufs. */
521         rxq_free_elts(rxq_ctrl);
522         /* Set the actual queue state. */
523         dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
524         return 0;
525 }
526
527 /**
528  * Rx queue stop. Device queue goes to the RESET state,
529  * all involved mbufs are freed from WQ.
530  *
531  * @param dev
532  *   Pointer to Ethernet device structure.
533  * @param idx
534  *   RX queue index.
535  *
536  * @return
537  *   0 on success, a negative errno value otherwise and rte_errno is set.
538  */
539 int
540 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
541 {
542         eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
543         int ret;
544
545         if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
546                 DRV_LOG(ERR, "Hairpin queue can't be stopped");
547                 rte_errno = EINVAL;
548                 return -EINVAL;
549         }
550         if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
551                 return 0;
552         /*
553          * Vectorized Rx burst requires the CQ and RQ indices to be
554          * synchronized, which may be broken on RQ restart and cause
555          * Rx malfunction, so queue stopping is not supported if the
556          * vectorized Rx burst is engaged. The burst routine pointer
557          * depends on the process type, so the check has to be
558          * performed in the context of the calling process.
559          */
560         if (pkt_burst == mlx5_rx_burst) {
561                 DRV_LOG(ERR, "Rx queue stop is not supported "
562                         "for vectorized Rx");
563                 rte_errno = EINVAL;
564                 return -EINVAL;
565         }
566         if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
567                 ret = mlx5_mp_os_req_queue_control(dev, idx,
568                                                    MLX5_MP_REQ_QUEUE_RX_STOP);
569         } else {
570                 ret = mlx5_rx_queue_stop_primary(dev, idx);
571         }
572         return ret;
573 }
574
575 /**
576  * Rx queue start. Device queue goes to the ready state,
577  * all required mbufs are allocated and WQ is replenished.
578  *
579  * @param dev
580  *   Pointer to Ethernet device structure.
581  * @param idx
582  *   RX queue index.
583  *
584  * @return
585  *   0 on success, a negative errno value otherwise and rte_errno is set.
586  */
587 int
588 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
589 {
590         struct mlx5_priv *priv = dev->data->dev_private;
591         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
592         struct mlx5_rxq_ctrl *rxq_ctrl =
593                         container_of(rxq, struct mlx5_rxq_ctrl, rxq);
594         int ret;
595
596         MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
597         /* Allocate needed buffers. */
598         ret = rxq_alloc_elts(rxq_ctrl);
599         if (ret) {
600                 DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
601                 rte_errno = errno;
602                 return ret;
603         }
604         rte_cio_wmb();
605         *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
606         rte_cio_wmb();
607         /* Reset RQ consumer before moving queue to READY state. */
608         *rxq->rq_db = rte_cpu_to_be_32(0);
609         rte_cio_wmb();
610         if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
611                 struct ibv_wq_attr mod = {
612                         .attr_mask = IBV_WQ_ATTR_STATE,
613                         .wq_state = IBV_WQS_RDY,
614                 };
615
616                 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
617         } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
618                 struct mlx5_devx_modify_rq_attr rq_attr;
619
620                 memset(&rq_attr, 0, sizeof(rq_attr));
621                 rq_attr.rq_state = MLX5_RQC_STATE_RST;
622                 rq_attr.state = MLX5_RQC_STATE_RDY;
623                 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
624         }
625         if (ret) {
626                 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
627                         strerror(errno));
628                 rte_errno = errno;
629                 return ret;
630         }
631         /* Reinitialize RQ - set WQEs. */
632         mlx5_rxq_initialize(rxq);
633         rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
634         /* Set actual queue state. */
635         dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
636         return 0;
637 }
638
639 /**
640  * Rx queue start. Device queue goes to the ready state,
641  * all required mbufs are allocated and WQ is replenished.
642  *
643  * @param dev
644  *   Pointer to Ethernet device structure.
645  * @param idx
646  *   RX queue index.
647  *
648  * @return
649  *   0 on success, a negative errno value otherwise and rte_errno is set.
650  */
651 int
652 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
653 {
654         int ret;
655
656         if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
657                 DRV_LOG(ERR, "Hairpin queue can't be started");
658                 rte_errno = EINVAL;
659                 return -EINVAL;
660         }
661         if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
662                 return 0;
663         if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
664                 ret = mlx5_mp_os_req_queue_control(dev, idx,
665                                                    MLX5_MP_REQ_QUEUE_RX_START);
666         } else {
667                 ret = mlx5_rx_queue_start_primary(dev, idx);
668         }
669         return ret;
670 }
671
672 /**
673  * Rx queue presetup checks.
674  *
675  * @param dev
676  *   Pointer to Ethernet device structure.
677  * @param idx
678  *   RX queue index.
679  * @param desc
680  *   Number of descriptors to configure in queue.
681  *
682  * @return
683  *   0 on success, a negative errno value otherwise and rte_errno is set.
684  */
685 static int
686 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
687 {
688         struct mlx5_priv *priv = dev->data->dev_private;
689
690         if (!rte_is_power_of_2(*desc)) {
691                 *desc = 1 << log2above(*desc);
692                 DRV_LOG(WARNING,
693                         "port %u increased number of descriptors in Rx queue %u"
694                         " to the next power of two (%d)",
695                         dev->data->port_id, idx, *desc);
696         }
697         DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
698                 dev->data->port_id, idx, *desc);
699         if (idx >= priv->rxqs_n) {
700                 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
701                         dev->data->port_id, idx, priv->rxqs_n);
702                 rte_errno = EOVERFLOW;
703                 return -rte_errno;
704         }
705         if (!mlx5_rxq_releasable(dev, idx)) {
706                 DRV_LOG(ERR, "port %u unable to release queue index %u",
707                         dev->data->port_id, idx);
708                 rte_errno = EBUSY;
709                 return -rte_errno;
710         }
711         mlx5_rxq_release(dev, idx);
712         return 0;
713 }
714
715 /**
716  * DPDK callback to configure a Rx queue.
717  * @param dev
718  *   Pointer to Ethernet device structure.
719  * @param idx
720  *   RX queue index.
721  * @param desc
722  *   Number of descriptors to configure in queue.
723  * @param socket
724  *   NUMA socket on which memory must be allocated.
725  * @param[in] conf
726  *   Thresholds parameters.
727  * @param mp
728  *   Memory pool for buffer allocations.
729  *
730  * @return
731  *   0 on success, a negative errno value otherwise and rte_errno is set.
732  */
733 int
734 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
735                     unsigned int socket, const struct rte_eth_rxconf *conf,
736                     struct rte_mempool *mp)
737 {
738         struct mlx5_priv *priv = dev->data->dev_private;
739         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
740         struct mlx5_rxq_ctrl *rxq_ctrl =
741                 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
742         int res;
743
744         res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
745         if (res)
746                 return res;
747         rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
748         if (!rxq_ctrl) {
749                 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
750                         dev->data->port_id, idx);
751                 rte_errno = ENOMEM;
752                 return -rte_errno;
753         }
754         DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
755                 dev->data->port_id, idx);
756         (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
757         return 0;
758 }
759
760 /**
761  * DPDK callback to configure a Rx hairpin queue.
762  * @param dev
763  *   Pointer to Ethernet device structure.
764  * @param idx
765  *   RX queue index.
766  * @param desc
767  *   Number of descriptors to configure in queue.
768  * @param hairpin_conf
769  *   Hairpin configuration parameters.
770  *
771  * @return
772  *   0 on success, a negative errno value otherwise and rte_errno is set.
773  */
774 int
775 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
776                             uint16_t desc,
777                             const struct rte_eth_hairpin_conf *hairpin_conf)
778 {
779         struct mlx5_priv *priv = dev->data->dev_private;
780         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
781         struct mlx5_rxq_ctrl *rxq_ctrl =
782                 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
783         int res;
784
785         res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
786         if (res)
787                 return res;
788         if (hairpin_conf->peer_count != 1 ||
789             hairpin_conf->peers[0].port != dev->data->port_id ||
790             hairpin_conf->peers[0].queue >= priv->txqs_n) {
791                 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
792                         " invalid hairpin configuration", dev->data->port_id,
793                         idx);
794                 rte_errno = EINVAL;
795                 return -rte_errno;
796         }
797         rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
798         if (!rxq_ctrl) {
799                 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
800                         dev->data->port_id, idx);
801                 rte_errno = ENOMEM;
802                 return -rte_errno;
803         }
804         DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
805                 dev->data->port_id, idx);
806         (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
807         return 0;
808 }
809
810 /**
811  * DPDK callback to release a RX queue.
812  *
813  * @param dpdk_rxq
814  *   Generic RX queue pointer.
815  */
816 void
817 mlx5_rx_queue_release(void *dpdk_rxq)
818 {
819         struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
820         struct mlx5_rxq_ctrl *rxq_ctrl;
821         struct mlx5_priv *priv;
822
823         if (rxq == NULL)
824                 return;
825         rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
826         priv = rxq_ctrl->priv;
827         if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
828                 rte_panic("port %u Rx queue %u is still used by a flow and"
829                           " cannot be removed\n",
830                           PORT_ID(priv), rxq->idx);
831         mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
832 }
833
834 /**
835  * Allocate queue vector and fill epoll fd list for Rx interrupts.
836  *
837  * @param dev
838  *   Pointer to Ethernet device.
839  *
840  * @return
841  *   0 on success, a negative errno value otherwise and rte_errno is set.
842  */
843 int
844 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
845 {
846         struct mlx5_priv *priv = dev->data->dev_private;
847         unsigned int i;
848         unsigned int rxqs_n = priv->rxqs_n;
849         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
850         unsigned int count = 0;
851         struct rte_intr_handle *intr_handle = dev->intr_handle;
852
853         if (!dev->data->dev_conf.intr_conf.rxq)
854                 return 0;
855         mlx5_rx_intr_vec_disable(dev);
856         intr_handle->intr_vec = mlx5_malloc(0,
857                                 n * sizeof(intr_handle->intr_vec[0]),
858                                 0, SOCKET_ID_ANY);
859         if (intr_handle->intr_vec == NULL) {
860                 DRV_LOG(ERR,
861                         "port %u failed to allocate memory for interrupt"
862                         " vector, Rx interrupts will not be supported",
863                         dev->data->port_id);
864                 rte_errno = ENOMEM;
865                 return -rte_errno;
866         }
867         intr_handle->type = RTE_INTR_HANDLE_EXT;
868         for (i = 0; i != n; ++i) {
869                 /* This rxq obj must not be released in this function. */
870                 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
871                 struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
872                 int rc;
873
874                 /* Skip queues that cannot request interrupts. */
875                 if (!rxq_obj || (!rxq_obj->ibv_channel &&
876                                  !rxq_obj->devx_channel)) {
877                         /* Use invalid intr_vec[] index to disable entry. */
878                         intr_handle->intr_vec[i] =
879                                 RTE_INTR_VEC_RXTX_OFFSET +
880                                 RTE_MAX_RXTX_INTR_VEC_ID;
881                         /* Decrease the rxq_ctrl's refcnt */
882                         if (rxq_ctrl)
883                                 mlx5_rxq_release(dev, i);
884                         continue;
885                 }
886                 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
887                         DRV_LOG(ERR,
888                                 "port %u too many Rx queues for interrupt"
889                                 " vector size (%d), Rx interrupts cannot be"
890                                 " enabled",
891                                 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
892                         mlx5_rx_intr_vec_disable(dev);
893                         rte_errno = ENOMEM;
894                         return -rte_errno;
895                 }
896                 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
897                 if (rc < 0) {
898                         rte_errno = errno;
899                         DRV_LOG(ERR,
900                                 "port %u failed to make Rx interrupt file"
901                                 " descriptor %d non-blocking for queue index"
902                                 " %d",
903                                 dev->data->port_id, rxq_obj->fd, i);
904                         mlx5_rx_intr_vec_disable(dev);
905                         return -rte_errno;
906                 }
907                 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
908                 intr_handle->efds[count] = rxq_obj->fd;
909                 count++;
910         }
911         if (!count)
912                 mlx5_rx_intr_vec_disable(dev);
913         else
914                 intr_handle->nb_efd = count;
915         return 0;
916 }
917
918 /**
919  * Clean up Rx interrupts handler.
920  *
921  * @param dev
922  *   Pointer to Ethernet device.
923  */
924 void
925 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
926 {
927         struct mlx5_priv *priv = dev->data->dev_private;
928         struct rte_intr_handle *intr_handle = dev->intr_handle;
929         unsigned int i;
930         unsigned int rxqs_n = priv->rxqs_n;
931         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
932
933         if (!dev->data->dev_conf.intr_conf.rxq)
934                 return;
935         if (!intr_handle->intr_vec)
936                 goto free;
937         for (i = 0; i != n; ++i) {
938                 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
939                     RTE_MAX_RXTX_INTR_VEC_ID)
940                         continue;
941                 /**
942                  * Need to access directly the queue to release the reference
943                  * kept in mlx5_rx_intr_vec_enable().
944                  */
945                 mlx5_rxq_release(dev, i);
946         }
947 free:
948         rte_intr_free_epoll_fd(intr_handle);
949         if (intr_handle->intr_vec)
950                 mlx5_free(intr_handle->intr_vec);
951         intr_handle->nb_efd = 0;
952         intr_handle->intr_vec = NULL;
953 }
954
955 /**
956  * MLX5 CQ notification.
957  *
958  * @param rxq
959  *   Pointer to receive queue structure.
960  * @param sq_n_rxq
961  *   Sequence number per receive queue.
962  */
963 static inline void
964 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
965 {
966         int sq_n = 0;
967         uint32_t doorbell_hi;
968         uint64_t doorbell;
969         void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
970
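        /*
         * The arming doorbell value combines the CQ arm sequence number
         * and the current CQ consumer index in the upper 32 bits with the
         * CQ number in the lower 32 bits; it is written to both the CQ
         * doorbell record and the UAR doorbell register below.
         */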
971         sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
972         doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
973         doorbell = (uint64_t)doorbell_hi << 32;
974         doorbell |= rxq->cqn;
975         rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
976         mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
977                          cq_db_reg, rxq->uar_lock_cq);
978 }
979
980 /**
981  * DPDK callback for Rx queue interrupt enable.
982  *
983  * @param dev
984  *   Pointer to Ethernet device structure.
985  * @param rx_queue_id
986  *   Rx queue number.
987  *
988  * @return
989  *   0 on success, a negative errno value otherwise and rte_errno is set.
990  */
991 int
992 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
993 {
994         struct mlx5_rxq_ctrl *rxq_ctrl;
995
996         rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
997         if (!rxq_ctrl)
998                 goto error;
999         if (rxq_ctrl->irq) {
1000                 if (!rxq_ctrl->obj) {
1001                         mlx5_rxq_release(dev, rx_queue_id);
1002                         goto error;
1003                 }
1004                 mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
1005         }
1006         mlx5_rxq_release(dev, rx_queue_id);
1007         return 0;
1008 error:
1009         rte_errno = EINVAL;
1010         return -rte_errno;
1011 }
1012
1013 /**
1014  * DPDK callback for Rx queue interrupt disable.
1015  *
1016  * @param dev
1017  *   Pointer to Ethernet device structure.
1018  * @param rx_queue_id
1019  *   Rx queue number.
1020  *
1021  * @return
1022  *   0 on success, a negative errno value otherwise and rte_errno is set.
1023  */
1024 int
1025 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1026 {
1027         struct mlx5_rxq_ctrl *rxq_ctrl;
1028         struct mlx5_rxq_obj *rxq_obj = NULL;
1029         struct ibv_cq *ev_cq;
1030         void *ev_ctx;
1031         int ret = 0;
1032
1033         rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1034         if (!rxq_ctrl) {
1035                 rte_errno = EINVAL;
1036                 return -rte_errno;
1037         }
1038         if (!rxq_ctrl->irq) {
1039                 mlx5_rxq_release(dev, rx_queue_id);
1040                 return 0;
1041         }
1042         rxq_obj = rxq_ctrl->obj;
1043         if (!rxq_obj)
1044                 goto error;
1045         if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1046                 ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel, &ev_cq,
1047                                               &ev_ctx);
1048                 if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
1049                         goto error;
1050                 mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
1051         } else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1052 #ifdef HAVE_IBV_DEVX_EVENT
1053                 union {
1054                         struct mlx5dv_devx_async_event_hdr event_resp;
1055                         uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
1056                                     + 128];
1057                 } out;
1058
1059                 ret = mlx5_glue->devx_get_event
1060                                 (rxq_obj->devx_channel, &out.event_resp,
1061                                  sizeof(out.buf));
1062                 if (ret < 0 || out.event_resp.cookie !=
1063                                 (uint64_t)(uintptr_t)rxq_obj->devx_cq)
1064                         goto error;
1065 #endif /* HAVE_IBV_DEVX_EVENT */
1066         }
1067         rxq_ctrl->rxq.cq_arm_sn++;
1068         mlx5_rxq_release(dev, rx_queue_id);
1069         return 0;
1070 error:
1071         /**
1072          * For ret < 0 save the errno (may be EAGAIN which means the get_event
1073          * function was called before receiving one).
1074          */
1075         if (ret < 0)
1076                 rte_errno = errno;
1077         else
1078                 rte_errno = EINVAL;
1079         ret = rte_errno; /* Save rte_errno before cleanup. */
1080         mlx5_rxq_release(dev, rx_queue_id);
1081         if (ret != EAGAIN)
1082                 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1083                         dev->data->port_id, rx_queue_id);
1084         rte_errno = ret; /* Restore rte_errno. */
1085         return -rte_errno;
1086 }
1087
1088 /**
1089  * Verify that the Rx queue object list is empty.
1090  *
1091  * @param dev
1092  *   Pointer to Ethernet device.
1093  *
1094  * @return
1095  *   The number of objects not released.
1096  */
1097 int
1098 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1099 {
1100         struct mlx5_priv *priv = dev->data->dev_private;
1101         int ret = 0;
1102         struct mlx5_rxq_obj *rxq_obj;
1103
1104         LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1105                 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1106                         dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1107                 ++ret;
1108         }
1109         return ret;
1110 }
1111
1112 /**
1113  * Callback function to initialize mbufs for Multi-Packet RQ.
1114  */
1115 static inline void
1116 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1117                     void *_m, unsigned int i __rte_unused)
1118 {
1119         struct mlx5_mprq_buf *buf = _m;
1120         struct rte_mbuf_ext_shared_info *shinfo;
1121         unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1122         unsigned int j;
1123
1124         memset(_m, 0, sizeof(*buf));
1125         buf->mp = mp;
1126         rte_atomic16_set(&buf->refcnt, 1);
1127         for (j = 0; j != strd_n; ++j) {
1128                 shinfo = &buf->shinfos[j];
1129                 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1130                 shinfo->fcb_opaque = buf;
1131         }
1132 }
1133
1134 /**
1135  * Free mempool of Multi-Packet RQ.
1136  *
1137  * @param dev
1138  *   Pointer to Ethernet device.
1139  *
1140  * @return
1141  *   0 on success, negative errno value on failure.
1142  */
1143 int
1144 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1145 {
1146         struct mlx5_priv *priv = dev->data->dev_private;
1147         struct rte_mempool *mp = priv->mprq_mp;
1148         unsigned int i;
1149
1150         if (mp == NULL)
1151                 return 0;
1152         DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1153                 dev->data->port_id, mp->name);
1154         /*
1155          * If a buffer in the pool has been externally attached to an mbuf
1156          * and is still in use by the application, destroying the Rx queue
1157          * can spoil the packet. This is unlikely, but can happen if queues
1158          * are dynamically created and destroyed while Rx packets are held.
1159          *
1160          * TODO: It is unavoidable for now because the mempool for Multi-Packet
1161          * RQ isn't provided by application but managed by PMD.
1162          */
1163         if (!rte_mempool_full(mp)) {
1164                 DRV_LOG(ERR,
1165                         "port %u mempool for Multi-Packet RQ is still in use",
1166                         dev->data->port_id);
1167                 rte_errno = EBUSY;
1168                 return -rte_errno;
1169         }
1170         rte_mempool_free(mp);
1171         /* Unset mempool for each Rx queue. */
1172         for (i = 0; i != priv->rxqs_n; ++i) {
1173                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1174
1175                 if (rxq == NULL)
1176                         continue;
1177                 rxq->mprq_mp = NULL;
1178         }
1179         priv->mprq_mp = NULL;
1180         return 0;
1181 }
1182
1183 /**
1184  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1185  * mempool. If already allocated, reuse it if there are enough elements.
1186  * Otherwise, resize it.
1187  *
1188  * @param dev
1189  *   Pointer to Ethernet device.
1190  *
1191  * @return
1192  *   0 on success, negative errno value on failure.
1193  */
1194 int
1195 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1196 {
1197         struct mlx5_priv *priv = dev->data->dev_private;
1198         struct rte_mempool *mp = priv->mprq_mp;
1199         char name[RTE_MEMPOOL_NAMESIZE];
1200         unsigned int desc = 0;
1201         unsigned int buf_len;
1202         unsigned int obj_num;
1203         unsigned int obj_size;
1204         unsigned int strd_num_n = 0;
1205         unsigned int strd_sz_n = 0;
1206         unsigned int i;
1207         unsigned int n_ibv = 0;
1208
1209         if (!mlx5_mprq_enabled(dev))
1210                 return 0;
1211         /* Count the total number of descriptors configured. */
1212         for (i = 0; i != priv->rxqs_n; ++i) {
1213                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1214                 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1215                         (rxq, struct mlx5_rxq_ctrl, rxq);
1216
1217                 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1218                         continue;
1219                 n_ibv++;
1220                 desc += 1 << rxq->elts_n;
1221                 /* Get the max number of strides. */
1222                 if (strd_num_n < rxq->strd_num_n)
1223                         strd_num_n = rxq->strd_num_n;
1224                 /* Get the max size of a stride. */
1225                 if (strd_sz_n < rxq->strd_sz_n)
1226                         strd_sz_n = rxq->strd_sz_n;
1227         }
1228         MLX5_ASSERT(strd_num_n && strd_sz_n);
1229         buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1230         obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1231                 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
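        /*
         * For illustration only (values are hypothetical): with
         * strd_num_n = 6 and strd_sz_n = 11, i.e. 64 strides of 2 KiB,
         * buf_len is 128 KiB and each mempool object additionally carries
         * 64 rte_mbuf_ext_shared_info records plus mbuf headroom.
         */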
1232         /*
1233          * Received packets can be either memcpy'd or externally referenced.
1234          * When a packet is attached to an mbuf as an external buffer, it is
1235          * not possible to predict how the buffers will be queued by the
1236          * application, so the needed buffers cannot be pre-allocated exactly;
1237          * enough buffers have to be prepared speculatively.
1238          *
1239          * In the data path, if this mempool is depleted, the PMD will memcpy
1240          * received packets into buffers provided by the application (rxq->mp)
1241          * until this mempool has free buffers again.
1242          */
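        /* Heuristic margin: provision four times the configured descriptors. */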
1243         desc *= 4;
1244         obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1245         /*
1246          * rte_mempool_create_empty() has sanity check to refuse large cache
1247          * size compared to the number of elements.
1248          * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1249          * constant number 2 instead.
1250          */
1251         obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1252         /* Check whether the mempool is already allocated and can be reused. */
1253         if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1254                 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1255                         dev->data->port_id, mp->name);
1256                 /* Reuse. */
1257                 goto exit;
1258         } else if (mp != NULL) {
1259                 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1260                         dev->data->port_id, mp->name);
1261                 /*
1262                  * If failed to free, which means it may be still in use, no way
1263                  * but to keep using the existing one. On buffer underrun,
1264                  * packets will be memcpy'd instead of external buffer
1265                  * attachment.
1266                  */
1267                 if (mlx5_mprq_free_mp(dev)) {
1268                         if (mp->elt_size >= obj_size)
1269                                 goto exit;
1270                         else
1271                                 return -rte_errno;
1272                 }
1273         }
1274         snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1275         mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1276                                 0, NULL, NULL, mlx5_mprq_buf_init,
1277                                 (void *)(uintptr_t)(1 << strd_num_n),
1278                                 dev->device->numa_node, 0);
1279         if (mp == NULL) {
1280                 DRV_LOG(ERR,
1281                         "port %u failed to allocate a mempool for"
1282                         " Multi-Packet RQ, count=%u, size=%u",
1283                         dev->data->port_id, obj_num, obj_size);
1284                 rte_errno = ENOMEM;
1285                 return -rte_errno;
1286         }
1287         priv->mprq_mp = mp;
1288 exit:
1289         /* Set mempool for each Rx queue. */
1290         for (i = 0; i != priv->rxqs_n; ++i) {
1291                 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1292                 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1293                         (rxq, struct mlx5_rxq_ctrl, rxq);
1294
1295                 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1296                         continue;
1297                 rxq->mprq_mp = mp;
1298         }
1299         DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1300                 dev->data->port_id);
1301         return 0;
1302 }
1303
1304 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1305                                         sizeof(struct rte_vlan_hdr) * 2 + \
1306                                         sizeof(struct rte_ipv6_hdr)))
1307 #define MAX_TCP_OPTION_SIZE 40u
1308 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1309                                  sizeof(struct rte_tcp_hdr) + \
1310                                  MAX_TCP_OPTION_SIZE))
1311
1312 /**
1313  * Adjust the maximum LRO message size.
1314  *
1315  * @param dev
1316  *   Pointer to Ethernet device.
1317  * @param idx
1318  *   RX queue index.
1319  * @param max_lro_size
1320  *   The maximum size for LRO packet.
1321  */
1322 static void
1323 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1324                              uint32_t max_lro_size)
1325 {
1326         struct mlx5_priv *priv = dev->data->dev_private;
1327
1328         if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1329             MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1330             MLX5_MAX_TCP_HDR_OFFSET)
1331                 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1332         max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1333         MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1334         max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
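        /*
         * From here on the value is kept in units of MLX5_LRO_SEG_CHUNK_SIZE;
         * the debug log below converts it back to bytes.
         */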
1335         if (priv->max_lro_msg_size)
1336                 priv->max_lro_msg_size =
1337                         RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1338         else
1339                 priv->max_lro_msg_size = max_lro_size;
1340         DRV_LOG(DEBUG,
1341                 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1342                 dev->data->port_id, idx,
1343                 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1344 }
1345
1346 /**
1347  * Create a DPDK Rx queue.
1348  *
1349  * @param dev
1350  *   Pointer to Ethernet device.
1351  * @param idx
1352  *   RX queue index.
1353  * @param desc
1354  *   Number of descriptors to configure in queue.
1355  * @param socket
1356  *   NUMA socket on which memory must be allocated.
1357  *
1358  * @return
1359  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1360  */
1361 struct mlx5_rxq_ctrl *
1362 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1363              unsigned int socket, const struct rte_eth_rxconf *conf,
1364              struct rte_mempool *mp)
1365 {
1366         struct mlx5_priv *priv = dev->data->dev_private;
1367         struct mlx5_rxq_ctrl *tmpl;
1368         unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1369         unsigned int mprq_stride_nums;
1370         unsigned int mprq_stride_size;
1371         unsigned int mprq_stride_cap;
1372         struct mlx5_dev_config *config = &priv->config;
1373         /*
1374          * Always allocate extra slots, even if eventually
1375          * the vector Rx will not be used.
1376          */
1377         uint16_t desc_n =
1378                 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1379         uint64_t offloads = conf->offloads |
1380                            dev->data->dev_conf.rxmode.offloads;
1381         unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1382         const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1383         unsigned int max_rx_pkt_len = lro_on_queue ?
1384                         dev->data->dev_conf.rxmode.max_lro_pkt_size :
1385                         dev->data->dev_conf.rxmode.max_rx_pkt_len;
1386         unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1387                                                         RTE_PKTMBUF_HEADROOM;
1388         unsigned int max_lro_size = 0;
1389         unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1390
1391         if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1392                                                     DEV_RX_OFFLOAD_SCATTER)) {
1393                 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1394                         " configured and there is not enough mbuf space (%u) to"
1395                         " contain the maximum Rx packet length (%u) with headroom (%u)",
1396                         dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1397                         RTE_PKTMBUF_HEADROOM);
1398                 rte_errno = ENOSPC;
1399                 return NULL;
1400         }
1401         tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1402                            desc_n * sizeof(struct rte_mbuf *), 0, socket);
1403         if (!tmpl) {
1404                 rte_errno = ENOMEM;
1405                 return NULL;
1406         }
1407         tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1408         if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1409                                MLX5_MR_BTREE_CACHE_N, socket)) {
1410                 /* rte_errno is already set. */
1411                 goto error;
1412         }
1413         tmpl->socket = socket;
1414         if (dev->data->dev_conf.intr_conf.rxq)
1415                 tmpl->irq = 1;
1416         mprq_stride_nums = config->mprq.stride_num_n ?
1417                 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1418         mprq_stride_size = non_scatter_min_mbuf_size <=
1419                 (1U << config->mprq.max_stride_size_n) ?
1420                 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1421         mprq_stride_cap = (config->mprq.stride_num_n ?
1422                 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1423                         (config->mprq.stride_size_n ?
1424                 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1425         /*
1426          * This Rx queue can be configured as a Multi-Packet RQ if all of the
1427          * following conditions are met:
1428          *  - MPRQ is enabled.
1429          *  - The number of descs is more than the number of strides.
1430          *  - max_rx_pkt_len plus overhead is less than the max size
1431          *    of a stride, or mprq_stride_size is specified by a user.
1432          *    Need to make sure that there are enough strides to hold
1433          *    the maximum packet size in case mprq_stride_size is set.
1434          *  Otherwise, enable Rx scatter if necessary.
1435          */
1436         if (mprq_en && desc > (1U << mprq_stride_nums) &&
1437             (non_scatter_min_mbuf_size <=
1438              (1U << config->mprq.max_stride_size_n) ||
1439              (config->mprq.stride_size_n &&
1440               non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1441                 /* TODO: Rx scatter isn't supported yet. */
1442                 tmpl->rxq.sges_n = 0;
1443                 /* Trim the number of descs needed. */
1444                 desc >>= mprq_stride_nums;
1445                 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1446                         config->mprq.stride_num_n : mprq_stride_nums;
1447                 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1448                         config->mprq.stride_size_n : mprq_stride_size;
1449                 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1450                 tmpl->rxq.strd_scatter_en =
1451                                 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1452                 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1453                                 config->mprq.max_memcpy_len);
1454                 max_lro_size = RTE_MIN(max_rx_pkt_len,
1455                                        (1u << tmpl->rxq.strd_num_n) *
1456                                        (1u << tmpl->rxq.strd_sz_n));
1457                 DRV_LOG(DEBUG,
1458                         "port %u Rx queue %u: Multi-Packet RQ is enabled,"
1459                         " strd_num_n = %u, strd_sz_n = %u",
1460                         dev->data->port_id, idx,
1461                         tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1462         } else if (max_rx_pkt_len <= first_mb_free_size) {
1463                 tmpl->rxq.sges_n = 0;
1464                 max_lro_size = max_rx_pkt_len;
1465         } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1466                 unsigned int size = non_scatter_min_mbuf_size;
1467                 unsigned int sges_n;
1468
1469                 if (lro_on_queue && first_mb_free_size <
1470                     MLX5_MAX_LRO_HEADER_FIX) {
1471                         DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1472                                 " to include the max header size(%u) for LRO",
1473                                 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1474                         rte_errno = ENOTSUP;
1475                         goto error;
1476                 }
1477                 /*
1478                  * Determine the number of SGEs needed for a full packet
1479                  * and round it to the next power of two.
1480                  */
1481                 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1482                 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1483                         DRV_LOG(ERR,
1484                                 "port %u too many SGEs (%u) needed to handle"
1485                                 " requested maximum packet size %u, the maximum"
1486                                 " supported are %u", dev->data->port_id,
1487                                 1 << sges_n, max_rx_pkt_len,
1488                                 1u << MLX5_MAX_LOG_RQ_SEGS);
1489                         rte_errno = ENOTSUP;
1490                         goto error;
1491                 }
1492                 tmpl->rxq.sges_n = sges_n;
1493                 max_lro_size = max_rx_pkt_len;
1494         }
1495         if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1496                 DRV_LOG(WARNING,
1497                         "port %u MPRQ is requested but cannot be enabled\n"
1498                         " (requested: pkt_sz = %u, desc_num = %u,"
1499                         " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1500                         "  supported: min_rxqs_num = %u,"
1501                         " min_stride_sz = %u, max_stride_sz = %u).",
1502                         dev->data->port_id, non_scatter_min_mbuf_size,
1503                         desc, priv->rxqs_n,
1504                         config->mprq.stride_size_n ?
1505                                 (1U << config->mprq.stride_size_n) :
1506                                 (1U << mprq_stride_size),
1507                         config->mprq.stride_num_n ?
1508                                 (1U << config->mprq.stride_num_n) :
1509                                 (1U << mprq_stride_nums),
1510                         config->mprq.min_rxqs_num,
1511                         (1U << config->mprq.min_stride_size_n),
1512                         (1U << config->mprq.max_stride_size_n));
1513         DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1514                 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1515         if (desc % (1 << tmpl->rxq.sges_n)) {
1516                 DRV_LOG(ERR,
1517                         "port %u number of Rx queue descriptors (%u) is not a"
1518                         " multiple of SGEs per packet (%u)",
1519                         dev->data->port_id,
1520                         desc,
1521                         1 << tmpl->rxq.sges_n);
1522                 rte_errno = EINVAL;
1523                 goto error;
1524         }
1525         mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1526         /* Toggle RX checksum offload if hardware supports it. */
1527         tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1528         tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1529         /* Configure VLAN stripping. */
1530         tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1531         /* By default, FCS (CRC) is stripped by hardware. */
1532         tmpl->rxq.crc_present = 0;
1533         tmpl->rxq.lro = lro_on_queue;
1534         if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1535                 if (config->hw_fcs_strip) {
1536                         /*
1537                          * RQs used for LRO-enabled TIRs should not be
1538                          * configured to scatter the FCS.
1539                          */
1540                         if (lro_on_queue)
1541                                 DRV_LOG(WARNING,
1542                                         "port %u CRC stripping has been "
1543                                         "disabled but will still be performed "
1544                                         "by hardware, because LRO is enabled",
1545                                         dev->data->port_id);
1546                         else
1547                                 tmpl->rxq.crc_present = 1;
1548                 } else {
1549                         DRV_LOG(WARNING,
1550                                 "port %u CRC stripping has been disabled but will"
1551                                 " still be performed by hardware, make sure MLNX_OFED"
1552                                 " and firmware are up to date",
1553                                 dev->data->port_id);
1554                 }
1555         }
1556         DRV_LOG(DEBUG,
1557                 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1558                 " incoming frames to hide it",
1559                 dev->data->port_id,
1560                 tmpl->rxq.crc_present ? "disabled" : "enabled",
1561                 tmpl->rxq.crc_present << 2);
1562         tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1563                 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1564         /* Save port ID. */
1565         tmpl->rxq.port_id = dev->data->port_id;
1566         tmpl->priv = priv;
1567         tmpl->rxq.mp = mp;
1568         tmpl->rxq.elts_n = log2above(desc);
1569         tmpl->rxq.rq_repl_thresh =
1570                 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1571         tmpl->rxq.elts =
1572                 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1573 #ifndef RTE_ARCH_64
1574         tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1575 #endif
1576         tmpl->rxq.idx = idx;
1577         rte_atomic32_inc(&tmpl->refcnt);
1578         LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1579         return tmpl;
1580 error:
1581         mlx5_free(tmpl);
1582         return NULL;
1583 }
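
/*
 * Illustrative arithmetic for the Multi-Packet RQ sizing above (the values
 * are hypothetical, not defaults): assuming config->mprq.stride_num_n = 9
 * and config->mprq.stride_size_n = 11, mprq_stride_cap evaluates to
 * (1 << 9) * (1 << 11) = 1048576 bytes per multi-packet WQE, and a queue
 * created with desc = 1024 is trimmed to 1024 >> 9 = 2 WQEs, each holding
 * 512 strides of 2048 bytes.
 */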
1584
1585 /**
1586  * Create a DPDK Rx hairpin queue.
1587  *
1588  * @param dev
1589  *   Pointer to Ethernet device.
1590  * @param idx
1591  *   RX queue index.
1592  * @param desc
1593  *   Number of descriptors to configure in queue.
1594  * @param hairpin_conf
1595  *   The hairpin binding configuration.
1596  *
1597  * @return
1598  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1599  */
1600 struct mlx5_rxq_ctrl *
1601 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1602                      const struct rte_eth_hairpin_conf *hairpin_conf)
1603 {
1604         struct mlx5_priv *priv = dev->data->dev_private;
1605         struct mlx5_rxq_ctrl *tmpl;
1606
1607         tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1608                            SOCKET_ID_ANY);
1609         if (!tmpl) {
1610                 rte_errno = ENOMEM;
1611                 return NULL;
1612         }
1613         tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1614         tmpl->socket = SOCKET_ID_ANY;
1615         tmpl->rxq.rss_hash = 0;
1616         tmpl->rxq.port_id = dev->data->port_id;
1617         tmpl->priv = priv;
1618         tmpl->rxq.mp = NULL;
1619         tmpl->rxq.elts_n = log2above(desc);
1620         tmpl->rxq.elts = NULL;
1621         tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1622         tmpl->hairpin_conf = *hairpin_conf;
1623         tmpl->rxq.idx = idx;
1624         rte_atomic32_inc(&tmpl->refcnt);
1625         LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1626         return tmpl;
1627 }
1628
1629 /**
1630  * Get a Rx queue.
1631  *
1632  * @param dev
1633  *   Pointer to Ethernet device.
1634  * @param idx
1635  *   RX queue index.
1636  *
1637  * @return
1638  *   A pointer to the queue if it exists, NULL otherwise.
1639  */
1640 struct mlx5_rxq_ctrl *
1641 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1642 {
1643         struct mlx5_priv *priv = dev->data->dev_private;
1644         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1645         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1646
1647         if (rxq_data) {
1648                 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1649                 rte_atomic32_inc(&rxq_ctrl->refcnt);
1650         }
1651         return rxq_ctrl;
1652 }
1653
1654 /**
1655  * Release a Rx queue.
1656  *
1657  * @param dev
1658  *   Pointer to Ethernet device.
1659  * @param idx
1660  *   RX queue index.
1661  *
1662  * @return
1663  *   1 while a reference on it exists, 0 when freed.
1664  */
1665 int
1666 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1667 {
1668         struct mlx5_priv *priv = dev->data->dev_private;
1669         struct mlx5_rxq_ctrl *rxq_ctrl;
1670
1671         if (!(*priv->rxqs)[idx])
1672                 return 0;
1673         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1674         if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
1675                 return 1;
1676         if (rxq_ctrl->obj) {
1677                 priv->obj_ops->rxq_obj_release(rxq_ctrl->obj);
1678                 rxq_ctrl->obj = NULL;
1679         }
1680         if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1681                 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1682         LIST_REMOVE(rxq_ctrl, next);
1683         mlx5_free(rxq_ctrl);
1684         (*priv->rxqs)[idx] = NULL;
1685         return 0;
1686 }
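
/*
 * Usage sketch (illustrative only, mirroring how mlx5_ind_table_obj_new()
 * below consumes these helpers): every successful mlx5_rxq_get() must be
 * balanced by one mlx5_rxq_release() so the control structure is freed
 * only when the last reference is dropped.
 *
 *     struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *
 *     if (ctrl != NULL) {
 *             ... use ctrl->rxq / ctrl->obj here ...
 *             mlx5_rxq_release(dev, idx);
 *     }
 */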
1687
1688 /**
1689  * Verify the Rx Queue list is empty.
1690  *
1691  * @param dev
1692  *   Pointer to Ethernet device.
1693  *
1694  * @return
1695  *   The number of objects not released.
1696  */
1697 int
1698 mlx5_rxq_verify(struct rte_eth_dev *dev)
1699 {
1700         struct mlx5_priv *priv = dev->data->dev_private;
1701         struct mlx5_rxq_ctrl *rxq_ctrl;
1702         int ret = 0;
1703
1704         LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1705                 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1706                         dev->data->port_id, rxq_ctrl->rxq.idx);
1707                 ++ret;
1708         }
1709         return ret;
1710 }
1711
1712 /**
1713  * Get a Rx queue type.
1714  *
1715  * @param dev
1716  *   Pointer to Ethernet device.
1717  * @param idx
1718  *   Rx queue index.
1719  *
1720  * @return
1721  *   The Rx queue type.
1722  */
1723 enum mlx5_rxq_type
1724 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1725 {
1726         struct mlx5_priv *priv = dev->data->dev_private;
1727         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1728
1729         if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1730                 rxq_ctrl = container_of((*priv->rxqs)[idx],
1731                                         struct mlx5_rxq_ctrl,
1732                                         rxq);
1733                 return rxq_ctrl->type;
1734         }
1735         return MLX5_RXQ_TYPE_UNDEFINED;
1736 }
1737
1738 /**
1739  * Create an indirection table.
1740  *
1741  * @param dev
1742  *   Pointer to Ethernet device.
1743  * @param queues
1744  *   Queues entering the indirection table.
1745  * @param queues_n
1746  *   Number of queues in the array.
1747  *
1748  * @return
1749  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1750  */
1751 static struct mlx5_ind_table_obj *
1752 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1753                        uint32_t queues_n, enum mlx5_ind_tbl_type type)
1754 {
1755         struct mlx5_priv *priv = dev->data->dev_private;
1756         struct mlx5_ind_table_obj *ind_tbl;
1757         unsigned int i = 0, j = 0, k = 0;
1758
1759         ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
1760                               queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
1761         if (!ind_tbl) {
1762                 rte_errno = ENOMEM;
1763                 return NULL;
1764         }
1765         ind_tbl->type = type;
1766         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1767                 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1768                         log2above(queues_n) :
1769                         log2above(priv->config.ind_table_max_size);
1770                 struct ibv_wq *wq[1 << wq_n];
1771
1772                 for (i = 0; i != queues_n; ++i) {
1773                         struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1774                                                                  queues[i]);
1775                         if (!rxq)
1776                                 goto error;
1777                         wq[i] = rxq->obj->wq;
1778                         ind_tbl->queues[i] = queues[i];
1779                 }
1780                 ind_tbl->queues_n = queues_n;
1781                 /* Finalise indirection table. */
1782                 k = i; /* Retain value of i for use in error case. */
1783                 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
1784                         wq[k] = wq[j];
1785                 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1786                         (priv->sh->ctx,
1787                          &(struct ibv_rwq_ind_table_init_attr){
1788                                 .log_ind_tbl_size = wq_n,
1789                                 .ind_tbl = wq,
1790                                 .comp_mask = 0,
1791                         });
1792                 if (!ind_tbl->ind_table) {
1793                         rte_errno = errno;
1794                         goto error;
1795                 }
1796         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
1797                 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
1798                 const unsigned int rqt_n =
1799                         1 << (rte_is_power_of_2(queues_n) ?
1800                               log2above(queues_n) :
1801                               log2above(priv->config.ind_table_max_size));
1802
1803                 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
1804                                       rqt_n * sizeof(uint32_t), 0,
1805                                       SOCKET_ID_ANY);
1806                 if (!rqt_attr) {
1807                         DRV_LOG(ERR, "port %u cannot allocate RQT resources",
1808                                 dev->data->port_id);
1809                         rte_errno = ENOMEM;
1810                         goto error;
1811                 }
1812                 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
1813                 rqt_attr->rqt_actual_size = rqt_n;
1814                 for (i = 0; i != queues_n; ++i) {
1815                         struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1816                                                                  queues[i]);
1817                         if (!rxq)
1818                                 goto error;
1819                         rqt_attr->rq_list[i] = rxq->obj->rq->id;
1820                         ind_tbl->queues[i] = queues[i];
1821                 }
1822                 k = i; /* Retain value of i for use in error case. */
1823                 for (j = 0; k != rqt_n; ++k, ++j)
1824                         rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
1825                 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
1826                                                         rqt_attr);
1827                 mlx5_free(rqt_attr);
1828                 if (!ind_tbl->rqt) {
1829                         DRV_LOG(ERR, "port %u cannot create DevX RQT",
1830                                 dev->data->port_id);
1831                         rte_errno = errno;
1832                         goto error;
1833                 }
1834                 ind_tbl->queues_n = queues_n;
1835         }
1836         rte_atomic32_inc(&ind_tbl->refcnt);
1837         LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1838         return ind_tbl;
1839 error:
1840         for (j = 0; j < i; j++)
1841                 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1842         mlx5_free(ind_tbl);
1843         DEBUG("port %u cannot create indirection table", dev->data->port_id);
1844         return NULL;
1845 }
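
/*
 * Illustrative example of the table sizing and padding above (hypothetical
 * values): with a power-of-two queues_n such as 4, the table holds exactly
 * 4 entries; with queues_n = 6 and, say, ind_table_max_size = 512, the
 * table is sized to 512 entries and the wrap-around loop replicates the
 * 6-queue pattern cyclically (q0, q1, ..., q5, q0, q1, ...) for both the
 * Verbs WQ array and the DevX rq_list.
 */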
1846
1847 /**
1848  * Get an indirection table.
1849  *
1850  * @param dev
1851  *   Pointer to Ethernet device.
1852  * @param queues
1853  *   Queues entering the indirection table.
1854  * @param queues_n
1855  *   Number of queues in the array.
1856  *
1857  * @return
1858  *   An indirection table if found.
1859  */
1860 static struct mlx5_ind_table_obj *
1861 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1862                        uint32_t queues_n)
1863 {
1864         struct mlx5_priv *priv = dev->data->dev_private;
1865         struct mlx5_ind_table_obj *ind_tbl;
1866
1867         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1868                 if ((ind_tbl->queues_n == queues_n) &&
1869                     (memcmp(ind_tbl->queues, queues,
1870                             ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1871                      == 0))
1872                         break;
1873         }
1874         if (ind_tbl) {
1875                 unsigned int i;
1876
1877                 rte_atomic32_inc(&ind_tbl->refcnt);
1878                 for (i = 0; i != ind_tbl->queues_n; ++i)
1879                         mlx5_rxq_get(dev, ind_tbl->queues[i]);
1880         }
1881         return ind_tbl;
1882 }
1883
1884 /**
1885  * Release an indirection table.
1886  *
1887  * @param dev
1888  *   Pointer to Ethernet device.
1889  * @param ind_table
1890  *   Indirection table to release.
1891  *
1892  * @return
1893  *   1 while a reference on it exists, 0 when freed.
1894  */
1895 static int
1896 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1897                            struct mlx5_ind_table_obj *ind_tbl)
1898 {
1899         unsigned int i;
1900
1901         if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
1902                 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
1903                         claim_zero(mlx5_glue->destroy_rwq_ind_table
1904                                                         (ind_tbl->ind_table));
1905                 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
1906                         claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
1907         }
1908         for (i = 0; i != ind_tbl->queues_n; ++i)
1909                 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1910         if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1911                 LIST_REMOVE(ind_tbl, next);
1912                 mlx5_free(ind_tbl);
1913                 return 0;
1914         }
1915         return 1;
1916 }
1917
1918 /**
1919  * Verify the indirection table list is empty.
1920  *
1921  * @param dev
1922  *   Pointer to Ethernet device.
1923  *
1924  * @return
1925  *   The number of objects not released.
1926  */
1927 int
1928 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1929 {
1930         struct mlx5_priv *priv = dev->data->dev_private;
1931         struct mlx5_ind_table_obj *ind_tbl;
1932         int ret = 0;
1933
1934         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1935                 DRV_LOG(DEBUG,
1936                         "port %u indirection table obj %p still referenced",
1937                         dev->data->port_id, (void *)ind_tbl);
1938                 ++ret;
1939         }
1940         return ret;
1941 }
1942
1943 /**
1944  * Create an Rx Hash queue.
1945  *
1946  * @param dev
1947  *   Pointer to Ethernet device.
1948  * @param rss_key
1949  *   RSS key for the Rx hash queue.
1950  * @param rss_key_len
1951  *   RSS key length.
1952  * @param hash_fields
1953  *   Verbs protocol hash field to make the RSS on.
1954  * @param queues
1955  *   Queues entering the hash queue. In case of empty hash_fields only the
1956  *   first queue index will be taken for the indirection table.
1957  * @param queues_n
1958  *   Number of queues.
1959  * @param tunnel
1960  *   Tunnel type.
1961  *
1962  * @return
1963  *   The index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
1964  */
1965 uint32_t
1966 mlx5_hrxq_new(struct rte_eth_dev *dev,
1967               const uint8_t *rss_key, uint32_t rss_key_len,
1968               uint64_t hash_fields,
1969               const uint16_t *queues, uint32_t queues_n,
1970               int tunnel __rte_unused)
1971 {
1972         struct mlx5_priv *priv = dev->data->dev_private;
1973         struct mlx5_hrxq *hrxq = NULL;
1974         uint32_t hrxq_idx = 0;
1975         struct ibv_qp *qp = NULL;
1976         struct mlx5_ind_table_obj *ind_tbl;
1977         int err;
1978         struct mlx5_devx_obj *tir = NULL;
1979         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
1980         struct mlx5_rxq_ctrl *rxq_ctrl =
1981                 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1982
1983         queues_n = hash_fields ? queues_n : 1;
1984         ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
1985         if (!ind_tbl) {
1986                 enum mlx5_ind_tbl_type type;
1987
1988                 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
1989                                 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
1990                 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
1991         }
1992         if (!ind_tbl) {
1993                 rte_errno = ENOMEM;
1994                 return 0;
1995         }
1996         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1997 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1998                 struct mlx5dv_qp_init_attr qp_init_attr;
1999
2000                 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2001                 if (tunnel) {
2002                         qp_init_attr.comp_mask =
2003                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2004                         qp_init_attr.create_flags =
2005                                 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2006                 }
2007 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2008                 if (dev->data->dev_conf.lpbk_mode) {
2009                         /*
2010                          * Allow packets sent from the NIC to loop back
2011                          * without a source MAC check.
2012                          */
2013                         qp_init_attr.comp_mask |=
2014                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2015                         qp_init_attr.create_flags |=
2016                                 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2017                 }
2018 #endif
2019                 qp = mlx5_glue->dv_create_qp
2020                         (priv->sh->ctx,
2021                          &(struct ibv_qp_init_attr_ex){
2022                                 .qp_type = IBV_QPT_RAW_PACKET,
2023                                 .comp_mask =
2024                                         IBV_QP_INIT_ATTR_PD |
2025                                         IBV_QP_INIT_ATTR_IND_TABLE |
2026                                         IBV_QP_INIT_ATTR_RX_HASH,
2027                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
2028                                         .rx_hash_function =
2029                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
2030                                         .rx_hash_key_len = rss_key_len,
2031                                         .rx_hash_key =
2032                                                 (void *)(uintptr_t)rss_key,
2033                                         .rx_hash_fields_mask = hash_fields,
2034                                 },
2035                                 .rwq_ind_tbl = ind_tbl->ind_table,
2036                                 .pd = priv->sh->pd,
2037                           },
2038                           &qp_init_attr);
2039 #else
2040                 qp = mlx5_glue->create_qp_ex
2041                         (priv->sh->ctx,
2042                          &(struct ibv_qp_init_attr_ex){
2043                                 .qp_type = IBV_QPT_RAW_PACKET,
2044                                 .comp_mask =
2045                                         IBV_QP_INIT_ATTR_PD |
2046                                         IBV_QP_INIT_ATTR_IND_TABLE |
2047                                         IBV_QP_INIT_ATTR_RX_HASH,
2048                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
2049                                         .rx_hash_function =
2050                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
2051                                         .rx_hash_key_len = rss_key_len,
2052                                         .rx_hash_key =
2053                                                 (void *)(uintptr_t)rss_key,
2054                                         .rx_hash_fields_mask = hash_fields,
2055                                 },
2056                                 .rwq_ind_tbl = ind_tbl->ind_table,
2057                                 .pd = priv->sh->pd,
2058                          });
2059 #endif
2060                 if (!qp) {
2061                         rte_errno = errno;
2062                         goto error;
2063                 }
2064         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2065                 struct mlx5_devx_tir_attr tir_attr;
2066                 uint32_t i;
2067                 uint32_t lro = 1;
2068
2069                 /* Enable TIR LRO only if all the queues were configured for it. */
2070                 for (i = 0; i < queues_n; ++i) {
2071                         if (!(*priv->rxqs)[queues[i]]->lro) {
2072                                 lro = 0;
2073                                 break;
2074                         }
2075                 }
2076                 memset(&tir_attr, 0, sizeof(tir_attr));
2077                 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2078                 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2079                 tir_attr.tunneled_offload_en = !!tunnel;
2080                 /* If needed, translate hash_fields bitmap to PRM format. */
2081                 if (hash_fields) {
2082 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2083                         struct mlx5_rx_hash_field_select *rx_hash_field_select =
2084                                         hash_fields & IBV_RX_HASH_INNER ?
2085                                         &tir_attr.rx_hash_field_selector_inner :
2086                                         &tir_attr.rx_hash_field_selector_outer;
2087 #else
2088                         struct mlx5_rx_hash_field_select *rx_hash_field_select =
2089                                         &tir_attr.rx_hash_field_selector_outer;
2090 #endif
2091
2092                         /* 1 bit: 0: IPv4, 1: IPv6. */
2093                         rx_hash_field_select->l3_prot_type =
2094                                 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2095                         /* 1 bit: 0: TCP, 1: UDP. */
2096                         rx_hash_field_select->l4_prot_type =
2097                                 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2098                         /* Bitmask which sets which fields to use in RX Hash. */
2099                         rx_hash_field_select->selected_fields =
2100                         ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2101                          MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2102                         (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2103                          MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
2104                         (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2105                          MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
2106                         (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2107                          MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
2108                 }
2109                 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2110                         tir_attr.transport_domain = priv->sh->td->id;
2111                 else
2112                         tir_attr.transport_domain = priv->sh->tdn;
2113                 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
2114                        MLX5_RSS_HASH_KEY_LEN);
2115                 tir_attr.indirect_table = ind_tbl->rqt->id;
2116                 if (dev->data->dev_conf.lpbk_mode)
2117                         tir_attr.self_lb_block =
2118                                         MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2119                 if (lro) {
2120                         tir_attr.lro_timeout_period_usecs =
2121                                         priv->config.lro.timeout;
2122                         tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2123                         tir_attr.lro_enable_mask =
2124                                         MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2125                                         MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2126                 }
2127                 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2128                 if (!tir) {
2129                         DRV_LOG(ERR, "port %u cannot create DevX TIR",
2130                                 dev->data->port_id);
2131                         rte_errno = errno;
2132                         goto error;
2133                 }
2134         }
2135         hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2136         if (!hrxq)
2137                 goto error;
2138         hrxq->ind_table = ind_tbl;
2139         if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2140                 hrxq->qp = qp;
2141 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2142                 hrxq->action =
2143                         mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2144                 if (!hrxq->action) {
2145                         rte_errno = errno;
2146                         goto error;
2147                 }
2148 #endif
2149         } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2150                 hrxq->tir = tir;
2151 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2152                 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2153                                                         (hrxq->tir->obj);
2154                 if (!hrxq->action) {
2155                         rte_errno = errno;
2156                         goto error;
2157                 }
2158 #endif
2159         }
2160         hrxq->rss_key_len = rss_key_len;
2161         hrxq->hash_fields = hash_fields;
2162         memcpy(hrxq->rss_key, rss_key, rss_key_len);
2163         rte_atomic32_inc(&hrxq->refcnt);
2164         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2165                      hrxq, next);
2166         return hrxq_idx;
2167 error:
2168         err = rte_errno; /* Save rte_errno before cleanup. */
2169         mlx5_ind_table_obj_release(dev, ind_tbl);
2170         if (qp)
2171                 claim_zero(mlx5_glue->destroy_qp(qp));
2172         else if (tir)
2173                 claim_zero(mlx5_devx_cmd_destroy(tir));
2174         if (hrxq)
2175                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2176         rte_errno = err; /* Restore rte_errno. */
2177         return 0;
2178 }
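
/*
 * Illustrative reading of the DevX hash field translation above: for an
 * outer IPv4/UDP 4-tuple RSS request (no IBV_RX_HASH_INNER and no IPv6 or
 * TCP bits in hash_fields), l3_prot_type ends up 0 (IPv4), l4_prot_type
 * ends up 1 (UDP), and selected_fields has the SRC_IP, DST_IP, L4_SPORT
 * and L4_DPORT bits set in the outer rx_hash_field_selector.
 */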
2179
2180 /**
2181  * Get an Rx Hash queue.
2182  *
2183  * @param dev
2184  *   Pointer to Ethernet device.
2185  * @param rss_key
2186  *   RSS key for the Rx hash queue.
2187  * @param queues
2188  *   Queues entering the hash queue. In case of empty hash_fields only the
2189  *   first queue index will be taken for the indirection table.
2190  * @param queues_n
2191  *   Number of queues.
2192  *
2193  * @return
2194  *   A hash Rx queue index on success.
2195  */
2196 uint32_t
2197 mlx5_hrxq_get(struct rte_eth_dev *dev,
2198               const uint8_t *rss_key, uint32_t rss_key_len,
2199               uint64_t hash_fields,
2200               const uint16_t *queues, uint32_t queues_n)
2201 {
2202         struct mlx5_priv *priv = dev->data->dev_private;
2203         struct mlx5_hrxq *hrxq;
2204         uint32_t idx;
2205
2206         queues_n = hash_fields ? queues_n : 1;
2207         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2208                       hrxq, next) {
2209                 struct mlx5_ind_table_obj *ind_tbl;
2210
2211                 if (hrxq->rss_key_len != rss_key_len)
2212                         continue;
2213                 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2214                         continue;
2215                 if (hrxq->hash_fields != hash_fields)
2216                         continue;
2217                 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2218                 if (!ind_tbl)
2219                         continue;
2220                 if (ind_tbl != hrxq->ind_table) {
2221                         mlx5_ind_table_obj_release(dev, ind_tbl);
2222                         continue;
2223                 }
2224                 rte_atomic32_inc(&hrxq->refcnt);
2225                 return idx;
2226         }
2227         return 0;
2228 }
2229
2230 /**
2231  * Release the hash Rx queue.
2232  *
2233  * @param dev
2234  *   Pointer to Ethernet device.
2235  * @param hrxq
2236  *   Index to Hash Rx queue to release.
2237  *
2238  * @return
2239  *   1 while a reference on it exists, 0 when freed.
2240  */
2241 int
2242 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2243 {
2244         struct mlx5_priv *priv = dev->data->dev_private;
2245         struct mlx5_hrxq *hrxq;
2246
2247         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2248         if (!hrxq)
2249                 return 0;
2250         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2251 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2252                 mlx5_glue->destroy_flow_action(hrxq->action);
2253 #endif
2254                 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2255                         claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2256                 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2257                         claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2258                 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2259                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2260                              hrxq_idx, hrxq, next);
2261                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2262                 return 0;
2263         }
2264         claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2265         return 1;
2266 }
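
/*
 * Usage sketch (illustrative only, with hypothetical queue and hash field
 * values): flow code typically looks the hash Rx queue up first and only
 * creates a new one on a cache miss; the returned index is dropped later
 * with mlx5_hrxq_release().
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     uint32_t idx = mlx5_hrxq_get(dev, rss_hash_default_key,
 *                                  MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *                                  queues, RTE_DIM(queues));
 *
 *     if (!idx)
 *             idx = mlx5_hrxq_new(dev, rss_hash_default_key,
 *                                 MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *                                 queues, RTE_DIM(queues), 0);
 */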
2267
2268 /**
2269  * Verify the hash Rx queue list is empty.
2270  *
2271  * @param dev
2272  *   Pointer to Ethernet device.
2273  *
2274  * @return
2275  *   The number of objects not released.
2276  */
2277 int
2278 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2279 {
2280         struct mlx5_priv *priv = dev->data->dev_private;
2281         struct mlx5_hrxq *hrxq;
2282         uint32_t idx;
2283         int ret = 0;
2284
2285         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2286                       hrxq, next) {
2287                 DRV_LOG(DEBUG,
2288                         "port %u hash Rx queue %p still referenced",
2289                         dev->data->port_id, (void *)hrxq);
2290                 ++ret;
2291         }
2292         return ret;
2293 }
2294
2295 /**
2296  * Create a drop Rx queue Verbs/DevX object.
2297  *
2298  * @param dev
2299  *   Pointer to Ethernet device.
2300  *
2301  * @return
2302  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2303  */
2304 static struct mlx5_rxq_obj *
2305 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2306 {
2307         struct mlx5_priv *priv = dev->data->dev_private;
2308         struct ibv_context *ctx = priv->sh->ctx;
2309         struct ibv_cq *cq;
2310         struct ibv_wq *wq = NULL;
2311         struct mlx5_rxq_obj *rxq;
2312
2313         if (priv->drop_queue.rxq)
2314                 return priv->drop_queue.rxq;
2315         cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2316         if (!cq) {
2317                 DEBUG("port %u cannot allocate CQ for drop queue",
2318                       dev->data->port_id);
2319                 rte_errno = errno;
2320                 goto error;
2321         }
2322         wq = mlx5_glue->create_wq(ctx,
2323                  &(struct ibv_wq_init_attr){
2324                         .wq_type = IBV_WQT_RQ,
2325                         .max_wr = 1,
2326                         .max_sge = 1,
2327                         .pd = priv->sh->pd,
2328                         .cq = cq,
2329                  });
2330         if (!wq) {
2331                 DEBUG("port %u cannot allocate WQ for drop queue",
2332                       dev->data->port_id);
2333                 rte_errno = errno;
2334                 goto error;
2335         }
2336         rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
2337         if (!rxq) {
2338                 DEBUG("port %u cannot allocate drop Rx queue memory",
2339                       dev->data->port_id);
2340                 rte_errno = ENOMEM;
2341                 goto error;
2342         }
2343         rxq->ibv_cq = cq;
2344         rxq->wq = wq;
2345         priv->drop_queue.rxq = rxq;
2346         return rxq;
2347 error:
2348         if (wq)
2349                 claim_zero(mlx5_glue->destroy_wq(wq));
2350         if (cq)
2351                 claim_zero(mlx5_glue->destroy_cq(cq));
2352         return NULL;
2353 }
2354
2355 /**
2356  * Release a drop Rx queue Verbs/DevX object.
2357  *
2358  * @param dev
2359  *   Pointer to Ethernet device.
2363  */
2364 static void
2365 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2366 {
2367         struct mlx5_priv *priv = dev->data->dev_private;
2368         struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2369
2370         if (rxq->wq)
2371                 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2372         if (rxq->ibv_cq)
2373                 claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
2374         mlx5_free(rxq);
2375         priv->drop_queue.rxq = NULL;
2376 }
2377
2378 /**
2379  * Create a drop indirection table.
2380  *
2381  * @param dev
2382  *   Pointer to Ethernet device.
2383  *
2384  * @return
2385  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2386  */
2387 static struct mlx5_ind_table_obj *
2388 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2389 {
2390         struct mlx5_priv *priv = dev->data->dev_private;
2391         struct mlx5_ind_table_obj *ind_tbl;
2392         struct mlx5_rxq_obj *rxq;
2393         struct mlx5_ind_table_obj tmpl;
2394
2395         rxq = mlx5_rxq_obj_drop_new(dev);
2396         if (!rxq)
2397                 return NULL;
2398         tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2399                 (priv->sh->ctx,
2400                  &(struct ibv_rwq_ind_table_init_attr){
2401                         .log_ind_tbl_size = 0,
2402                         .ind_tbl = (struct ibv_wq **)&rxq->wq,
2403                         .comp_mask = 0,
2404                  });
2405         if (!tmpl.ind_table) {
2406                 DEBUG("port %u cannot allocate indirection table for drop"
2407                       " queue",
2408                       dev->data->port_id);
2409                 rte_errno = errno;
2410                 goto error;
2411         }
2412         ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
2413                               SOCKET_ID_ANY);
2414         if (!ind_tbl) {
2415                 rte_errno = ENOMEM;
2416                 goto error;
2417         }
2418         ind_tbl->ind_table = tmpl.ind_table;
2419         return ind_tbl;
2420 error:
2421         mlx5_rxq_obj_drop_release(dev);
2422         return NULL;
2423 }
2424
2425 /**
2426  * Release a drop indirection table.
2427  *
2428  * @param dev
2429  *   Pointer to Ethernet device.
2430  */
2431 static void
2432 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2433 {
2434         struct mlx5_priv *priv = dev->data->dev_private;
2435         struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2436
2437         claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2438         mlx5_rxq_obj_drop_release(dev);
2439         mlx5_free(ind_tbl);
2440         priv->drop_queue.hrxq->ind_table = NULL;
2441 }
2442
2443 /**
2444  * Create a drop Rx Hash queue.
2445  *
2446  * @param dev
2447  *   Pointer to Ethernet device.
2448  *
2449  * @return
2450  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2451  */
2452 struct mlx5_hrxq *
2453 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2454 {
2455         struct mlx5_priv *priv = dev->data->dev_private;
2456         struct mlx5_ind_table_obj *ind_tbl = NULL;
2457         struct ibv_qp *qp = NULL;
2458         struct mlx5_hrxq *hrxq = NULL;
2459
2460         if (priv->drop_queue.hrxq) {
2461                 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2462                 return priv->drop_queue.hrxq;
2463         }
2464         hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2465         if (!hrxq) {
2466                 DRV_LOG(WARNING,
2467                         "port %u cannot allocate memory for drop queue",
2468                         dev->data->port_id);
2469                 rte_errno = ENOMEM;
2470                 goto error;
2471         }
2472         priv->drop_queue.hrxq = hrxq;
2473         ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2474         if (!ind_tbl)
2475                 goto error;
2476         hrxq->ind_table = ind_tbl;
2477         qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2478                  &(struct ibv_qp_init_attr_ex){
2479                         .qp_type = IBV_QPT_RAW_PACKET,
2480                         .comp_mask =
2481                                 IBV_QP_INIT_ATTR_PD |
2482                                 IBV_QP_INIT_ATTR_IND_TABLE |
2483                                 IBV_QP_INIT_ATTR_RX_HASH,
2484                         .rx_hash_conf = (struct ibv_rx_hash_conf){
2485                                 .rx_hash_function =
2486                                         IBV_RX_HASH_FUNC_TOEPLITZ,
2487                                 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2488                                 .rx_hash_key = rss_hash_default_key,
2489                                 .rx_hash_fields_mask = 0,
2490                                 },
2491                         .rwq_ind_tbl = ind_tbl->ind_table,
2492                         .pd = priv->sh->pd
2493                  });
2494         if (!qp) {
2495                 DEBUG("port %u cannot allocate QP for drop queue",
2496                       dev->data->port_id);
2497                 rte_errno = errno;
2498                 goto error;
2499         }
2500         hrxq->qp = qp;
2501 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2502         hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2503         if (!hrxq->action) {
2504                 rte_errno = errno;
2505                 goto error;
2506         }
2507 #endif
2508         rte_atomic32_set(&hrxq->refcnt, 1);
2509         return hrxq;
2510 error:
2511 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2512         if (hrxq && hrxq->action)
2513                 mlx5_glue->destroy_flow_action(hrxq->action);
2514 #endif
2515         if (qp)
2516                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2517         if (ind_tbl)
2518                 mlx5_ind_table_obj_drop_release(dev);
2519         if (hrxq) {
2520                 priv->drop_queue.hrxq = NULL;
2521                 mlx5_free(hrxq);
2522         }
2523         return NULL;
2524 }
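
/*
 * Usage sketch (illustrative only): the drop hash Rx queue is reference
 * counted through priv->drop_queue.hrxq, so each successful
 * mlx5_hrxq_drop_new() call is expected to be matched by one
 * mlx5_hrxq_drop_release() call, e.g. when the owning drop flow is
 * destroyed.
 *
 *     struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *     if (drop == NULL)
 *             return -rte_errno;
 *     ... attach drop->qp or drop->action to the flow ...
 *     mlx5_hrxq_drop_release(dev);
 */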
2525
2526 /**
2527  * Release a drop hash Rx queue.
2528  *
2529  * @param dev
2530  *   Pointer to Ethernet device.
2531  */
2532 void
2533 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2534 {
2535         struct mlx5_priv *priv = dev->data->dev_private;
2536         struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2537
2538         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2539 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2540                 mlx5_glue->destroy_flow_action(hrxq->action);
2541 #endif
2542                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2543                 mlx5_ind_table_obj_drop_release(dev);
2544                 mlx5_free(hrxq);
2545                 priv->drop_queue.hrxq = NULL;
2546         }
2547 }
2548
2550 /**
2551  * Set the Rx queue timestamp conversion parameters
2552  *
2553  * @param[in] dev
2554  *   Pointer to the Ethernet device structure.
2555  */
2556 void
2557 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2558 {
2559         struct mlx5_priv *priv = dev->data->dev_private;
2560         struct mlx5_dev_ctx_shared *sh = priv->sh;
2561         struct mlx5_rxq_data *data;
2562         unsigned int i;
2563
2564         for (i = 0; i != priv->rxqs_n; ++i) {
2565                 if (!(*priv->rxqs)[i])
2566                         continue;
2567                 data = (*priv->rxqs)[i];
2568                 data->sh = sh;
2569                 data->rt_timestamp = priv->config.rt_timestamp;
2570         }
2571 }