ethdev: new Rx/Tx offloads API
[dpdk.git] / drivers / net / mlx5 / mlx5_rxq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5
6 #include <stddef.h>
7 #include <assert.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <fcntl.h>
12 #include <sys/queue.h>
13
14 /* Verbs header. */
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic ignored "-Wpedantic"
18 #endif
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
21 #ifdef PEDANTIC
22 #pragma GCC diagnostic error "-Wpedantic"
23 #endif
24
25 #include <rte_mbuf.h>
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
31 #include <rte_io.h>
32
33 #include "mlx5.h"
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
39
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42         0x2c, 0xc6, 0x81, 0xd1,
43         0x5b, 0xdb, 0xf4, 0xf7,
44         0xfc, 0xa2, 0x83, 0x19,
45         0xdb, 0x1a, 0x3e, 0x94,
46         0x6b, 0x9e, 0x38, 0xd9,
47         0x2c, 0x9c, 0x03, 0xd1,
48         0xad, 0x99, 0x44, 0xa7,
49         0xd9, 0x56, 0x3d, 0x59,
50         0x06, 0x3c, 0x25, 0xf3,
51         0xfc, 0x1f, 0xdc, 0x2a,
52 };
53
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
56
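/*
 * Illustrative, application-side sketch (not part of this driver): an
 * application may override the default key above by supplying its own
 * 40-byte key through the standard ethdev RSS configuration. The values
 * below (port_id, queue counts, key contents) are hypothetical.
 *
 *   static uint8_t my_rss_key[40] = { 0x6d, 0x5a, ... };
 *   struct rte_eth_conf dev_conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *           .rx_adv_conf.rss_conf = {
 *                   .rss_key = my_rss_key,
 *                   .rss_key_len = sizeof(my_rss_key),
 *                   .rss_hf = ETH_RSS_IP,
 *           },
 *   };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &dev_conf);
 */
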
57 /**
58  * Allocate RX queue elements.
59  *
60  * @param rxq_ctrl
61  *   Pointer to RX queue structure.
62  *
63  * @return
64  *   0 on success, a negative errno value otherwise and rte_errno is set.
65  */
66 int
67 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
68 {
69         const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
70         unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
71         unsigned int i;
72         int err;
73
74         /* Iterate on segments. */
75         for (i = 0; (i != elts_n); ++i) {
76                 struct rte_mbuf *buf;
77
78                 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
79                 if (buf == NULL) {
80                         DRV_LOG(ERR, "port %u empty mbuf pool",
81                                 PORT_ID(rxq_ctrl->priv));
82                         rte_errno = ENOMEM;
83                         goto error;
84                 }
85                 /* Headroom is reserved by rte_pktmbuf_alloc(). */
86                 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
87                 /* Buffer is supposed to be empty. */
88                 assert(rte_pktmbuf_data_len(buf) == 0);
89                 assert(rte_pktmbuf_pkt_len(buf) == 0);
90                 assert(!buf->next);
91                 /* Only the first segment keeps headroom. */
92                 if (i % sges_n)
93                         SET_DATA_OFF(buf, 0);
94                 PORT(buf) = rxq_ctrl->rxq.port_id;
95                 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
96                 PKT_LEN(buf) = DATA_LEN(buf);
97                 NB_SEGS(buf) = 1;
98                 (*rxq_ctrl->rxq.elts)[i] = buf;
99         }
100         /* If Rx vector is activated. */
101         if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
102                 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
103                 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
104                 int j;
105
106                 /* Initialize default rearm_data for vPMD. */
107                 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
108                 rte_mbuf_refcnt_set(mbuf_init, 1);
109                 mbuf_init->nb_segs = 1;
110                 mbuf_init->port = rxq->port_id;
111                 /*
112                  * prevent compiler reordering:
113                  * rearm_data covers previous fields.
114                  */
115                 rte_compiler_barrier();
116                 rxq->mbuf_initializer =
117                         *(uint64_t *)&mbuf_init->rearm_data;
118                 /* Padding with a fake mbuf for vectorized Rx. */
119                 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
120                         (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
121         }
122         DRV_LOG(DEBUG,
123                 "port %u Rx queue %u allocated and configured %u segments"
124                 " (max %u packets)",
125                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
126                 elts_n / (1 << rxq_ctrl->rxq.sges_n));
127         return 0;
128 error:
129         err = rte_errno; /* Save rte_errno before cleanup. */
130         elts_n = i;
131         for (i = 0; (i != elts_n); ++i) {
132                 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
133                         rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
134                 (*rxq_ctrl->rxq.elts)[i] = NULL;
135         }
136         DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
137                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
138         rte_errno = err; /* Restore rte_errno. */
139         return -rte_errno;
140 }
141
142 /**
143  * Free RX queue elements.
144  *
145  * @param rxq_ctrl
146  *   Pointer to RX queue structure.
147  */
148 static void
149 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
150 {
151         struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
152         const uint16_t q_n = (1 << rxq->elts_n);
153         const uint16_t q_mask = q_n - 1;
154         uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
155         uint16_t i;
156
157         DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
158                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
159         if (rxq->elts == NULL)
160                 return;
161          /*
162           * Some mbufs in the ring still belong to the application.
163           * They cannot be freed.
164           */
165         if (mlx5_rxq_check_vec_support(rxq) > 0) {
166                 for (i = 0; i < used; ++i)
167                         (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
168                 rxq->rq_pi = rxq->rq_ci;
169         }
170         for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
171                 if ((*rxq->elts)[i] != NULL)
172                         rte_pktmbuf_free_seg((*rxq->elts)[i]);
173                 (*rxq->elts)[i] = NULL;
174         }
175 }
176
177 /**
178  * Clean up a RX queue.
179  *
180  * Destroy objects, free allocated memory and reset the structure for reuse.
181  *
182  * @param rxq_ctrl
183  *   Pointer to RX queue structure.
184  */
185 void
186 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
187 {
188         DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
189                 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
190         if (rxq_ctrl->ibv)
191                 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
192         memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
193 }
194
195 /**
196  * Returns the per-queue supported offloads.
197  *
198  * @param dev
199  *   Pointer to Ethernet device.
200  *
201  * @return
202  *   Supported Rx offloads.
203  */
204 uint64_t
205 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
206 {
207         struct priv *priv = dev->data->dev_private;
208         struct mlx5_dev_config *config = &priv->config;
209         uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
210                              DEV_RX_OFFLOAD_TIMESTAMP |
211                              DEV_RX_OFFLOAD_JUMBO_FRAME);
212
213         if (config->hw_fcs_strip)
214                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
215         if (config->hw_csum)
216                 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
217                              DEV_RX_OFFLOAD_UDP_CKSUM |
218                              DEV_RX_OFFLOAD_TCP_CKSUM);
219         if (config->hw_vlan_strip)
220                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
221         return offloads;
222 }
223
224
225 /**
226  * Returns the per-port supported offloads.
227  *
228  * @return
229  *   Supported Rx offloads.
230  */
231 uint64_t
232 mlx5_get_rx_port_offloads(void)
233 {
234         uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
235
236         return offloads;
237 }
238
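/*
 * Illustrative, application-side sketch (not part of this driver): under the
 * new offloads API, the capabilities reported by the two helpers above reach
 * the application through rte_eth_dev_info_get(), which fills
 * rx_offload_capa and rx_queue_offload_capa; per-queue offloads are then
 * requested at queue setup time. port_id, queue_id and mbuf_pool are
 * hypothetical.
 *
 *   struct rte_eth_dev_info dev_info;
 *   struct rte_eth_rxconf rxconf;
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   rxconf = dev_info.default_rxconf;
 *   if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
 *           rxconf.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 *   rte_eth_rx_queue_setup(port_id, queue_id, 512, rte_socket_id(),
 *                          &rxconf, mbuf_pool);
 */
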
239 /**
240  * DPDK callback to configure a RX queue.
241  * @param dev
242  *   Pointer to Ethernet device structure.
243  * @param idx
244  *   RX queue index.
245  * @param desc
246  *   Number of descriptors to configure in queue.
247  * @param socket
248  *   NUMA socket on which memory must be allocated.
249  * @param[in] conf
250  *   Thresholds parameters.
251  * @param mp
252  *   Memory pool for buffer allocations.
253  *
254  * @return
255  *   0 on success, a negative errno value otherwise and rte_errno is set.
256  */
257 int
258 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
259                     unsigned int socket, const struct rte_eth_rxconf *conf,
260                     struct rte_mempool *mp)
261 {
262         struct priv *priv = dev->data->dev_private;
263         struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
264         struct mlx5_rxq_ctrl *rxq_ctrl =
265                 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
266
267         if (!rte_is_power_of_2(desc)) {
268                 desc = 1 << log2above(desc);
269                 DRV_LOG(WARNING,
270                         "port %u increased number of descriptors in Rx queue %u"
271                         " to the next power of two (%d)",
272                         dev->data->port_id, idx, desc);
273         }
274         DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
275                 dev->data->port_id, idx, desc);
276         if (idx >= priv->rxqs_n) {
277                 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
278                         dev->data->port_id, idx, priv->rxqs_n);
279                 rte_errno = EOVERFLOW;
280                 return -rte_errno;
281         }
282         if (!mlx5_rxq_releasable(dev, idx)) {
283                 DRV_LOG(ERR, "port %u unable to release queue index %u",
284                         dev->data->port_id, idx);
285                 rte_errno = EBUSY;
286                 return -rte_errno;
287         }
288         mlx5_rxq_release(dev, idx);
289         rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
290         if (!rxq_ctrl) {
291                 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
292                         dev->data->port_id, idx);
293                 rte_errno = ENOMEM;
294                 return -rte_errno;
295         }
296         DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
297                 dev->data->port_id, idx);
298         (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
299         return 0;
300 }
301
302 /**
303  * DPDK callback to release a RX queue.
304  *
305  * @param dpdk_rxq
306  *   Generic RX queue pointer.
307  */
308 void
309 mlx5_rx_queue_release(void *dpdk_rxq)
310 {
311         struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
312         struct mlx5_rxq_ctrl *rxq_ctrl;
313         struct priv *priv;
314
315         if (rxq == NULL)
316                 return;
317         rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
318         priv = rxq_ctrl->priv;
319         if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
320                 rte_panic("port %u Rx queue %u is still used by a flow and"
321                           " cannot be removed\n",
322                           PORT_ID(priv), rxq_ctrl->idx);
323         mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
324 }
325
326 /**
327  * Allocate queue vector and fill epoll fd list for Rx interrupts.
328  *
329  * @param dev
330  *   Pointer to Ethernet device.
331  *
332  * @return
333  *   0 on success, a negative errno value otherwise and rte_errno is set.
334  */
335 int
336 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
337 {
338         struct priv *priv = dev->data->dev_private;
339         unsigned int i;
340         unsigned int rxqs_n = priv->rxqs_n;
341         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
342         unsigned int count = 0;
343         struct rte_intr_handle *intr_handle = dev->intr_handle;
344
345         if (!dev->data->dev_conf.intr_conf.rxq)
346                 return 0;
347         mlx5_rx_intr_vec_disable(dev);
348         intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
349         if (intr_handle->intr_vec == NULL) {
350                 DRV_LOG(ERR,
351                         "port %u failed to allocate memory for interrupt"
352                         " vector, Rx interrupts will not be supported",
353                         dev->data->port_id);
354                 rte_errno = ENOMEM;
355                 return -rte_errno;
356         }
357         intr_handle->type = RTE_INTR_HANDLE_EXT;
358         for (i = 0; i != n; ++i) {
359                 /* This rxq ibv must not be released in this function. */
360                 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
361                 int fd;
362                 int flags;
363                 int rc;
364
365                 /* Skip queues that cannot request interrupts. */
366                 if (!rxq_ibv || !rxq_ibv->channel) {
367                         /* Use invalid intr_vec[] index to disable entry. */
368                         intr_handle->intr_vec[i] =
369                                 RTE_INTR_VEC_RXTX_OFFSET +
370                                 RTE_MAX_RXTX_INTR_VEC_ID;
371                         continue;
372                 }
373                 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
374                         DRV_LOG(ERR,
375                                 "port %u too many Rx queues for interrupt"
376                                 " vector size (%d), Rx interrupts cannot be"
377                                 " enabled",
378                                 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
379                         mlx5_rx_intr_vec_disable(dev);
380                         rte_errno = ENOMEM;
381                         return -rte_errno;
382                 }
383                 fd = rxq_ibv->channel->fd;
384                 flags = fcntl(fd, F_GETFL);
385                 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
386                 if (rc < 0) {
387                         rte_errno = errno;
388                         DRV_LOG(ERR,
389                                 "port %u failed to make Rx interrupt file"
390                                 " descriptor %d non-blocking for queue index"
391                                 " %d",
392                                 dev->data->port_id, fd, i);
393                         mlx5_rx_intr_vec_disable(dev);
394                         return -rte_errno;
395                 }
396                 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
397                 intr_handle->efds[count] = fd;
398                 count++;
399         }
400         if (!count)
401                 mlx5_rx_intr_vec_disable(dev);
402         else
403                 intr_handle->nb_efd = count;
404         return 0;
405 }
406
407 /**
408  * Clean up Rx interrupts handler.
409  *
410  * @param dev
411  *   Pointer to Ethernet device.
412  */
413 void
414 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
415 {
416         struct priv *priv = dev->data->dev_private;
417         struct rte_intr_handle *intr_handle = dev->intr_handle;
418         unsigned int i;
419         unsigned int rxqs_n = priv->rxqs_n;
420         unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
421
422         if (!dev->data->dev_conf.intr_conf.rxq)
423                 return;
424         if (!intr_handle->intr_vec)
425                 goto free;
426         for (i = 0; i != n; ++i) {
427                 struct mlx5_rxq_ctrl *rxq_ctrl;
428                 struct mlx5_rxq_data *rxq_data;
429
430                 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
431                     RTE_MAX_RXTX_INTR_VEC_ID)
432                         continue;
433                 /*
434                  * Need to access the queue directly to release the
435                  * reference kept in mlx5_rx_intr_vec_enable().
436                  */
437                 rxq_data = (*priv->rxqs)[i];
438                 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
439                 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
440         }
441 free:
442         rte_intr_free_epoll_fd(intr_handle);
443         if (intr_handle->intr_vec)
444                 free(intr_handle->intr_vec);
445         intr_handle->nb_efd = 0;
446         intr_handle->intr_vec = NULL;
447 }
448
449 /**
450  * MLX5 CQ notification.
451  *
452  * @param rxq
453  *   Pointer to receive queue structure.
454  * @param sq_n_rxq
455  *   Sequence number per receive queue.
456  */
457 static inline void
458 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
459 {
460         int sq_n = 0;
461         uint32_t doorbell_hi;
462         uint64_t doorbell;
463         void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
464
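        /*
         * The CQ doorbell is a 64-bit word: the high 32 bits carry the arm
         * sequence number and the current CQ consumer index (the same word
         * is stored in the arm doorbell record below), the low 32 bits
         * carry the CQ number.
         */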
465         sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
466         doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
467         doorbell = (uint64_t)doorbell_hi << 32;
468         doorbell |=  rxq->cqn;
469         rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
470         rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
471 }
472
473 /**
474  * DPDK callback for Rx queue interrupt enable.
475  *
476  * @param dev
477  *   Pointer to Ethernet device structure.
478  * @param rx_queue_id
479  *   Rx queue number.
480  *
481  * @return
482  *   0 on success, a negative errno value otherwise and rte_errno is set.
483  */
484 int
485 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
486 {
487         struct priv *priv = dev->data->dev_private;
488         struct mlx5_rxq_data *rxq_data;
489         struct mlx5_rxq_ctrl *rxq_ctrl;
490
491         rxq_data = (*priv->rxqs)[rx_queue_id];
492         if (!rxq_data) {
493                 rte_errno = EINVAL;
494                 return -rte_errno;
495         }
496         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
497         if (rxq_ctrl->irq) {
498                 struct mlx5_rxq_ibv *rxq_ibv;
499
500                 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
501                 if (!rxq_ibv) {
502                         rte_errno = EINVAL;
503                         return -rte_errno;
504                 }
505                 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
506                 mlx5_rxq_ibv_release(rxq_ibv);
507         }
508         return 0;
509 }
510
511 /**
512  * DPDK callback for Rx queue interrupt disable.
513  *
514  * @param dev
515  *   Pointer to Ethernet device structure.
516  * @param rx_queue_id
517  *   Rx queue number.
518  *
519  * @return
520  *   0 on success, a negative errno value otherwise and rte_errno is set.
521  */
522 int
523 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
524 {
525         struct priv *priv = dev->data->dev_private;
526         struct mlx5_rxq_data *rxq_data;
527         struct mlx5_rxq_ctrl *rxq_ctrl;
528         struct mlx5_rxq_ibv *rxq_ibv = NULL;
529         struct ibv_cq *ev_cq;
530         void *ev_ctx;
531         int ret;
532
533         rxq_data = (*priv->rxqs)[rx_queue_id];
534         if (!rxq_data) {
535                 rte_errno = EINVAL;
536                 return -rte_errno;
537         }
538         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
539         if (!rxq_ctrl->irq)
540                 return 0;
541         rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
542         if (!rxq_ibv) {
543                 rte_errno = EINVAL;
544                 return -rte_errno;
545         }
546         ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
547         if (ret || ev_cq != rxq_ibv->cq) {
548                 rte_errno = EINVAL;
549                 goto exit;
550         }
551         rxq_data->cq_arm_sn++;
552         mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
553         return 0;
554 exit:
555         ret = rte_errno; /* Save rte_errno before cleanup. */
556         if (rxq_ibv)
557                 mlx5_rxq_ibv_release(rxq_ibv);
558         DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
559                 dev->data->port_id, rx_queue_id);
560         rte_errno = ret; /* Restore rte_errno. */
561         return -rte_errno;
562 }
563
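/*
 * Illustrative, application-side sketch (not part of this driver): the two
 * callbacks above back the generic Rx interrupt API. A polling thread would
 * typically register the queue with an epoll set, arm the interrupt, sleep
 * until traffic arrives and disarm it before resuming polling. port_id,
 * queue_id and timeout_ms are hypothetical.
 *
 *   struct rte_epoll_event event;
 *
 *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
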
564 /**
565  * Create the Rx queue Verbs object.
566  *
567  * @param dev
568  *   Pointer to Ethernet device.
569  * @param idx
570  *   Queue index in DPDK Rx queue array.
571  *
572  * @return
573  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
574  */
575 struct mlx5_rxq_ibv *
576 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
577 {
578         struct priv *priv = dev->data->dev_private;
579         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
580         struct mlx5_rxq_ctrl *rxq_ctrl =
581                 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
582         struct ibv_wq_attr mod;
583         union {
584                 struct {
585                         struct ibv_cq_init_attr_ex ibv;
586                         struct mlx5dv_cq_init_attr mlx5;
587                 } cq;
588                 struct ibv_wq_init_attr wq;
589                 struct ibv_cq_ex cq_attr;
590         } attr;
591         unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
592         struct mlx5_rxq_ibv *tmpl;
593         struct mlx5dv_cq cq_info;
594         struct mlx5dv_rwq rwq;
595         unsigned int i;
596         int ret = 0;
597         struct mlx5dv_obj obj;
598         struct mlx5_dev_config *config = &priv->config;
599
600         assert(rxq_data);
601         assert(!rxq_ctrl->ibv);
602         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
603         priv->verbs_alloc_ctx.obj = rxq_ctrl;
604         tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
605                                  rxq_ctrl->socket);
606         if (!tmpl) {
607                 DRV_LOG(ERR,
608                         "port %u Rx queue %u cannot allocate verbs resources",
609                         dev->data->port_id, rxq_ctrl->idx);
610                 rte_errno = ENOMEM;
611                 goto error;
612         }
613         tmpl->rxq_ctrl = rxq_ctrl;
614         /* Use the entire RX mempool as the memory region. */
615         tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
616         if (!tmpl->mr) {
617                 tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
618                 if (!tmpl->mr) {
619                         DRV_LOG(ERR, "port %u: memory region creation failure",
620                                 dev->data->port_id);
621                         goto error;
622                 }
623         }
624         if (rxq_ctrl->irq) {
625                 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
626                 if (!tmpl->channel) {
627                         DRV_LOG(ERR, "port %u: comp channel creation failure",
628                                 dev->data->port_id);
629                         rte_errno = ENOMEM;
630                         goto error;
631                 }
632         }
633         attr.cq.ibv = (struct ibv_cq_init_attr_ex){
634                 .cqe = cqe_n,
635                 .channel = tmpl->channel,
636                 .comp_mask = 0,
637         };
638         attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
639                 .comp_mask = 0,
640         };
641         if (config->cqe_comp && !rxq_data->hw_timestamp) {
642                 attr.cq.mlx5.comp_mask |=
643                         MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
644                 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
645                 /*
646                  * For vectorized Rx, it must not be doubled in order to
647                  * make cq_ci and rq_ci aligned.
648                  */
649                 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
650                         attr.cq.ibv.cqe *= 2;
651         } else if (config->cqe_comp && rxq_data->hw_timestamp) {
652                 DRV_LOG(DEBUG,
653                         "port %u Rx CQE compression is disabled for HW"
654                         " timestamp",
655                         dev->data->port_id);
656         }
657         tmpl->cq = mlx5_glue->cq_ex_to_cq
658                 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
659                                          &attr.cq.mlx5));
660         if (tmpl->cq == NULL) {
661                 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
662                         dev->data->port_id, idx);
663                 rte_errno = ENOMEM;
664                 goto error;
665         }
666         DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
667                 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
668         DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
669                 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
670         attr.wq = (struct ibv_wq_init_attr){
671                 .wq_context = NULL, /* Could be useful in the future. */
672                 .wq_type = IBV_WQT_RQ,
673                 /* Max number of outstanding WRs. */
674                 .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
675                 /* Max number of scatter/gather elements in a WR. */
676                 .max_sge = 1 << rxq_data->sges_n,
677                 .pd = priv->pd,
678                 .cq = tmpl->cq,
679                 .comp_mask =
680                         IBV_WQ_FLAGS_CVLAN_STRIPPING |
681                         0,
682                 .create_flags = (rxq_data->vlan_strip ?
683                                  IBV_WQ_FLAGS_CVLAN_STRIPPING :
684                                  0),
685         };
686         /* By default, FCS (CRC) is stripped by hardware. */
687         if (rxq_data->crc_present) {
688                 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
689                 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
690         }
691 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
692         if (config->hw_padding) {
693                 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
694                 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
695         }
696 #endif
697         tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
698         if (tmpl->wq == NULL) {
699                 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
700                         dev->data->port_id, idx);
701                 rte_errno = ENOMEM;
702                 goto error;
703         }
704         /*
705          * Make sure the number of WRs*SGEs matches expectations since a queue
706          * cannot allocate more than "desc" buffers.
707          */
708         if (((int)attr.wq.max_wr !=
709              ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
710             ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
711                 DRV_LOG(ERR,
712                         "port %u Rx queue %u requested %u*%u but got %u*%u"
713                         " WRs*SGEs",
714                         dev->data->port_id, idx,
715                         ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
716                         (1 << rxq_data->sges_n),
717                         attr.wq.max_wr, attr.wq.max_sge);
718                 rte_errno = EINVAL;
719                 goto error;
720         }
721         /* Change queue state to ready. */
722         mod = (struct ibv_wq_attr){
723                 .attr_mask = IBV_WQ_ATTR_STATE,
724                 .wq_state = IBV_WQS_RDY,
725         };
726         ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
727         if (ret) {
728                 DRV_LOG(ERR,
729                         "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
730                         dev->data->port_id, idx);
731                 rte_errno = ret;
732                 goto error;
733         }
734         obj.cq.in = tmpl->cq;
735         obj.cq.out = &cq_info;
736         obj.rwq.in = tmpl->wq;
737         obj.rwq.out = &rwq;
738         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
739         if (ret) {
740                 rte_errno = ret;
741                 goto error;
742         }
743         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
744                 DRV_LOG(ERR,
745                         "port %u wrong MLX5_CQE_SIZE environment variable"
746                         " value: it should be set to %u",
747                         dev->data->port_id, RTE_CACHE_LINE_SIZE);
748                 rte_errno = EINVAL;
749                 goto error;
750         }
751         /* Fill the rings. */
752         rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
753                 (uintptr_t)rwq.buf;
754         for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
755                 struct rte_mbuf *buf = (*rxq_data->elts)[i];
756                 volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
757
758                 /* scat->addr must be able to store a pointer. */
759                 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
760                 *scat = (struct mlx5_wqe_data_seg){
761                         .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
762                                                                   uintptr_t)),
763                         .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
764                         .lkey = tmpl->mr->lkey,
765                 };
766         }
767         rxq_data->rq_db = rwq.dbrec;
768         rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
769         rxq_data->cq_ci = 0;
770         rxq_data->rq_ci = 0;
771         rxq_data->rq_pi = 0;
772         rxq_data->zip = (struct rxq_zip){
773                 .ai = 0,
774         };
775         rxq_data->cq_db = cq_info.dbrec;
776         rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
777         rxq_data->cq_uar = cq_info.cq_uar;
778         rxq_data->cqn = cq_info.cqn;
779         rxq_data->cq_arm_sn = 0;
780         /* Update doorbell counter. */
781         rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
782         rte_wmb();
783         *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
784         DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
785                 idx, (void *)&tmpl);
786         rte_atomic32_inc(&tmpl->refcnt);
787         DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
788                 dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
789         LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
790         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
791         return tmpl;
792 error:
793         ret = rte_errno; /* Save rte_errno before cleanup. */
794         if (tmpl->wq)
795                 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
796         if (tmpl->cq)
797                 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
798         if (tmpl->channel)
799                 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
800         if (tmpl->mr)
801                 mlx5_mr_release(tmpl->mr);
802         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
803         rte_errno = ret; /* Restore rte_errno. */
804         return NULL;
805 }
806
807 /**
808  * Get an Rx queue Verbs object.
809  *
810  * @param dev
811  *   Pointer to Ethernet device.
812  * @param idx
813  *   Queue index in DPDK Rx queue array.
814  *
815  * @return
816  *   The Verbs object if it exists.
817  */
818 struct mlx5_rxq_ibv *
819 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
820 {
821         struct priv *priv = dev->data->dev_private;
822         struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
823         struct mlx5_rxq_ctrl *rxq_ctrl;
824
825         if (idx >= priv->rxqs_n)
826                 return NULL;
827         if (!rxq_data)
828                 return NULL;
829         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
830         if (rxq_ctrl->ibv) {
831                 mlx5_mr_get(dev, rxq_data->mp);
832                 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
833                 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
834                         dev->data->port_id, rxq_ctrl->idx,
835                         rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
836         }
837         return rxq_ctrl->ibv;
838 }
839
840 /**
841  * Release a Verbs Rx queue object.
842  *
843  * @param rxq_ibv
844  *   Verbs Rx queue object.
845  *
846  * @return
847  *   1 while a reference on it exists, 0 when freed.
848  */
849 int
850 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
851 {
852         int ret;
853
854         assert(rxq_ibv);
855         assert(rxq_ibv->wq);
856         assert(rxq_ibv->cq);
857         assert(rxq_ibv->mr);
858         ret = mlx5_mr_release(rxq_ibv->mr);
859         if (!ret)
860                 rxq_ibv->mr = NULL;
861         DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
862                 PORT_ID(rxq_ibv->rxq_ctrl->priv),
863                 rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
864         if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
865                 rxq_free_elts(rxq_ibv->rxq_ctrl);
866                 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
867                 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
868                 if (rxq_ibv->channel)
869                         claim_zero(mlx5_glue->destroy_comp_channel
870                                    (rxq_ibv->channel));
871                 LIST_REMOVE(rxq_ibv, next);
872                 rte_free(rxq_ibv);
873                 return 0;
874         }
875         return 1;
876 }
877
878 /**
879  * Verify the Verbs Rx queue list is empty.
880  *
881  * @param dev
882  *   Pointer to Ethernet device.
883  *
884  * @return
885  *   The number of objects not released.
886  */
887 int
888 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
889 {
890         struct priv *priv = dev->data->dev_private;
891         int ret = 0;
892         struct mlx5_rxq_ibv *rxq_ibv;
893
894         LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
895                 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
896                         dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
897                 ++ret;
898         }
899         return ret;
900 }
901
902 /**
903  * Return true if a single reference exists on the object.
904  *
905  * @param rxq_ibv
906  *   Verbs Rx queue object.
907  */
908 int
909 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
910 {
911         assert(rxq_ibv);
912         return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
913 }
914
915 /**
916  * Create a DPDK Rx queue.
917  *
918  * @param dev
919  *   Pointer to Ethernet device.
920  * @param idx
921  *   RX queue index.
922  * @param desc
923  *   Number of descriptors to configure in queue.
924  * @param socket
925  *   NUMA socket on which memory must be allocated.
926  *
927  * @return
928  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
929  */
930 struct mlx5_rxq_ctrl *
931 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
932              unsigned int socket, const struct rte_eth_rxconf *conf,
933              struct rte_mempool *mp)
934 {
935         struct priv *priv = dev->data->dev_private;
936         struct mlx5_rxq_ctrl *tmpl;
937         unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
938         struct mlx5_dev_config *config = &priv->config;
939         /*
940          * Always allocate extra slots, even if eventually
941          * the vector Rx will not be used.
942          */
943         const uint16_t desc_n =
944                 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
945         uint64_t offloads = conf->offloads |
946                            dev->data->dev_conf.rxmode.offloads;
947
948         tmpl = rte_calloc_socket("RXQ", 1,
949                                  sizeof(*tmpl) +
950                                  desc_n * sizeof(struct rte_mbuf *),
951                                  0, socket);
952         if (!tmpl) {
953                 rte_errno = ENOMEM;
954                 return NULL;
955         }
956         tmpl->socket = socket;
957         if (dev->data->dev_conf.intr_conf.rxq)
958                 tmpl->irq = 1;
959         /* Enable scattered packets support for this queue if necessary. */
960         assert(mb_len >= RTE_PKTMBUF_HEADROOM);
961         if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
962             (mb_len - RTE_PKTMBUF_HEADROOM)) {
963                 tmpl->rxq.sges_n = 0;
964         } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
965                 unsigned int size =
966                         RTE_PKTMBUF_HEADROOM +
967                         dev->data->dev_conf.rxmode.max_rx_pkt_len;
968                 unsigned int sges_n;
969
970                 /*
971                  * Determine the number of SGEs needed for a full packet
972                  * and round it to the next power of two.
973                  */
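                /*
                 * For example (hypothetical values): with the default
                 * RTE_PKTMBUF_HEADROOM of 128 bytes, a 2176-byte mbuf data
                 * room and max_rx_pkt_len = 9000, size = 9128, which needs
                 * 5 mbufs and is rounded up to 8 SGEs (sges_n = 3).
                 */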
974                 sges_n = log2above((size / mb_len) + !!(size % mb_len));
975                 tmpl->rxq.sges_n = sges_n;
976                 /* Make sure rxq.sges_n did not overflow. */
977                 size = mb_len * (1 << tmpl->rxq.sges_n);
978                 size -= RTE_PKTMBUF_HEADROOM;
979                 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
980                         DRV_LOG(ERR,
981                                 "port %u too many SGEs (%u) needed to handle"
982                                 " requested maximum packet size %u",
983                                 dev->data->port_id,
984                                 1 << sges_n,
985                                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
986                         rte_errno = EOVERFLOW;
987                         goto error;
988                 }
989         } else {
990                 DRV_LOG(WARNING,
991                         "port %u the requested maximum Rx packet size (%u) is"
992                         " larger than a single mbuf (%u) and scattered mode has"
993                         " not been requested",
994                         dev->data->port_id,
995                         dev->data->dev_conf.rxmode.max_rx_pkt_len,
996                         mb_len - RTE_PKTMBUF_HEADROOM);
997         }
998         DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
999                 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1000         if (desc % (1 << tmpl->rxq.sges_n)) {
1001                 DRV_LOG(ERR,
1002                         "port %u number of Rx queue descriptors (%u) is not a"
1003                         " multiple of SGEs per packet (%u)",
1004                         dev->data->port_id,
1005                         desc,
1006                         1 << tmpl->rxq.sges_n);
1007                 rte_errno = EINVAL;
1008                 goto error;
1009         }
1010         /* Toggle RX checksum offload if hardware supports it. */
1011         tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1012         tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1013         /* Configure VLAN stripping. */
1014         tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1015         /* By default, FCS (CRC) is stripped by hardware. */
1016         if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1017                 tmpl->rxq.crc_present = 0;
1018         } else if (config->hw_fcs_strip) {
1019                 tmpl->rxq.crc_present = 1;
1020         } else {
1021                 DRV_LOG(WARNING,
1022                         "port %u CRC stripping has been disabled but will"
1023                         " still be performed by hardware, make sure MLNX_OFED"
1024                         " and firmware are up to date",
1025                         dev->data->port_id);
1026                 tmpl->rxq.crc_present = 0;
1027         }
1028         DRV_LOG(DEBUG,
1029                 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1030                 " incoming frames to hide it",
1031                 dev->data->port_id,
1032                 tmpl->rxq.crc_present ? "disabled" : "enabled",
1033                 tmpl->rxq.crc_present << 2);
1034         /* Save port ID. */
1035         tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1036                 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1037         tmpl->rxq.port_id = dev->data->port_id;
1038         tmpl->priv = priv;
1039         tmpl->rxq.mp = mp;
1040         tmpl->rxq.stats.idx = idx;
1041         tmpl->rxq.elts_n = log2above(desc);
1042         tmpl->rxq.elts =
1043                 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1044         tmpl->idx = idx;
1045         rte_atomic32_inc(&tmpl->refcnt);
1046         DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1047                 idx, rte_atomic32_read(&tmpl->refcnt));
1048         LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1049         return tmpl;
1050 error:
1051         rte_free(tmpl);
1052         return NULL;
1053 }
1054
1055 /**
1056  * Get a Rx queue.
1057  *
1058  * @param dev
1059  *   Pointer to Ethernet device.
1060  * @param idx
1061  *   RX queue index.
1062  *
1063  * @return
1064  *   A pointer to the queue if it exists, NULL otherwise.
1065  */
1066 struct mlx5_rxq_ctrl *
1067 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1068 {
1069         struct priv *priv = dev->data->dev_private;
1070         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1071
1072         if ((*priv->rxqs)[idx]) {
1073                 rxq_ctrl = container_of((*priv->rxqs)[idx],
1074                                         struct mlx5_rxq_ctrl,
1075                                         rxq);
1076                 mlx5_rxq_ibv_get(dev, idx);
1077                 rte_atomic32_inc(&rxq_ctrl->refcnt);
1078                 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
1079                         dev->data->port_id, rxq_ctrl->idx,
1080                         rte_atomic32_read(&rxq_ctrl->refcnt));
1081         }
1082         return rxq_ctrl;
1083 }
1084
1085 /**
1086  * Release a Rx queue.
1087  *
1088  * @param dev
1089  *   Pointer to Ethernet device.
1090  * @param idx
1091  *   RX queue index.
1092  *
1093  * @return
1094  *   1 while a reference on it exists, 0 when freed.
1095  */
1096 int
1097 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1098 {
1099         struct priv *priv = dev->data->dev_private;
1100         struct mlx5_rxq_ctrl *rxq_ctrl;
1101
1102         if (!(*priv->rxqs)[idx])
1103                 return 0;
1104         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1105         assert(rxq_ctrl->priv);
1106         if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1107                 rxq_ctrl->ibv = NULL;
1108         DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1109                 rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
1110         if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1111                 LIST_REMOVE(rxq_ctrl, next);
1112                 rte_free(rxq_ctrl);
1113                 (*priv->rxqs)[idx] = NULL;
1114                 return 0;
1115         }
1116         return 1;
1117 }
1118
1119 /**
1120  * Verify if the queue can be released.
1121  *
1122  * @param dev
1123  *   Pointer to Ethernet device.
1124  * @param idx
1125  *   RX queue index.
1126  *
1127  * @return
1128  *   1 if the queue can be released, negative errno otherwise and rte_errno is
1129  *   set.
1130  */
1131 int
1132 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1133 {
1134         struct priv *priv = dev->data->dev_private;
1135         struct mlx5_rxq_ctrl *rxq_ctrl;
1136
1137         if (!(*priv->rxqs)[idx]) {
1138                 rte_errno = EINVAL;
1139                 return -rte_errno;
1140         }
1141         rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1142         return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1143 }
1144
1145 /**
1146  * Verify the Rx queue list is empty.
1147  *
1148  * @param dev
1149  *   Pointer to Ethernet device.
1150  *
1151  * @return
1152  *   The number of objects not released.
1153  */
1154 int
1155 mlx5_rxq_verify(struct rte_eth_dev *dev)
1156 {
1157         struct priv *priv = dev->data->dev_private;
1158         struct mlx5_rxq_ctrl *rxq_ctrl;
1159         int ret = 0;
1160
1161         LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1162                 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1163                         dev->data->port_id, rxq_ctrl->idx);
1164                 ++ret;
1165         }
1166         return ret;
1167 }
1168
1169 /**
1170  * Create an indirection table.
1171  *
1172  * @param dev
1173  *   Pointer to Ethernet device.
1174  * @param queues
1175  *   Queues entering the indirection table.
1176  * @param queues_n
1177  *   Number of queues in the array.
1178  *
1179  * @return
1180  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1181  */
1182 struct mlx5_ind_table_ibv *
1183 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1184                        uint32_t queues_n)
1185 {
1186         struct priv *priv = dev->data->dev_private;
1187         struct mlx5_ind_table_ibv *ind_tbl;
1188         const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1189                 log2above(queues_n) :
1190                 log2above(priv->config.ind_table_max_size);
1191         struct ibv_wq *wq[1 << wq_n];
1192         unsigned int i;
1193         unsigned int j;
1194
1195         ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1196                              queues_n * sizeof(uint16_t), 0);
1197         if (!ind_tbl) {
1198                 rte_errno = ENOMEM;
1199                 return NULL;
1200         }
1201         for (i = 0; i != queues_n; ++i) {
1202                 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1203
1204                 if (!rxq)
1205                         goto error;
1206                 wq[i] = rxq->ibv->wq;
1207                 ind_tbl->queues[i] = queues[i];
1208         }
1209         ind_tbl->queues_n = queues_n;
1210         /* Finalise indirection table. */
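        /*
         * Remaining entries are filled by cycling over the WQs already
         * present, e.g. (hypothetical) 3 queues in an 8-entry table give
         * q0 q1 q2 q0 q1 q2 q0 q1.
         */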
1211         for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1212                 wq[i] = wq[j];
1213         ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1214                 (priv->ctx,
1215                  &(struct ibv_rwq_ind_table_init_attr){
1216                         .log_ind_tbl_size = wq_n,
1217                         .ind_tbl = wq,
1218                         .comp_mask = 0,
1219                  });
1220         if (!ind_tbl->ind_table) {
1221                 rte_errno = errno;
1222                 goto error;
1223         }
1224         rte_atomic32_inc(&ind_tbl->refcnt);
1225         LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1226         DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
1227               dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
1228               rte_atomic32_read(&ind_tbl->refcnt));
1229         return ind_tbl;
1230 error:
1231         rte_free(ind_tbl);
1232         DRV_LOG(DEBUG, "port %u cannot create indirection table",
1233                 dev->data->port_id);
1234         return NULL;
1235 }
1236
1237 /**
1238  * Get an indirection table.
1239  *
1240  * @param dev
1241  *   Pointer to Ethernet device.
1242  * @param queues
1243  *   Queues entering the indirection table.
1244  * @param queues_n
1245  *   Number of queues in the array.
1246  *
1247  * @return
1248  *   An indirection table if found.
1249  */
1250 struct mlx5_ind_table_ibv *
1251 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1252                        uint32_t queues_n)
1253 {
1254         struct priv *priv = dev->data->dev_private;
1255         struct mlx5_ind_table_ibv *ind_tbl;
1256
1257         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1258                 if ((ind_tbl->queues_n == queues_n) &&
1259                     (memcmp(ind_tbl->queues, queues,
1260                             ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1261                      == 0))
1262                         break;
1263         }
1264         if (ind_tbl) {
1265                 unsigned int i;
1266
1267                 rte_atomic32_inc(&ind_tbl->refcnt);
1268                 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1269                         dev->data->port_id, (void *)ind_tbl,
1270                         rte_atomic32_read(&ind_tbl->refcnt));
1271                 for (i = 0; i != ind_tbl->queues_n; ++i)
1272                         mlx5_rxq_get(dev, ind_tbl->queues[i]);
1273         }
1274         return ind_tbl;
1275 }
1276
1277 /**
1278  * Release an indirection table.
1279  *
1280  * @param dev
1281  *   Pointer to Ethernet device.
1282  * @param ind_table
1283  *   Indirection table to release.
1284  *
1285  * @return
1286  *   1 while a reference on it exists, 0 when freed.
1287  */
1288 int
1289 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1290                            struct mlx5_ind_table_ibv *ind_tbl)
1291 {
1292         unsigned int i;
1293
1294         DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1295                 dev->data->port_id, (void *)ind_tbl,
1296                 rte_atomic32_read(&ind_tbl->refcnt));
1297         if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
1298                 claim_zero(mlx5_glue->destroy_rwq_ind_table
1299                            (ind_tbl->ind_table));
1300                 DEBUG("port %u delete indirection table %p: queues: %u",
1301                       dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
1302         }
1303         for (i = 0; i != ind_tbl->queues_n; ++i)
1304                 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1305         if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1306                 LIST_REMOVE(ind_tbl, next);
1307                 rte_free(ind_tbl);
1308                 return 0;
1309         }
1310         return 1;
1311 }
1312
1313 /**
1314  * Verify the Verbs indirection table list is empty.
1315  *
1316  * @param dev
1317  *   Pointer to Ethernet device.
1318  *
1319  * @return
1320  *   The number of objects not released.
1321  */
1322 int
1323 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1324 {
1325         struct priv *priv = dev->data->dev_private;
1326         struct mlx5_ind_table_ibv *ind_tbl;
1327         int ret = 0;
1328
1329         LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1330                 DRV_LOG(DEBUG,
1331                         "port %u Verbs indirection table %p still referenced",
1332                         dev->data->port_id, (void *)ind_tbl);
1333                 ++ret;
1334         }
1335         return ret;
1336 }
1337
1338 /**
1339  * Create an Rx Hash queue.
1340  *
1341  * @param dev
1342  *   Pointer to Ethernet device.
1343  * @param rss_key
1344  *   RSS key for the Rx hash queue.
1345  * @param rss_key_len
1346  *   RSS key length.
1347  * @param hash_fields
1348  *   Verbs protocol hash field to make the RSS on.
1349  * @param queues
1350  *   Queues entering the hash queue. In case of empty hash_fields, only
1351  *   the first queue index is used for the indirection table.
1352  * @param queues_n
1353  *   Number of queues.
1354  * @param tunnel
1355  *   Tunnel type, implies tunnel offloading like inner checksum if available.
1356  * @param rss_level
1357  *   RSS hash on tunnel level.
1358  *
1359  * @return
1360  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1361  */
1362 struct mlx5_hrxq *
1363 mlx5_hrxq_new(struct rte_eth_dev *dev,
1364               const uint8_t *rss_key, uint32_t rss_key_len,
1365               uint64_t hash_fields,
1366               const uint16_t *queues, uint32_t queues_n,
1367               uint32_t tunnel, uint32_t rss_level)
1368 {
1369         struct priv *priv = dev->data->dev_private;
1370         struct mlx5_hrxq *hrxq;
1371         struct mlx5_ind_table_ibv *ind_tbl;
1372         struct ibv_qp *qp;
1373         int err;
1374 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1375         struct mlx5dv_qp_init_attr qp_init_attr = {0};
1376 #endif
1377
1378         queues_n = hash_fields ? queues_n : 1;
1379         ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1380         if (!ind_tbl)
1381                 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1382         if (!ind_tbl) {
1383                 rte_errno = ENOMEM;
1384                 return NULL;
1385         }
1386         if (!rss_key_len) {
1387                 rss_key_len = rss_hash_default_key_len;
1388                 rss_key = rss_hash_default_key;
1389         }
1390 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1391         if (tunnel) {
1392                 qp_init_attr.comp_mask =
1393                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1394                 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1395         }
1396         qp = mlx5_glue->dv_create_qp
1397                 (priv->ctx,
1398                  &(struct ibv_qp_init_attr_ex){
1399                         .qp_type = IBV_QPT_RAW_PACKET,
1400                         .comp_mask =
1401                                 IBV_QP_INIT_ATTR_PD |
1402                                 IBV_QP_INIT_ATTR_IND_TABLE |
1403                                 IBV_QP_INIT_ATTR_RX_HASH,
1404                         .rx_hash_conf = (struct ibv_rx_hash_conf){
1405                                 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1406                                 .rx_hash_key_len = rss_key_len ? rss_key_len :
1407                                                    rss_hash_default_key_len,
1408                                 .rx_hash_key = rss_key ?
1409                                                (void *)(uintptr_t)rss_key :
1410                                                rss_hash_default_key,
1411                                 .rx_hash_fields_mask = hash_fields |
1412                                         (tunnel && rss_level > 1 ?
1413                                         (uint32_t)IBV_RX_HASH_INNER : 0),
1414                         },
1415                         .rwq_ind_tbl = ind_tbl->ind_table,
1416                         .pd = priv->pd,
1417                  },
1418                  &qp_init_attr);
1419         DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
1420               " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
1421               " create_flags:0x%x",
1422               dev->data->port_id, (void *)qp, (void *)ind_tbl,
1423               (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
1424               hash_fields, tunnel, rss_level,
1425               qp_init_attr.comp_mask, qp_init_attr.create_flags);
1426 #else
1427         qp = mlx5_glue->create_qp_ex
1428                 (priv->ctx,
1429                  &(struct ibv_qp_init_attr_ex){
1430                         .qp_type = IBV_QPT_RAW_PACKET,
1431                         .comp_mask =
1432                                 IBV_QP_INIT_ATTR_PD |
1433                                 IBV_QP_INIT_ATTR_IND_TABLE |
1434                                 IBV_QP_INIT_ATTR_RX_HASH,
1435                         .rx_hash_conf = (struct ibv_rx_hash_conf){
1436                                 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1437                                 .rx_hash_key_len = rss_key_len ? rss_key_len :
1438                                                    rss_hash_default_key_len,
1439                                 .rx_hash_key = rss_key ?
1440                                                (void *)(uintptr_t)rss_key :
1441                                                rss_hash_default_key,
1442                                 .rx_hash_fields_mask = hash_fields,
1443                         },
1444                         .rwq_ind_tbl = ind_tbl->ind_table,
1445                         .pd = priv->pd,
1446                  });
1447         DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
1448               " tunnel:0x%x level:%u",
1449               dev->data->port_id, (void *)qp, (void *)ind_tbl,
1450               hash_fields, tunnel, rss_level);
1451 #endif
1452         if (!qp) {
1453                 rte_errno = errno;
1454                 goto error;
1455         }
1456         hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1457         if (!hrxq)
1458                 goto error;
1459         hrxq->ind_table = ind_tbl;
1460         hrxq->qp = qp;
1461         hrxq->rss_key_len = rss_key_len;
1462         hrxq->hash_fields = hash_fields;
1463         hrxq->tunnel = tunnel;
1464         hrxq->rss_level = rss_level;
1465         memcpy(hrxq->rss_key, rss_key, rss_key_len);
1466         rte_atomic32_inc(&hrxq->refcnt);
1467         LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1468         DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1469                 dev->data->port_id, (void *)hrxq,
1470                 rte_atomic32_read(&hrxq->refcnt));
1471         return hrxq;
1472 error:
1473         err = rte_errno; /* Save rte_errno before cleanup. */
1474         mlx5_ind_table_ibv_release(dev, ind_tbl);
1475         if (qp)
1476                 claim_zero(mlx5_glue->destroy_qp(qp));
1477         rte_errno = err; /* Restore rte_errno. */
1478         return NULL;
1479 }
1480
1481 /**
1482  * Get an Rx Hash queue.
1483  *
1484  * @param dev
1485  *   Pointer to Ethernet device.
1486  * @param rss_key
1487  *   RSS key for the Rx hash queue, rss_key_len bytes long.
1488  * @param queues
1489  *   Queues entering the hash queue. In case of empty hash_fields, only
1490  *   the first queue index is used for the indirection table.
1491  * @param queues_n
1492  *   Number of queues.
1493  * @param tunnel
1494  *   Tunnel type, implies tunnel offloading like inner checksum if available.
1495  * @param rss_level
1496  *   RSS hash on tunnel level.
1497  *
1498  * @return
1499  *   A hash Rx queue if found, NULL otherwise.
1500  */
1501 struct mlx5_hrxq *
1502 mlx5_hrxq_get(struct rte_eth_dev *dev,
1503               const uint8_t *rss_key, uint32_t rss_key_len,
1504               uint64_t hash_fields,
1505               const uint16_t *queues, uint32_t queues_n,
1506               uint32_t tunnel, uint32_t rss_level)
1507 {
1508         struct priv *priv = dev->data->dev_private;
1509         struct mlx5_hrxq *hrxq;
1510
1511         queues_n = hash_fields ? queues_n : 1;
1512         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1513                 struct mlx5_ind_table_ibv *ind_tbl;
1514
1515                 if (hrxq->rss_key_len != rss_key_len)
1516                         continue;
1517                 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1518                         continue;
1519                 if (hrxq->hash_fields != hash_fields)
1520                         continue;
1521                 if (hrxq->tunnel != tunnel)
1522                         continue;
1523                 if (hrxq->rss_level != rss_level)
1524                         continue;
1525                 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1526                 if (!ind_tbl)
1527                         continue;
1528                 if (ind_tbl != hrxq->ind_table) {
1529                         mlx5_ind_table_ibv_release(dev, ind_tbl);
1530                         continue;
1531                 }
1532                 rte_atomic32_inc(&hrxq->refcnt);
1533                 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1534                         dev->data->port_id, (void *)hrxq,
1535                         rte_atomic32_read(&hrxq->refcnt));
1536                 return hrxq;
1537         }
1538         return NULL;
1539 }
1540
1541 /**
1542  * Release the hash Rx queue.
1543  *
1544  * @param dev
1545  *   Pointer to Ethernet device.
1546  * @param hrxq
1547  *   Pointer to Hash Rx queue to release.
1548  *
1549  * @return
1550  *   1 while a reference on it exists, 0 when freed.
1551  */
1552 int
1553 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1554 {
1555         DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1556                 dev->data->port_id, (void *)hrxq,
1557                 rte_atomic32_read(&hrxq->refcnt));
1558         if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1559                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1560                 DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
1561                       " 0x%x, level: %u",
1562                       dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
1563                       hrxq->tunnel, hrxq->rss_level);
1564                 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1565                 LIST_REMOVE(hrxq, next);
1566                 rte_free(hrxq);
1567                 return 0;
1568         }
1569         claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1570         return 1;
1571 }
1572
1573 /**
1574  * Verify the hash Rx queue list is empty.
1575  *
1576  * @param dev
1577  *   Pointer to Ethernet device.
1578  *
1579  * @return
1580  *   The number of objects not released.
1581  */
1582 int
1583 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1584 {
1585         struct priv *priv = dev->data->dev_private;
1586         struct mlx5_hrxq *hrxq;
1587         int ret = 0;
1588
1589         LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1590                 DRV_LOG(DEBUG,
1591                         "port %u Verbs hash Rx queue %p still referenced",
1592                         dev->data->port_id, (void *)hrxq);
1593                 ++ret;
1594         }
1595         return ret;
1596 }