/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
62 #include "mlx5_utils.h"
63 #include "mlx5_defs.h"
65 #include "mlx5_rxtx.h"
66 #include "mlx5_autoconf.h"
67 #include "mlx5_defs.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 */
static void
txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
{
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
	for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
		volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];

		memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
	}
	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
}
/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
	unsigned int elts_n = txq_ctrl->txq.elts_n;
	unsigned int elts_head = txq_ctrl->txq.elts_head;
	unsigned int elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail];

		assert(elt != NULL);
		rte_pktmbuf_free(elt);
#ifndef NDEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail],
		       0x77,
		       sizeof((*elts)[elts_tail]));
#endif
		if (++elts_tail == elts_n)
			elts_tail = 0;
	}
}
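/*
 * Worked example for txq_free_elts() above (hypothetical state): with
 * elts_n = 4, elts_head = 1 and elts_tail = 3, the loop frees indexes
 * 3 and 0, wrapping once at elts_n, and stops as soon as elts_tail
 * catches up with elts_head.
 */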
/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_cleanup(struct txq_ctrl *txq_ctrl)
{
	struct ibv_exp_release_intf_params params;
	size_t i;

	DEBUG("cleaning up %p", (void *)txq_ctrl);
	txq_free_elts(txq_ctrl);
	if (txq_ctrl->if_qp != NULL) {
		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		assert(txq_ctrl->qp != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
						txq_ctrl->if_qp,
						&params));
	}
	if (txq_ctrl->if_cq != NULL) {
		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		assert(txq_ctrl->cq != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
						txq_ctrl->if_cq,
						&params));
	}
	if (txq_ctrl->qp != NULL)
		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
	if (txq_ctrl->cq != NULL)
		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
	if (txq_ctrl->rd != NULL) {
		struct ibv_exp_destroy_res_domain_attr attr = {
			.comp_mask = 0,
		};

		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
						      txq_ctrl->rd,
						      &attr));
	}
	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
		if (txq_ctrl->txq.mp2mr[i].mp == NULL)
			break;
		assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
	}
	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
}
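/*
 * Note on the tear-down above: claim_zero() (a helper from
 * mlx5_utils.h) asserts that the wrapped verbs destroy/release call
 * returns zero in debug builds and merely evaluates it otherwise, so
 * failures in this cleanup path are deliberately not propagated.
 */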
/**
 * Initialize TX queue.
 *
 * @param tmpl
 *   Pointer to TX queue control template.
 * @param txq_ctrl
 *   Pointer to TX queue control.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
{
	struct mlx5_qp *qp = to_mqp(tmpl->qp);
	struct ibv_cq *ibcq = tmpl->cq;
	struct mlx5_cq *cq = to_mxxx(cq, cq);

	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		return EINVAL;
	}
	tmpl->txq.cqe_n = ibcq->cqe + 1;
	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
	tmpl->txq.wqes =
		(volatile union mlx5_wqe (*)[])
		(uintptr_t)qp->gen_data.sqstart;
	tmpl->txq.wqe_n = qp->sq.wqe_cnt;
	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
	tmpl->txq.bf_offset = qp->gen_data.bf->offset;
	tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
	tmpl->txq.cq_db = cq->dbrec;
	tmpl->txq.cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq->active_buf->buf;
	tmpl->txq.elts =
		(struct rte_mbuf *(*)[tmpl->txq.elts_n])
		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
	return 0;
}
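/*
 * Usage sketch for the CQE size check above (values illustrative): on a
 * host with 64-byte cache lines, the verbs library must be told to use
 * matching CQEs or txq_setup() fails with EINVAL, e.g.:
 *
 *   MLX5_CQE_SIZE=64 ./my-dpdk-app ...
 *
 * Also note qp_num_8s: the QP number is stored pre-shifted by 8 bits so
 * the TX burst path can OR it directly into the WQE control segment
 * without shifting per packet.
 */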
/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
	       uint16_t desc, unsigned int socket,
	       const struct rte_eth_txconf *conf)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct txq_ctrl tmpl = {
		.priv = priv,
		.socket = socket,
	};
	union {
		struct ibv_exp_query_intf_params params;
		struct ibv_exp_qp_init_attr init;
		struct ibv_exp_res_domain_init_attr rd;
		struct ibv_exp_cq_init_attr cq;
		struct ibv_exp_qp_attr mod;
		struct ibv_exp_cq_attr cq_attr;
	} attr;
	enum ibv_exp_query_intf_status status;
	int ret = 0;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		ret = ENOTSUP;
		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
		goto error;
	}
	(void)conf; /* Thresholds configuration (ignored). */
	tmpl.txq.elts_n = desc;
	/*
	 * Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or
	 * at least 4 times per ring.
	 */
	tmpl.txq.elts_comp_cd_init =
		((MLX5_PMD_TX_PER_COMP_REQ < (desc / 4)) ?
		 MLX5_PMD_TX_PER_COMP_REQ : (desc / 4));
	tmpl.txq.elts_comp = tmpl.txq.elts_comp_cd_init;
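	/*
	 * Worked example (hypothetical values): with desc = 512 and
	 * MLX5_PMD_TX_PER_COMP_REQ = 64, elts_comp_cd_init becomes
	 * min(64, 512 / 4) = 64, i.e. one completion request roughly
	 * every 64 packets and 8 per ring.
	 */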
	/* MRs will be registered in mp2mr[] later. */
	attr.rd = (struct ibv_exp_res_domain_init_attr){
		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
		.thread_model = IBV_EXP_THREAD_SINGLE,
		.msg_model = IBV_EXP_MSG_HIGH_BW,
	};
	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
	if (tmpl.rd == NULL) {
		ret = ENOMEM;
		ERROR("%p: RD creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.cq = (struct ibv_exp_cq_init_attr){
		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
		.res_domain = tmpl.rd,
	};
	tmpl.cq = ibv_exp_create_cq(priv->ctx,
				    (desc / tmpl.txq.elts_comp_cd_init) - 1,
				    NULL, NULL, 0, &attr.cq);
	if (tmpl.cq == NULL) {
		ret = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
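	/*
	 * Sizing note (illustrative): since a completion is requested only
	 * every elts_comp_cd_init packets, the CQ needs one entry per
	 * in-flight completion request rather than one per descriptor;
	 * with desc = 512 and elts_comp_cd_init = 64, the requested size
	 * is 512 / 64 - 1 = 7 entries, which verbs may round up as needed.
	 */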
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	attr.init = (struct ibv_exp_qp_init_attr){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/* Do *NOT* enable this, completion events are managed per
		 * TX burst. */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.res_domain = tmpl.rd,
		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
	};
	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod = (struct ibv_exp_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
				(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	ret = txq_setup(&tmpl, txq_ctrl);
	if (ret) {
		ERROR("%p: cannot initialize TX queue structure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	txq_alloc_elts(&tmpl, desc);
	attr.mod = (struct ibv_exp_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
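	/*
	 * At this point the QP has walked the standard verbs state
	 * machine, RESET -> INIT -> RTR -> RTS, which it must complete
	 * before any send work request can be posted.
	 */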
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_CQ,
		.obj = tmpl.cq,
	};
	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_cq == NULL) {
		ret = EINVAL;
		ERROR("%p: CQ interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_QP_BURST,
		.intf_version = 1,
		.obj = tmpl.qp,
		/* Enable multi-packet send if supported. */
		.family_flags =
			(priv->mps ?
			 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
			 0),
	};
	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_qp == NULL) {
		ret = EINVAL;
		ERROR("%p: QP interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	/* Clean up txq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
	txq_cleanup(txq_ctrl);
	*txq_ctrl = tmpl;
	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
	/* Pre-register known mempools. */
	rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
	return 0;
error:
	txq_cleanup(&tmpl);
	assert(ret > 0);
	return ret;
}
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct txq *txq = (*priv->txqs)[idx];
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;
	priv_lock(priv);
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in TX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		priv_unlock(priv);
		return -EOVERFLOW;
	}
	if (txq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)txq);
		if (priv->started) {
			priv_unlock(priv);
			return -EEXIST;
		}
		(*priv->txqs)[idx] = NULL;
		txq_cleanup(txq_ctrl);
	} else {
		txq_ctrl =
			rte_calloc_socket("TXQ", 1,
					  sizeof(*txq_ctrl) +
					  desc * sizeof(struct rte_mbuf *),
					  0, socket);
		if (txq_ctrl == NULL) {
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			priv_unlock(priv);
			return -ENOMEM;
		}
	}
	ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
	if (ret)
		rte_free(txq_ctrl);
	else {
		txq_ctrl->txq.stats.idx = idx;
		DEBUG("%p: adding TX queue %p to list",
		      (void *)dev, (void *)txq_ctrl);
		(*priv->txqs)[idx] = &txq_ctrl->txq;
		/* Update send callback. */
		priv_select_tx_function(priv);
	}
	priv_unlock(priv);
	return -ret;
}
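/*
 * A minimal usage sketch (hypothetical port/queue/size values): the
 * callback above is normally reached through the generic ethdev API,
 * e.g.
 *
 *   ret = rte_eth_tx_queue_setup(0, 0, 500, rte_socket_id(), NULL);
 *
 * A non-power-of-two count such as 500 is rounded up to 512 by the
 * log2above() logic before the queue is created.
 */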
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct txq_ctrl *txq_ctrl;
	struct priv *priv;
	unsigned int i;

	if (mlx5_is_secondary())
		return;
	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	priv = txq_ctrl->priv;
	priv_lock(priv);
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq_ctrl);
			(*priv->txqs)[i] = NULL;
			break;
		}
	txq_cleanup(txq_ctrl);
	rte_free(txq_ctrl);
	priv_unlock(priv);
}
/**
 * DPDK callback for TX in secondary processes.
 *
 * This function configures all queues from primary process information
 * if necessary before reverting to the normal TX burst callback.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
			      uint16_t pkts_n)
{
	struct txq *txq = dpdk_txq;
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
	struct priv *primary_priv;
	unsigned int index;

	if (priv == NULL)
		return 0;
	primary_priv =
		mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
	/* Look for queue index in both private structures. */
	for (index = 0; index != priv->txqs_n; ++index)
		if (((*primary_priv->txqs)[index] == txq) ||
		    ((*priv->txqs)[index] == txq))
			break;
	if (index == priv->txqs_n)
		return 0;
	txq = (*priv->txqs)[index];
	return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
}