/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>

#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 */
static void
txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
{
        unsigned int i;

        for (i = 0; (i != elts_n); ++i)
                (*txq_ctrl->txq.elts)[i] = NULL;
        /* Zero every WQE so stale contents cannot be mistaken for valid descriptors. */
        for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
                volatile struct mlx5_wqe64 *wqe =
                        (volatile struct mlx5_wqe64 *)
                        txq_ctrl->txq.wqes + i;

                memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
        }
        DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;
}
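
/*
 * Note: elts_head and elts_tail are free-running 16-bit indices; users of
 * the elts[] ring mask them with (elts_n - 1), which is why the element
 * count is stored as a log2 value and the ring size must be a power of two
 * (see the elts_m mask in txq_free_elts() below).
 */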

/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
        const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
        const uint16_t elts_m = elts_n - 1;
        uint16_t elts_head = txq_ctrl->txq.elts_head;
        uint16_t elts_tail = txq_ctrl->txq.elts_tail;
        struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

        DEBUG("%p: freeing WRs", (void *)txq_ctrl);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;
        while (elts_tail != elts_head) {
                struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

                assert(elt != NULL);
                rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
                /* Poisoning. */
                memset(&(*elts)[elts_tail & elts_m],
                       0x66,
                       sizeof((*elts)[elts_tail & elts_m]));
#endif
                ++elts_tail;
        }
}
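
/*
 * The queue counters are reset before the ring is drained: the local
 * elts_head/elts_tail copies taken at the top of txq_free_elts() keep the
 * loop bounds correct while the txq structure itself is already back in its
 * initial state.
 */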

/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_cleanup(struct txq_ctrl *txq_ctrl)
{
        size_t i;

        DEBUG("cleaning up %p", (void *)txq_ctrl);
        txq_free_elts(txq_ctrl);
        if (txq_ctrl->qp != NULL)
                claim_zero(ibv_destroy_qp(txq_ctrl->qp));
        if (txq_ctrl->cq != NULL)
                claim_zero(ibv_destroy_cq(txq_ctrl->cq));
        for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
                /* mp2mr[] is filled contiguously, stop at the first empty slot. */
                if (txq_ctrl->txq.mp2mr[i].mr == NULL)
                        break;
                claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
        }
        memset(txq_ctrl, 0, sizeof(*txq_ctrl));
}

/**
 * Initialize TX queue.
 *
 * @param tmpl
 *   Pointer to TX queue control template.
 * @param txq_ctrl
 *   Pointer to TX queue control.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
{
        struct mlx5_qp *qp = to_mqp(tmpl->qp);
        struct ibv_cq *ibcq = tmpl->cq;
        struct ibv_mlx5_cq_info cq_info;

        if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
                ERROR("Unable to query CQ info, check your OFED.");
                return ENOTSUP;
        }
        if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
                ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
                      "it should be set to %u", RTE_CACHE_LINE_SIZE);
                return EINVAL;
        }
        tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
        tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
        tmpl->txq.wqes = qp->gen_data.sqstart;
        tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
        tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
        tmpl->txq.bf_reg = qp->gen_data.bf->reg;
        tmpl->txq.cq_db = cq_info.dbrec;
        tmpl->txq.cqes =
                (volatile struct mlx5_cqe (*)[])
                (uintptr_t)cq_info.buf;
        tmpl->txq.elts =
                (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
                ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
        return 0;
}
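
/*
 * The elts[] array is not a separate allocation: mlx5_tx_queue_setup()
 * below allocates txq_ctrl with enough trailing room for "desc" mbuf
 * pointers, and txq_setup() simply points txq.elts at the memory that
 * immediately follows the control structure.
 */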

/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
               uint16_t desc, unsigned int socket,
               const struct rte_eth_txconf *conf)
{
        struct priv *priv = mlx5_get_priv(dev);
        struct txq_ctrl tmpl = {
                .priv = priv,
                .socket = socket,
        };
        union {
                struct ibv_exp_qp_init_attr init;
                struct ibv_exp_cq_init_attr cq;
                struct ibv_exp_qp_attr mod;
                struct ibv_exp_cq_attr cq_attr;
        } attr;
        unsigned int cqe_n;
        const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
                                             (RTE_CACHE_LINE_SIZE - 1)) /
                                              RTE_CACHE_LINE_SIZE);
        int ret = 0;

        if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
                ret = ENOTSUP;
                ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
                goto error;
        }
        tmpl.txq.flags = conf->txq_flags;
        assert(desc > MLX5_TX_COMP_THRESH);
        tmpl.txq.elts_n = log2above(desc);
        if (priv->mps == MLX5_MPW_ENHANCED)
                tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
        /* MRs will be registered in mp2mr[] later. */
        attr.cq = (struct ibv_exp_cq_init_attr){
                .comp_mask = 0,
        };
        cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
                ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
        if (priv->mps == MLX5_MPW_ENHANCED)
                cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
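        /*
         * One CQE is enough per MLX5_TX_COMP_THRESH packets since a
         * completion is only requested that often, hence the
         * desc / MLX5_TX_COMP_THRESH - 1 sizing above (never less than 1);
         * Enhanced MPW reserves MLX5_TX_COMP_THRESH_INLINE_DIV extra
         * entries on top of that.
         */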
        tmpl.cq = ibv_exp_create_cq(priv->ctx,
                                    cqe_n,
                                    NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        DEBUG("priv->device_attr.max_qp_wr is %d",
              priv->device_attr.max_qp_wr);
        DEBUG("priv->device_attr.max_sge is %d",
              priv->device_attr.max_sge);
        attr.init = (struct ibv_exp_qp_init_attr){
                /* CQ to be associated with the send queue. */
                .send_cq = tmpl.cq,
                /* CQ to be associated with the receive queue. */
                .recv_cq = tmpl.cq,
                .cap = {
                        /* Max number of outstanding WRs. */
                        .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
                                        priv->device_attr.max_qp_wr :
                                        desc),
                        /*
                         * Max number of scatter/gather elements in a WR,
                         * must be 1 to prevent libmlx5 from trying to affect
                         * too much memory. TX gather is not impacted by the
                         * priv->device_attr.max_sge limit and will still work
                         * properly.
                         */
                        .max_send_sge = 1,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
                /* Do *NOT* enable this, completion events are managed per
                 * TX burst. */
                .sq_sig_all = 0,
                .pd = priv->pd,
                .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
        };
        if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
                unsigned int ds_cnt;

                tmpl.txq.max_inline =
                        ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
                         RTE_CACHE_LINE_SIZE);
                tmpl.txq.inline_en = 1;
                /* TSO and MPS can't be enabled concurrently. */
                assert(!priv->tso || !priv->mps);
                if (priv->mps == MLX5_MPW_ENHANCED) {
                        tmpl.txq.inline_max_packet_sz =
                                priv->inline_max_packet_sz;
                        /* To minimize the size of the data set, avoid
                         * requesting a too large WQ. */
                        attr.init.cap.max_inline_data =
                                ((RTE_MIN(priv->txq_inline,
                                          priv->inline_max_packet_sz) +
                                  (RTE_CACHE_LINE_SIZE - 1)) /
                                 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
                } else if (priv->tso) {
                        int inline_diff = tmpl.txq.max_inline - max_tso_inline;

                        /*
                         * Adjust the inline value as Verbs aggregates the
                         * tso_inline and txq_inline fields.
                         */
                        attr.init.cap.max_inline_data = inline_diff > 0 ?
                                                        inline_diff *
                                                        RTE_CACHE_LINE_SIZE :
                                                        0;
                } else {
                        attr.init.cap.max_inline_data =
                                tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
                }
                /*
                 * Check if the inline size is too large in a way which
                 * can make the WQE DS count overflow.
                 * Considered in the calculation:
                 *      WQE CTRL (1 DS)
                 *      WQE ETH  (1 DS)
                 *      Inline part (N DS)
                 */
                ds_cnt = 2 +
                        (attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE);
                if (ds_cnt > MLX5_DSEG_MAX) {
                        unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
                                                   MLX5_WQE_DWORD_SIZE;

                        max_inline = max_inline - (max_inline %
                                                   RTE_CACHE_LINE_SIZE);
                        WARN("txq inline is too large (%d), setting it to "
                             "the maximum possible: %d\n",
                             priv->txq_inline, max_inline);
                        tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
                        attr.init.cap.max_inline_data = max_inline;
                }
        }
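        /*
         * Worked example for the clamp above, assuming MLX5_WQE_DWORD_SIZE
         * is 16 and MLX5_DSEG_MAX is 63: a 2048-byte inline request would
         * need 2 + 2048 / 16 = 130 DS entries, which exceeds MLX5_DSEG_MAX,
         * so the inline size is capped at (63 - 2) * 16 = 976 bytes and
         * then rounded down to a multiple of RTE_CACHE_LINE_SIZE.
         */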
        if (priv->tso) {
                attr.init.max_tso_header =
                        max_tso_inline * RTE_CACHE_LINE_SIZE;
                attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
                tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
                                              max_tso_inline);
                tmpl.txq.tso_en = 1;
        }
        if (priv->tunnel_en)
                tmpl.txq.tunnel_en = 1;
        tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
              " max_inline_data=%u",
              attr.init.cap.max_send_wr,
              attr.init.cap.max_send_sge,
              attr.init.cap.max_inline_data);
        attr.mod = (struct ibv_exp_qp_attr){
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
                /* Primary port number. */
                .port_num = priv->port
        };
        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
                                (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
        if (ret) {
                ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        ret = txq_setup(&tmpl, txq_ctrl);
        if (ret) {
                ERROR("%p: cannot initialize TX queue structure: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        txq_alloc_elts(&tmpl, desc);
        attr.mod = (struct ibv_exp_qp_attr){
                .qp_state = IBV_QPS_RTR
        };
        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
        if (ret) {
                ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        attr.mod.qp_state = IBV_QPS_RTS;
        ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
        if (ret) {
                ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
                      (void *)dev, strerror(ret));
                goto error;
        }
        /* Clean up txq in case we're reinitializing it. */
        DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
        txq_cleanup(txq_ctrl);
        *txq_ctrl = tmpl;
        DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
        /* Pre-register known mempools. */
        rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
        return 0;
error:
        txq_cleanup(&tmpl);
        return ret;
}
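
/*
 * Note on the structure of txq_ctrl_setup(): everything is built in the
 * local "tmpl" copy and only committed to *txq_ctrl once the QP has reached
 * IBV_QPS_RTS, so a failure at any step only requires releasing tmpl's
 * resources and leaves the caller's queue untouched.
 */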

/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_txconf *conf)
{
        struct priv *priv = dev->data->dev_private;
        struct txq *txq = (*priv->txqs)[idx];
        struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
        int ret;

        if (mlx5_is_secondary())
                return -E_RTE_SECONDARY;

        if (desc <= MLX5_TX_COMP_THRESH) {
                WARN("%p: number of descriptors requested for TX queue %u"
                     " must be higher than MLX5_TX_COMP_THRESH, using"
                     " %u instead of %u",
                     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
                desc = MLX5_TX_COMP_THRESH + 1;
        }
        if (!rte_is_power_of_2(desc)) {
                desc = 1 << log2above(desc);
                WARN("%p: increased number of descriptors in TX queue %u"
                     " to the next power of two (%d)",
                     (void *)dev, idx, desc);
        }
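        /*
         * Example: a request for 500 descriptors becomes
         * 1 << log2above(500) = 512. The ring size has to be a power of two
         * so that the head/tail indices can be wrapped with a simple mask
         * (see elts_m in txq_free_elts()).
         */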
        DEBUG("%p: configuring queue %u for %u descriptors",
              (void *)dev, idx, desc);
        if (idx >= priv->txqs_n) {
                ERROR("%p: queue index out of range (%u >= %u)",
                      (void *)dev, idx, priv->txqs_n);
                return -EOVERFLOW;
        }
        if (txq != NULL) {
                DEBUG("%p: reusing already allocated queue index %u (%p)",
                      (void *)dev, idx, (void *)txq);
                (*priv->txqs)[idx] = NULL;
                txq_cleanup(txq_ctrl);
                /* Resize if txq size is changed. */
                if (txq_ctrl->txq.elts_n != log2above(desc)) {
                        txq_ctrl = rte_realloc(txq_ctrl,
                                               sizeof(*txq_ctrl) +
                                               desc * sizeof(struct rte_mbuf *),
                                               RTE_CACHE_LINE_SIZE);
                        if (txq_ctrl == NULL) {
                                ERROR("%p: unable to reallocate queue index %u",
                                      (void *)dev, idx);
                                return -ENOMEM;
                        }
                }
        } else {
                txq_ctrl =
                        rte_calloc_socket("TXQ", 1,
                                          sizeof(*txq_ctrl) +
                                          desc * sizeof(struct rte_mbuf *),
                                          0, socket);
                if (txq_ctrl == NULL) {
                        ERROR("%p: unable to allocate queue index %u",
                              (void *)dev, idx);
                        return -ENOMEM;
                }
        }
        ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
        if (ret) {
                rte_free(txq_ctrl);
        } else {
                txq_ctrl->txq.stats.idx = idx;
                DEBUG("%p: adding TX queue %p to list",
                      (void *)dev, (void *)txq_ctrl);
                (*priv->txqs)[idx] = &txq_ctrl->txq;
        }
        return -ret;
}

/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
        struct txq *txq = (struct txq *)dpdk_txq;
        struct txq_ctrl *txq_ctrl;
        struct priv *priv;
        unsigned int i;

        if (mlx5_is_secondary())
                return;
        if (txq == NULL)
                return;
        txq_ctrl = container_of(txq, struct txq_ctrl, txq);
        priv = txq_ctrl->priv;
        for (i = 0; (i != priv->txqs_n); ++i)
                if ((*priv->txqs)[i] == txq) {
                        DEBUG("%p: removing TX queue %p from list",
                              (void *)priv->dev, (void *)txq_ctrl);
                        (*priv->txqs)[i] = NULL;
                        break;
                }
        txq_cleanup(txq_ctrl);
        rte_free(txq_ctrl);
}