/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_tx.h"

#define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},

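/*
 * For illustration (not part of the driver logic): the macro pastes the
 * function name suffix with the ## operator, so an entry such as
 *
 *     MLX5_TXOFF_INFO(full_empw, MLX5_TXOFF_CONFIG_FULL)
 *
 * expands to the initializer
 *
 *     {mlx5_tx_burst_full_empw, MLX5_TXOFF_CONFIG_FULL},
 *
 * binding one template-generated burst routine to its offloads mask.
 */
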
/**
 * Move QP from error state to running state and initialize indexes.
 *
 * @param txq_ctrl
 *   Pointer to TX queue control structure.
 *
 * @return
 *   0 on success, else -1.
 */
static int
tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_mp_arg_queue_state_modify sm = {
		.is_wq = 0,
		.queue_id = txq_ctrl->txq.idx,
	};

	if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
		return -1;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	return 0;
}

/* Return 1 if the error CQE is signed otherwise, sign it and return 0. */
static int
check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
{
	static const uint8_t magic[] = "seen";
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic); ++i)
		if (!ret || err_cqe->rsvd1[i] != magic[i]) {
			ret = 0;
			err_cqe->rsvd1[i] = magic[i];
		}
	return ret;
}

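/*
 * Behavior sketch (follows directly from the loop above): the first call
 * on a fresh error CQE finds bytes differing from "seen", so ret drops to
 * 0 and the magic is written into rsvd1[]; a repeated call on the very
 * same CQE matches every byte and returns 1. The caller relies on this to
 * dump and count each hardware error only once even when the same error
 * CQE is polled again before the queue is recovered.
 */
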
/**
 * Handle error CQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param err_cqe
 *   Pointer to the error CQE.
 *
 * @return
 *   Negative value if queue recovery failed, otherwise
 *   the error completion entry is handled successfully.
 */
static int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
			 volatile struct mlx5_err_cqe *err_cqe)
{
	if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
		const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
		struct mlx5_txq_ctrl *txq_ctrl =
				container_of(txq, struct mlx5_txq_ctrl, txq);
		uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
		int seen = check_err_cqe_seen(err_cqe);

		if (!seen && txq_ctrl->dump_file_n <
		    txq_ctrl->priv->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u SQN = %u wqe_counter = %u "
			      "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
			      txq->cqe_s, txq->qp_num_8s >> 8,
			      rte_be_to_cpu_16(err_cqe->wqe_counter),
			      txq->wqe_ci, txq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
			      PORT_ID(txq_ctrl->priv), txq->idx,
			      txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
						    txq->cqes),
						    sizeof(*err_cqe) *
						    (1 << txq->cqe_n));
			mlx5_dump_debug_information(name, "MLX5 Error SQ:",
						    (const void *)((uintptr_t)
						    txq->wqes),
						    MLX5_WQE_SIZE *
						    (1 << txq->wqe_n));
			txq_ctrl->dump_file_n++;
		}
		if (!seen)
			/*
			 * Count errors in WQEs units.
			 * Later it can be improved to count error packets,
			 * for example, by SQ parsing to find how many packets
			 * should be counted for each WQE.
			 */
			txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
						new_wqe_pi) & wqe_m;
		if (tx_recover_qp(txq_ctrl)) {
			/* Recovering failed - retry later on the same WQE. */
			return -1;
		}
		/* Release all the remaining buffers. */
		txq_free_elts(txq_ctrl);
	}
	return 0;
}

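/*
 * Worked example of the error accounting above (hypothetical values):
 * with wqe_n = 8 the mask wqe_m is 0xFF. If the SQ producer index
 * wqe_ci is 0x105 and the failed completion reports wqe_counter 0xFE,
 * then ((0x105 & 0xFF) - 0xFE) & 0xFF = (0x05 - 0xFE) & 0xFF = 0x07,
 * i.e. seven WQEs are counted as errored despite the index wrap-around.
 */
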
/**
 * Update completion queue consuming index via doorbell
 * and flush the completed data buffers.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param last_cqe
 *   Valid CQE pointer, if not NULL update txq->wqe_pi and flush the buffers.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 */
static __rte_always_inline void
mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
		   volatile struct mlx5_cqe *last_cqe,
		   unsigned int olx __rte_unused)
{
	if (likely(last_cqe != NULL)) {
		uint16_t tail;

		txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
		tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
		if (likely(tail != txq->elts_tail)) {
			mlx5_tx_free_elts(txq, tail, olx);
			MLX5_ASSERT(tail == txq->elts_tail);
		}
	}
}

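/*
 * Implementation note: txq->fcqs is assumed to hold, for each expected
 * completion, the elts tail index up to which mbufs can be freed once
 * that CQE arrives; under RTE_LIBRTE_MLX5_DEBUG the upper 16 bits of
 * each entry additionally carry the expected wqe_counter (see the
 * assertion in mlx5_tx_handle_completion() below).
 */
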
/**
 * Manage TX completions. This routine checks the CQ for
 * arrived CQEs, deduces the last accomplished WQE in SQ,
 * updates SQ producing index and frees all completed mbufs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param olx
 *   Configured Tx offloads mask. It is fully defined at
 *   compile time and may be used for optimization.
 *
 * NOTE: not inlined intentionally, it makes tx_burst
 * routine smaller, simpler and faster - from experiments.
 */
void
mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
			  unsigned int olx __rte_unused)
{
	unsigned int count = MLX5_TX_COMP_MAX_CQE;
	volatile struct mlx5_cqe *last_cqe = NULL;
	bool ring_doorbell = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
			/*
			 * Some error occurred, try to restart.
			 * We have no barrier after WQE related Doorbell
			 * written, make sure all writes are completed
			 * here, before we might perform SQ reset.
			 */
			rte_wmb();
			ret = mlx5_tx_error_cqe_handle
				(txq, (volatile struct mlx5_err_cqe *)cqe);
			if (unlikely(ret < 0)) {
				/*
				 * Some error occurred on queue error
				 * handling, we do not advance the index
				 * here, allowing to retry on next call.
				 */
				return;
			}
			/*
			 * We are going to fetch all entries with
			 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
			 * The send queue is supposed to be empty.
			 */
			ring_doorbell = true;
			++txq->cq_ci;
			txq->cq_pi = txq->cq_ci;
			last_cqe = NULL;
			continue;
		}
		/* Normal transmit completion. */
		MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
			    cqe->wqe_counter);
#endif
		ring_doorbell = true;
		++txq->cq_ci;
		last_cqe = cqe;
		/*
		 * We have to restrict the amount of processed CQEs
		 * in one tx_burst routine call. The CQ may be large
		 * and many CQEs may be updated by the NIC in one
		 * transaction. Buffers freeing is time consuming,
		 * multiple iterations may introduce significant latency.
		 */
		if (likely(--count == 0))
			break;
	} while (true);
	if (likely(ring_doorbell)) {
		/* Ring doorbell to notify hardware. */
		rte_compiler_barrier();
		*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
		mlx5_tx_comp_flush(txq, last_cqe, olx);
	}
}

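/*
 * Assumed doorbell semantics for the code above: the CQ doorbell record
 * (*txq->cq_db) resides in host memory, so storing the new consumer
 * index cq_ci there releases the consumed CQEs back to the HW; the
 * compiler barrier only needs to order the preceding CQE reads before
 * that store.
 */
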
/**
 * DPDK callback to check the status of a Tx descriptor.
 *
 * @param tx_queue
 *   The Tx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Tx descriptor.
 */
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct mlx5_txq_data *__rte_restrict txq = tx_queue;
	uint16_t used;

	mlx5_tx_handle_completion(txq, 0);
	used = txq->elts_head - txq->elts_tail;
	if (offset < used)
		return RTE_ETH_TX_DESC_FULL;
	return RTE_ETH_TX_DESC_DONE;
}

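/*
 * Minimal usage sketch through the public ethdev API (application side,
 * assuming port_id/queue_id are valid and offset is below the ring size);
 * rte_eth_tx_descriptor_status() dispatches to the callback above:
 *
 *     int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
 *     if (st == RTE_ETH_TX_DESC_DONE)
 *         ; // the slot is already completed, a new packet would not wait
 */
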
/**
 * Array of declared and compiled Tx burst function and corresponding
 * supported offloads set. The array is used to select the Tx burst
 * function for specified offloads set at Tx queue configuration time.
 */
const struct {
	eth_tx_burst_t func;
	unsigned int olx;
} txoff_func[] = {
MLX5_TXOFF_INFO(full_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_empw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(md_empw,
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mt_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtsc_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_empw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sc_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sci_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(scv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(sciv_empw,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(i_empw,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(v_empw,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(iv_empw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full_ts_nompw,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts_nompwi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP)

MLX5_TXOFF_INFO(full_ts,
		MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full_ts_noi,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(none_ts,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mdi_ts,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mti_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_METADATA |
		MLX5_TXOFF_CONFIG_TXPP | MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(mtiv_ts,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_TXPP |
		MLX5_TXOFF_CONFIG_EMPW)

MLX5_TXOFF_INFO(full,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(none,
		MLX5_TXOFF_CONFIG_NONE)

MLX5_TXOFF_INFO(md,
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mt,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtsc,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mti,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(mtiv,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sc,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sci,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(scv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(sciv,
		MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(i,
		MLX5_TXOFF_CONFIG_INLINE |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(v,
		MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(iv,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
		MLX5_TXOFF_CONFIG_METADATA)

MLX5_TXOFF_INFO(none_mpw,
		MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mci_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(mc_mpw,
		MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
		MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)

MLX5_TXOFF_INFO(i_mpw,
		MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
		MLX5_TXOFF_CONFIG_MPW)
};

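/*
 * The entry names above encode the offload set each routine was generated
 * for (assumed convention, consistent with the masks): "m" - multi-segment,
 * "t" - TSO, "s" - software parser, "c" - checksum, "i" - inline, "v" -
 * VLAN insertion, "md" - metadata; the "_empw"/"_mpw" suffixes mark the
 * enhanced or legacy multi-packet-write descriptor format and "_ts" marks
 * Tx scheduling. For example, "mtsc_empw" is the MULTI | TSO | SWP | CSUM
 * | METADATA routine built with eMPW support.
 */
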
/**
 * Configure the Tx function to use. The routine checks configured
 * Tx offloads for the device and selects appropriate Tx burst routine.
 * There are multiple Tx burst routines compiled from the same template
 * in the most optimal way for the dedicated Tx offloads set.
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_port_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	unsigned int diff = 0, olx = 0, i, m;

	MLX5_ASSERT(priv);
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
		/* We should support Multi-Segment Packets. */
		olx |= MLX5_TXOFF_CONFIG_MULTI;
	}
	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
			   RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
		/* We should support TCP Send Offload. */
		olx |= MLX5_TXOFF_CONFIG_TSO;
	}
	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
			   RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support Software Parser for Tunnels. */
		olx |= MLX5_TXOFF_CONFIG_SWP;
	}
	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		/* We should support IP/TCP/UDP Checksums. */
		olx |= MLX5_TXOFF_CONFIG_CSUM;
	}
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
		/* We should support VLAN insertion. */
		olx |= MLX5_TXOFF_CONFIG_VLAN;
	}
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
	    rte_mbuf_dynflag_lookup
			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
	    rte_mbuf_dynfield_lookup
			(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >= 0) {
		/* Offload configured, dynamic entities registered. */
		olx |= MLX5_TXOFF_CONFIG_TXPP;
	}
	if (priv->txqs_n && (*priv->txqs)[0]) {
		struct mlx5_txq_data *txd = (*priv->txqs)[0];

		if (txd->inlen_send) {
			/*
			 * Check the data inline requirements. Data inline
			 * is enabled on per device basis, we can check
			 * the first Tx queue only.
			 *
			 * If device does not support VLAN insertion in WQE
			 * and some queues are requested to perform VLAN
			 * insertion offload then inline must be enabled.
			 */
			olx |= MLX5_TXOFF_CONFIG_INLINE;
		}
	}
	if (config->mps == MLX5_MPW_ENHANCED &&
	    config->txq_inline_min <= 0) {
		/*
		 * The NIC supports Enhanced Multi-Packet Write
		 * and does not require minimal inline data.
		 */
		olx |= MLX5_TXOFF_CONFIG_EMPW;
	}
	if (rte_flow_dynf_metadata_avail()) {
		/* We should support Flow metadata. */
		olx |= MLX5_TXOFF_CONFIG_METADATA;
	}
	if (config->mps == MLX5_MPW) {
		/*
		 * The NIC supports Legacy Multi-Packet Write.
		 * The MLX5_TXOFF_CONFIG_MPW controls the descriptor building
		 * method in combination with MLX5_TXOFF_CONFIG_EMPW.
		 */
		if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
			     MLX5_TXOFF_CONFIG_SWP |
			     MLX5_TXOFF_CONFIG_VLAN |
			     MLX5_TXOFF_CONFIG_METADATA)))
			olx |= MLX5_TXOFF_CONFIG_EMPW |
			       MLX5_TXOFF_CONFIG_MPW;
	}
	/*
	 * Scan the routines table to find the minimal
	 * satisfying routine with requested offloads.
	 */
	m = RTE_DIM(txoff_func);
	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		unsigned int tmp;

		tmp = txoff_func[i].olx;
		if (tmp == olx) {
			/* Meets requested offloads exactly. */
			m = i;
			break;
		}
		if ((tmp & olx) != olx) {
			/* Does not meet requested offloads at all. */
			continue;
		}
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
			/* Do not enable legacy MPW if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
			/* Do not enable eMPW if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
			/* Do not enable inlining if not configured. */
			continue;
		if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_TXPP)
			/* Do not enable scheduling if not configured. */
			continue;
		/*
		 * Some routine meets the requirements.
		 * Check whether it has minimal number
		 * of not requested offloads.
		 */
		tmp = __builtin_popcountl(tmp & ~olx);
		if (m >= RTE_DIM(txoff_func) || tmp < diff) {
			/* First or better match, save and continue. */
			m = i;
			diff = tmp;
			continue;
		}
		if (tmp == diff) {
			tmp = txoff_func[i].olx ^ txoff_func[m].olx;
			if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
			    __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
				/* Prefer the lighter not requested offload. */
				m = i;
			}
		}
	}
	if (m >= RTE_DIM(txoff_func)) {
		DRV_LOG(DEBUG, "port %u has no selected Tx function"
			" for requested offloads %04X",
			dev->data->port_id, olx);
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u has selected Tx function"
		" supporting offloads %04X/%04X",
		dev->data->port_id, olx, txoff_func[m].olx);
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
		DRV_LOG(DEBUG, "\tMULTI (multi segment)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
		DRV_LOG(DEBUG, "\tTSO   (TCP send offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
		DRV_LOG(DEBUG, "\tSWP   (software parser)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
		DRV_LOG(DEBUG, "\tCSUM  (checksum offload)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
		DRV_LOG(DEBUG, "\tINLIN (inline data)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
		DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
		DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TXPP)
		DRV_LOG(DEBUG, "\tTSDLY (tx Scheduling)");
	if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
		if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
			DRV_LOG(DEBUG, "\tMPW   (Legacy MPW)");
		else
			DRV_LOG(DEBUG, "\tEMPW  (Enhanced MPW)");
	}
	return txoff_func[m].func;
}

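/*
 * Worked example of the selection logic (hypothetical configuration):
 * on a device with Enhanced MPW and no minimal-inline requirement, with
 * only RTE_ETH_TX_OFFLOAD_VLAN_INSERT requested and flow metadata
 * registered, olx becomes MLX5_TXOFF_CONFIG_VLAN |
 * MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW. That matches the
 * v_empw table entry exactly, so the scan terminates early and
 * mlx5_tx_burst_v_empw is returned without any popcount tie-breaking.
 */
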
/**
 * DPDK callback to get the TX queue information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param tx_queue_id
 *   Tx queue identifier.
 *
 * @param qinfo
 *   Pointer to the TX queue information structure.
 *
 * @return
 *   None.
 */
void
mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		  struct rte_eth_txq_info *qinfo)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);

	if (!txq)
		return;
	qinfo->nb_desc = txq->elts_s;
	qinfo->conf.tx_thresh.pthresh = 0;
	qinfo->conf.tx_thresh.hthresh = 0;
	qinfo->conf.tx_thresh.wthresh = 0;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.tx_free_thresh = 0;
	qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
}

/**
 * DPDK callback to get the TX packet burst mode information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param tx_queue_id
 *   Tx queue identifier.
 *
 * @param mode
 *   Pointer to the burst mode information.
 *
 * @return
 *   0 on success, -EINVAL on failure.
 */
int
mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t tx_queue_id,
		       struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
	unsigned int i, olx;

	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		if (pkt_burst == txoff_func[i].func) {
			olx = txoff_func[i].olx;
			snprintf(mode->info, sizeof(mode->info),
				 "%s%s%s%s%s%s%s%s%s%s",
				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
				 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
				 " + MULTI" : "",
				 (olx & MLX5_TXOFF_CONFIG_TSO) ?
				 " + TSO" : "",
				 (olx & MLX5_TXOFF_CONFIG_SWP) ?
				 " + SWP" : "",
				 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
				 " + CSUM" : "",
				 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
				 " + INLINE" : "",
				 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
				 " + VLAN" : "",
				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
				 " + METADATA" : "",
				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
				 " + TXPP" : "",
				 (txq && txq->fast_free) ?
				 " + Fast Free" : "");
			return 0;
		}
	}
	return -EINVAL;
}
