/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5_hw.h>
#include <infiniband/arch.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

static inline volatile struct mlx5_cqe64 *
get_cqe64(volatile struct mlx5_cqe cqes[],
	  unsigned int cqes_n, uint16_t *ci)
	  __attribute__((always_inline));

static inline int
rx_poll_len(struct rxq *rxq) __attribute__((always_inline));

static volatile struct mlx5_cqe64 *
get_cqe64(volatile struct mlx5_cqe cqes[],
	  unsigned int cqes_n, uint16_t *ci)
{
	volatile struct mlx5_cqe64 *cqe;
	uint16_t idx = *ci;
	uint8_t op_own;

	cqe = &cqes[idx & (cqes_n - 1)].cqe64;
	op_own = cqe->op_own;
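	/*
	 * Note (added for clarity): cqes_n is a power of two, so masking the
	 * consumer index with (cqes_n - 1) wraps it onto the CQ ring. The
	 * owner bit tested below flips on every lap around the ring, which
	 * is how software tells a CQE written for the current pass from a
	 * stale one left over from the previous pass.
	 */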
	if (unlikely((op_own & MLX5_CQE_OWNER_MASK) == !(idx & cqes_n))) {
		return NULL;
	} else if (unlikely(op_own & 0x80)) {
		switch (op_own >> 4) {
		case MLX5_CQE_INVALID:
			return NULL; /* No CQE */
		case MLX5_CQE_REQ_ERR:
			return cqe;
		case MLX5_CQE_RESP_ERR:
			++(*ci);
			return NULL;
		default:
			return NULL;
		}
	}
	if (cqe) {
		++(*ci);
		return cqe;
	}
	return NULL;
}

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static void
txq_complete(struct txq *txq)
{
	const unsigned int elts_n = txq->elts_n;
	const unsigned int cqe_n = txq->cqe_n;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	unsigned int wqe_ci = (unsigned int)-1;

	do {
		volatile struct mlx5_cqe64 *cqe;

		cqe = get_cqe64(*txq->cqes, cqe_n, &cq_ci);
		if (cqe == NULL)
			break;
		wqe_ci = ntohs(cqe->wqe_counter);
	} while (1);
	if (unlikely(wqe_ci == (unsigned int)-1))
		return;
	/* Free buffers. */
	elts_tail = (wqe_ci + 1) & (elts_n - 1);
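	/*
	 * Note (added for clarity): only the WQE counter of the most recent
	 * completion matters; every element up to and including that index
	 * has been transmitted, so the tail can jump straight to
	 * (wqe_ci + 1) modulo the ring size.
	 */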
	do {
		struct rte_mbuf *elt = (*txq->elts)[elts_free];
		unsigned int elts_free_next =
			(elts_free + 1) & (elts_n - 1);
		struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];

#ifndef NDEBUG
		/* Poisoning. */
		memset(&(*txq->elts)[elts_free],
		       0x66,
		       sizeof((*txq->elts)[elts_free]));
#endif
		RTE_MBUF_PREFETCH_TO_FREE(elt_next);
		/* Only one segment needs to be freed. */
		rte_pktmbuf_free_seg(elt);
		elts_free = elts_free_next;
	} while (elts_free != elts_tail);

	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_wmb();
	*txq->cq_db = htonl(cq_ci);
}

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
	__attribute__((always_inline));

/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
	unsigned int i;
	uint32_t lkey = (uint32_t)-1;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(htonl(txq->mp2mr[i].mr->lkey) ==
			       txq->mp2mr[i].lkey);
			lkey = txq->mp2mr[i].lkey;
			break;
		}
	}
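	/*
	 * Note (added for clarity): mp2mr[] is a small per-queue cache of
	 * mempool-to-MR associations, so a linear scan stays cheap; on a
	 * miss, the pool is registered into slot i below.
	 */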
	if (unlikely(lkey == (uint32_t)-1))
		lkey = txq_mp2mr_reg(txq, mp, i);
	return lkey;
}

/**
 * Write a regular WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the WQE to fill.
 * @param addr
 *   Buffer data address.
 * @param length
 *   Packet length.
 * @param lkey
 *   Memory region lkey.
 */
static inline void
mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
	       uintptr_t addr, uint32_t length, uint32_t lkey)
{
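	/*
	 * Note (added for clarity): a single-segment send WQE is built from
	 * three parts: a control segment (opcode, WQE index, QP number,
	 * completion flags), an Ethernet segment carrying the first
	 * MLX5_ETH_INLINE_HEADER_SIZE bytes of the frame inline, and one
	 * data segment pointing at the rest of the mbuf data.
	 */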
	wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
	wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
	wqe->wqe.ctrl.data[3] = 0;
	wqe->inl.eseg.rsvd0 = 0;
	wqe->inl.eseg.rsvd1 = 0;
	wqe->inl.eseg.mss = 0;
	wqe->inl.eseg.rsvd2 = 0;
	wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE);
	/* Copy the first 16 bytes into inline header. */
	rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
		   (uint8_t *)(uintptr_t)addr,
		   MLX5_ETH_INLINE_HEADER_SIZE);
	addr += MLX5_ETH_INLINE_HEADER_SIZE;
	length -= MLX5_ETH_INLINE_HEADER_SIZE;
	/* Store remaining data in data segment. */
	wqe->wqe.dseg.byte_count = htonl(length);
	wqe->wqe.dseg.lkey = lkey;
	wqe->wqe.dseg.addr = htonll(addr);
	/* Increment consumer index. */
	++txq->wqe_ci;
}

/**
 * Write a regular WQE with VLAN.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the WQE to fill.
 * @param addr
 *   Buffer data address.
 * @param length
 *   Packet length.
 * @param lkey
 *   Memory region lkey.
 * @param vlan_tci
 *   VLAN field to insert in packet.
 */
static inline void
mlx5_wqe_write_vlan(struct txq *txq, volatile union mlx5_wqe *wqe,
		    uintptr_t addr, uint32_t length, uint32_t lkey,
		    uint16_t vlan_tci)
{
	uint32_t vlan = htonl(0x81000000 | vlan_tci);

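	/*
	 * Note (added for clarity): the 802.1Q tag (TPID 0x8100 plus the TCI)
	 * is inserted by software into the inline header between the MAC
	 * addresses and the EtherType, so the data segment below starts
	 * right after the bytes already copied from the original frame.
	 */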
	wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
	wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
	wqe->wqe.ctrl.data[3] = 0;
	wqe->inl.eseg.rsvd0 = 0;
	wqe->inl.eseg.rsvd1 = 0;
	wqe->inl.eseg.mss = 0;
	wqe->inl.eseg.rsvd2 = 0;
	wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE);
	/*
	 * Copy 12 bytes of source & destination MAC address.
	 * Copy 4 bytes of VLAN.
	 * Copy 2 bytes of Ether type.
	 */
	rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
		   (uint8_t *)(uintptr_t)addr, 12);
	rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 12),
		   &vlan, sizeof(vlan));
	rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 16),
		   (uint8_t *)((uintptr_t)addr + 12), 2);
	addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
	length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
	/* Store remaining data in data segment. */
	wqe->wqe.dseg.byte_count = htonl(length);
	wqe->wqe.dseg.lkey = lkey;
	wqe->wqe.dseg.addr = htonll(addr);
	/* Increment consumer index. */
	++txq->wqe_ci;
}

/**
 * Ring TX queue doorbell.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static inline void
mlx5_tx_dbrec(struct txq *txq)
{
	uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
	uint32_t data[4] = {
		htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
		htonl(txq->qp_num_8s),
		0,
		0,
	};
	rte_wmb();
	*txq->qp_db = htonl(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	rte_mov16(dst, (uint8_t *)data);
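	/*
	 * Note (added for clarity): the BlueFlame register is split in two
	 * halves; toggling bf_offset below alternates between them so that
	 * back-to-back doorbells never overwrite a copy the device may
	 * still be reading.
	 */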
	txq->bf_offset ^= txq->bf_buf_size;
}

/**
 * Prefetch a CQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   CQE consumer index.
 */
static inline void
tx_prefetch_cqe(struct txq *txq, uint16_t ci)
{
	volatile struct mlx5_cqe64 *cqe;

	cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
	rte_prefetch0(cqe);
}

/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	uint16_t elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int i;
	unsigned int max;
	volatile union mlx5_wqe *wqe;
	struct rte_mbuf *buf;

	if (unlikely(!pkts_n))
		return 0;
	/* Prefetch first packet cacheline. */
	tx_prefetch_cqe(txq, txq->cq_ci);
	tx_prefetch_cqe(txq, txq->cq_ci + 1);
	/* Start processing. */
	txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (unlikely(max == 0))
		return 0;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
		uintptr_t addr;
		uint32_t length;
		uint32_t lkey;

		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
		buf = pkts[i];
		if (i + 1 < max)
			rte_prefetch0(pkts[i + 1]);
		/* Retrieve buffer information. */
		addr = rte_pktmbuf_mtod(buf, uintptr_t);
		length = DATA_LEN(buf);
		/* Update element. */
		(*txq->elts)[elts_head] = buf;
		/* Prefetch next buffer data. */
		if (i + 1 < max)
			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
						       volatile void *));
		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		if (buf->ol_flags & PKT_TX_VLAN_PKT)
			mlx5_wqe_write_vlan(txq, wqe, addr, length, lkey,
					    buf->vlan_tci);
		else
			mlx5_wqe_write(txq, wqe, addr, length, lkey);
		/* Request completion if needed. */
		if (unlikely(--txq->elts_comp == 0)) {
			wqe->wqe.ctrl.data[2] = htonl(8);
			txq->elts_comp = txq->elts_comp_cd_init;
		} else {
			wqe->wqe.ctrl.data[2] = 0;
		}
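		/*
		 * Note (added for clarity): only one WQE in every
		 * elts_comp_cd_init requests a completion (the non-zero
		 * ctrl.data[2] above); the doc comment for txq_complete()
		 * explains why skipping the others is safe.
		 */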
		/* Should we enable HW CKSUM offload */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			wqe->wqe.eseg.cs_flags =
				MLX5_ETH_WQE_L3_CSUM |
				MLX5_ETH_WQE_L4_CSUM;
		} else {
			wqe->wqe.eseg.cs_flags = 0;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += length;
#endif
		elts_head = elts_head_next;
	}
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	mlx5_tx_dbrec(txq);
	txq->elts_head = elts_head;
	return i;
}

/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
{
	uint32_t pkt_type;
	uint8_t flags = cqe->l4_hdr_type_etc;
	uint8_t info = cqe->rsvd0[0];
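	/*
	 * Note (added for clarity): TRANSPOSE() (a helper defined elsewhere
	 * in this PMD) shifts a flag from its completion-flag bit position
	 * to the corresponding RTE_PTYPE_* bit position, so each pair of
	 * arguments below maps one hardware flag to one packet-type bit.
	 */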
	if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  MLX5_CQE_L3_HDR_TYPE_IPV6,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  MLX5_CQE_L3_HDR_TYPE_IPV4,
				  RTE_PTYPE_L3_IPV4);
	return pkt_type;
}

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
{
	uint32_t ol_flags = 0;
	uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
	uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
	uint8_t info = cqe->rsvd0[0];

	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
		ol_flags |=
			(!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
			 PKT_RX_IP_CKSUM_BAD);
	if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
		ol_flags |=
			(!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
			 PKT_RX_L4_CKSUM_BAD);
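	/*
	 * Note (added for clarity): the multiplications above are a
	 * branchless way of setting a flag: !(x & OK) evaluates to 1 when
	 * the checksum-OK bit is clear and to 0 otherwise, so the *_BAD
	 * flag is added only for packets that failed validation.
	 */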
	/*
	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional.
	 */
	if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(~cqe->l4_hdr_type_etc,
				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~cqe->l4_hdr_type_etc,
				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	return ol_flags;
}

/**
 * Get size of the next packet.
 *
 * @param rxq
 *   RX queue to fetch packet from.
 *
 * @return
 *   Packet size in bytes.
 */
static inline int __attribute__((always_inline))
rx_poll_len(struct rxq *rxq)
{
	volatile struct mlx5_cqe64 *cqe;

	cqe = get_cqe64(*rxq->cqes, rxq->elts_n, &rxq->cq_ci);
	if (cqe)
		return ntohl(cqe->byte_cnt);
	return 0;
}

/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = dpdk_rxq;
	unsigned int pkts_ret = 0;
	unsigned int i;
	unsigned int rq_ci = rxq->rq_ci;
	const unsigned int elts_n = rxq->elts_n;
	const unsigned int wqe_cnt = elts_n - 1;

	for (i = 0; (i != pkts_n); ++i) {
		unsigned int idx = rq_ci & wqe_cnt;
		int len;
		struct rte_mbuf *rep;
		struct rte_mbuf *pkt;
		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
		volatile struct mlx5_cqe64 *cqe =
			&(*rxq->cqes)[rxq->cq_ci & wqe_cnt].cqe64;

		pkt = (*rxq->elts)[idx];
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			break;
		}
		SET_DATA_OFF(rep, RTE_PKTMBUF_HEADROOM);
		NB_SEGS(rep) = 1;
		PORT(rep) = rxq->port_id;
		NEXT(rep) = NULL;
		len = rx_poll_len(rxq);
		if (unlikely(len == 0)) {
			rte_mbuf_refcnt_set(rep, 0);
			__rte_mbuf_raw_free(rep);
			break;
		}
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = htonll((uintptr_t)rep->buf_addr +
				   RTE_PKTMBUF_HEADROOM);
		(*rxq->elts)[idx] = rep;
		/* Update pkt information. */
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
		    rxq->crc_present) {
			if (rxq->csum) {
				pkt->packet_type = rxq_cq_to_pkt_type(cqe);
				pkt->ol_flags = rxq_cq_to_ol_flags(rxq, cqe);
			}
			if (cqe->l4_hdr_type_etc & MLX5_CQE_VLAN_STRIPPED) {
				pkt->ol_flags |= PKT_RX_VLAN_PKT |
					PKT_RX_VLAN_STRIPPED;
				pkt->vlan_tci = ntohs(cqe->vlan_info);
			}
			if (rxq->crc_present)
				len -= ETHER_CRC_LEN;
		}
		PKT_LEN(pkt) = len;
		DATA_LEN(pkt) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += len;
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		++pkts_ret;
		++rq_ci;
	}
	if (unlikely((i == 0) && (rq_ci == rxq->rq_ci)))
		return 0;
	/* Repost WRs. */
#ifdef DEBUG_RECV
	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci;
	rte_wmb();
	*rxq->cq_db = htonl(rxq->cq_ci);
	rte_wmb();
	*rxq->rq_db = htonl(rxq->rq_ci);
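	/*
	 * Note (added for clarity): rte_wmb() keeps the buffer and index
	 * updates ordered before the doorbell-record writes, and the CQ
	 * doorbell is rung before the RQ doorbell so consumed CQEs are
	 * released to the device before new receive buffers are posted.
	 */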
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}