/*-
 *   BSD LICENSE
 *
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5_hw.h>
#include <infiniband/arch.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

static inline volatile struct mlx5_cqe64 *
get_cqe64(volatile struct mlx5_cqe cqes[],
	  unsigned int cqes_n, uint16_t *ci)
	  __attribute__((always_inline));

static inline int
rx_poll_len(struct rxq *rxq) __attribute__((always_inline));

static volatile struct mlx5_cqe64 *
get_cqe64(volatile struct mlx5_cqe cqes[],
	  unsigned int cqes_n, uint16_t *ci)
{
	volatile struct mlx5_cqe64 *cqe;
	uint16_t idx = *ci;
	uint8_t op_own;

	cqe = &cqes[idx & (cqes_n - 1)].cqe64;
	op_own = cqe->op_own;
	if (unlikely((op_own & MLX5_CQE_OWNER_MASK) == !(idx & cqes_n))) {
		return NULL;
	} else if (unlikely(op_own & 0x80)) {
		switch (op_own >> 4) {
		case MLX5_CQE_INVALID:
			return NULL; /* No CQE. */
		case MLX5_CQE_REQ_ERR:
			return cqe;
		case MLX5_CQE_RESP_ERR:
			++(*ci);
			return NULL;
		default:
			return NULL;
		}
	}
	++(*ci);
	return cqe;
}
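
/*
 * Illustrative sketch (not part of the driver): the CQE ownership test
 * used by get_cqe64() above. Bit log2(cqes_n) of the consumer index flips
 * every time the index wraps around the ring, so a CQE belongs to the
 * current pass exactly when its owner bit matches that "pass" bit.
 */
static inline int
example_cqe_is_owned(uint8_t op_own, uint16_t ci, unsigned int cqes_n)
{
	/* cqes_n is a power of two; (ci & cqes_n) extracts the pass bit. */
	return (op_own & MLX5_CQE_OWNER_MASK) == !!(ci & cqes_n);
}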

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	unsigned int elts_free = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	int wcs_n;

	if (unlikely(elts_comp == 0))
		return 0;
	DEBUG("%p: processing %u work requests completions",
	      (void *)txq, elts_comp);
	wcs_n = txq->poll_cnt(txq->cq, elts_comp);
	if (unlikely(wcs_n == 0))
		return 0;
	if (unlikely(wcs_n < 0)) {
		DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
		      (void *)txq, wcs_n);
		return -1;
	}
	elts_comp -= wcs_n;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume WC status is successful as nothing can be done about it
	 * anyway.
	 */
	elts_tail += wcs_n * txq->elts_comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	while (elts_free != elts_tail) {
		struct txq_elt *elt = &(*txq->elts)[elts_free];
		unsigned int elts_free_next =
			(((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
		struct rte_mbuf *tmp = elt->buf;
		struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];

#ifndef NDEBUG
		/* Poisoning. */
		memset(elt, 0x66, sizeof(*elt));
#endif
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		/* Faster than rte_pktmbuf_free(). */
		do {
			struct rte_mbuf *next = NEXT(tmp);

			rte_pktmbuf_free_seg(tmp);
			tmp = next;
		} while (tmp != NULL);
		elts_free = elts_free_next;
	}
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}
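
/*
 * Illustrative sketch (not part of the driver): how txq_complete() turns a
 * count of work completions back into ring entries. Each completion stands
 * for comp_cd_init posted sends, and outstanding sends never exceed elts_n,
 * so one conditional subtraction is enough to handle the wrap.
 */
static inline unsigned int
example_advance_tail(unsigned int elts_tail, unsigned int wcs_n,
		     unsigned int comp_cd_init, unsigned int elts_n)
{
	elts_tail += wcs_n * comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	return elts_tail;
}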

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
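
/*
 * Illustrative sketch (not part of the driver): why the data pool matters.
 * An indirect mbuf allocated from one pool can carry data owned by an mbuf
 * from another pool, and the MR lookup must use the latter. Both pools are
 * hypothetical and exist only for this example.
 */
static inline void
example_mb2mp_usage(struct rte_mempool *data_pool,
		    struct rte_mempool *clone_pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(data_pool);
	struct rte_mbuf *c = rte_pktmbuf_alloc(clone_pool);

	if ((m == NULL) || (c == NULL)) {
		rte_pktmbuf_free(m);
		rte_pktmbuf_free(c);
		return;
	}
	rte_pktmbuf_attach(c, m); /* c now points into m's data buffer. */
	assert(txq_mb2mp(c) == data_pool); /* Not clone_pool. */
	rte_pktmbuf_free(c);
	rte_pktmbuf_free(m);
}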

static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
	__attribute__((always_inline));

/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
	unsigned int i;
	uint32_t lkey = (uint32_t)-1;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			lkey = txq->mp2mr[i].lkey;
			break;
		}
	}
	if (unlikely(lkey == (uint32_t)-1))
		lkey = txq_mp2mr_reg(txq, mp, i);
	return lkey;
}

/**
 * Insert VLAN using mbuf headroom space.
 *
 * @param buf
 *   Buffer for VLAN insertion.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
insert_vlan_sw(struct rte_mbuf *buf)
{
	uintptr_t addr;
	uint32_t vlan;
	uint16_t head_room_len = rte_pktmbuf_headroom(buf);

	if (head_room_len < 4)
		return EINVAL;
	addr = rte_pktmbuf_mtod(buf, uintptr_t);
	vlan = htonl(0x81000000 | buf->vlan_tci);
	memmove((void *)(addr - 4), (void *)addr, 12);
	memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
	SET_DATA_OFF(buf, head_room_len - 4);
	DATA_LEN(buf) += 4;
	return 0;
}
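
/*
 * Illustrative sketch (not part of the driver): the same insertion on a
 * plain byte buffer. The 12 bytes of destination + source MAC move down by
 * 4, and the 802.1Q tag (TPID 0x8100 + TCI) lands at offset 12. The caller
 * must guarantee 4 writable bytes in front of `frame`.
 */
static inline void
example_insert_vlan(uint8_t *frame, uint16_t vlan_tci)
{
	uint32_t tag = htonl(0x81000000 | vlan_tci);

	memmove(frame - 4, frame, 12);             /* Shift DMAC + SMAC. */
	memcpy(frame - 4 + 12, &tag, sizeof(tag)); /* Write TPID + TCI. */
}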

/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp_cd = txq->elts_comp_cd;
	unsigned int elts_comp = 0;
	unsigned int i;
	unsigned int max;
	int err;
	struct rte_mbuf *buf = pkts[0];

	assert(elts_comp_cd != 0);
	/* Prefetch first packet cacheline. */
	rte_prefetch0(buf);
	txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max == 0)
		return 0;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		/* Avoid reading past the end of pkts[] on the last entry. */
		struct rte_mbuf *buf_next =
			((i + 1) != max) ? pkts[i + 1] : NULL;
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		uint32_t send_flags = 0;
#ifdef HAVE_VERBS_VLAN_INSERTION
		int insert_vlan = 0;
#endif /* HAVE_VERBS_VLAN_INSERTION */
		uintptr_t addr;
		uint32_t length;
		uint32_t lkey;
		uintptr_t buf_next_addr;

		if (i + 1 < max)
			rte_prefetch0(buf_next);
		/* Request TX completion. */
		if (unlikely(--elts_comp_cd == 0)) {
			elts_comp_cd = txq->elts_comp_cd_init;
			++elts_comp;
			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
		}
		/* Should we enable HW CKSUM offload? */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
			/* HW does not support checksum offloads at arbitrary
			 * offsets but automatically recognizes the packet
			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
			 * tunnels are currently supported. */
			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
		}
		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
#ifdef HAVE_VERBS_VLAN_INSERTION
			insert_vlan = 1;
#else /* HAVE_VERBS_VLAN_INSERTION */
			err = insert_vlan_sw(buf);
			if (unlikely(err))
				goto stop;
#endif /* HAVE_VERBS_VLAN_INSERTION */
		}
		/* Retrieve buffer information. */
		addr = rte_pktmbuf_mtod(buf, uintptr_t);
		length = DATA_LEN(buf);
		/* Update element. */
		elt->buf = buf;
		if (txq->priv->sriov)
			rte_prefetch0((volatile void *)
				      (uintptr_t)addr);
		/* Prefetch next buffer data. */
		if (i + 1 < max) {
			buf_next_addr =
				rte_pktmbuf_mtod(buf_next, uintptr_t);
			rte_prefetch0((volatile void *)
				      (uintptr_t)buf_next_addr);
		}
		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR"
			      " association", (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
#ifdef HAVE_VERBS_VLAN_INSERTION
		if (insert_vlan)
			err = txq->send_pending_vlan
				(txq->qp,
				 addr,
				 length,
				 lkey,
				 send_flags,
				 &buf->vlan_tci);
		else
#endif /* HAVE_VERBS_VLAN_INSERTION */
			err = txq->send_pending
				(txq->qp,
				 addr,
				 length,
				 lkey,
				 send_flags);
		if (unlikely(err))
			goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += length;
#endif
		elts_head = elts_head_next;
		buf = buf_next;
	}
stop:
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	err = txq->send_flush(txq->qp);
	if (unlikely(err)) {
		/* A nonzero value is not supposed to be returned.
		 * Nothing can be done about it. */
		DEBUG("%p: send_flush() failed with error %d",
		      (void *)txq, err);
	}
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
	txq->elts_comp_cd = elts_comp_cd;
	return i;
}
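
/*
 * Illustrative sketch (not part of the driver): the free-slot computation
 * at the top of mlx5_tx_burst(). Both indexes wrap at elts_n, so the
 * unsigned subtraction can land above elts_n; the conditional fixes it up.
 * One entry is always kept unused so head == tail means "empty", which is
 * why the result is at least 1 before the final decrement.
 */
static inline unsigned int
example_tx_ring_free(unsigned int elts_head, unsigned int elts_tail,
		     unsigned int elts_n)
{
	unsigned int max = elts_n - (elts_head - elts_tail);

	if (max > elts_n)
		max -= elts_n;
	/* Always leave one free entry in the ring. */
	return max - 1;
}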

/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
{
	uint32_t pkt_type;
	uint8_t flags = cqe->l4_hdr_type_etc;
	uint8_t info = cqe->rsvd0[0];

	if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(info,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(info,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(info,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(info,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  MLX5_CQE_L3_HDR_TYPE_IPV6,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  MLX5_CQE_L3_HDR_TYPE_IPV4,
				  RTE_PTYPE_L3_IPV4);
	return pkt_type;
}
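
/*
 * Illustrative sketch (not part of the driver): a function equivalent of
 * the TRANSPOSE() macro from mlx5_utils.h as used above. It moves a flag
 * from one bit position to another without branching on the flag itself;
 * `from` and `to` are assumed to be single-bit masks.
 */
static inline uint32_t
example_transpose(uint32_t val, uint32_t from, uint32_t to)
{
	/* Mask the source bit, then scale it to the destination bit. */
	return (from >= to) ?
	       ((val & from) / (from / to)) :
	       ((val & from) * (to / from));
}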

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
{
	uint32_t ol_flags = 0;
	uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
	uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
	uint8_t info = cqe->rsvd0[0];

	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
		ol_flags |=
			(!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
			 PKT_RX_IP_CKSUM_BAD);
	if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
		ol_flags |=
			(!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
			 PKT_RX_L4_CKSUM_BAD);
	/*
	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
	 * (its value is 0).
	 */
	if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(~cqe->l4_hdr_type_etc,
				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~cqe->l4_hdr_type_etc,
				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	return ol_flags;
}
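
/*
 * Illustrative sketch (not part of the driver): the branchless pattern used
 * above. Since !(status & ok_bit) evaluates to 0 or 1, multiplying by the
 * "bad" flag sets that flag exactly when the OK bit is absent, without a
 * branch in the hot path.
 */
static inline uint32_t
example_bad_flag(uint8_t status, uint8_t ok_bit, uint32_t bad_flag)
{
	return !(status & ok_bit) * bad_flag;
}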

/**
 * Get size of the next packet.
 *
 * @param rxq
 *   RX queue to fetch packet from.
 *
 * @return
 *   Packet size in bytes.
 */
static inline int __attribute__((always_inline))
rx_poll_len(struct rxq *rxq)
{
	volatile struct mlx5_cqe64 *cqe;

	cqe = get_cqe64(*rxq->cqes, rxq->elts_n, &rxq->cq_ci);
	if (cqe)
		return ntohl(cqe->byte_cnt);
	return 0;
}

/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = dpdk_rxq;
	unsigned int pkts_ret = 0;
	unsigned int i;
	unsigned int rq_ci = rxq->rq_ci;
	const unsigned int elts_n = rxq->elts_n;
	const unsigned int wqe_cnt = elts_n - 1;

	for (i = 0; (i != pkts_n); ++i) {
		unsigned int idx = rq_ci & wqe_cnt;
		struct rte_mbuf *rep;
		struct rte_mbuf *pkt;
		unsigned int len;
		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
		volatile struct mlx5_cqe64 *cqe =
			&(*rxq->cqes)[rxq->cq_ci & wqe_cnt].cqe64;

		pkt = (*rxq->elts)[idx];
		rte_prefetch0(cqe);
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			break;
		}
		SET_DATA_OFF(rep, RTE_PKTMBUF_HEADROOM);
		NB_SEGS(rep) = 1;
		PORT(rep) = rxq->port_id;
		NEXT(rep) = NULL;
		len = rx_poll_len(rxq);
		if (unlikely(len == 0)) {
			rte_mbuf_refcnt_set(rep, 0);
			__rte_mbuf_raw_free(rep);
			break;
		}
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = htonll((uintptr_t)rep->buf_addr +
				   RTE_PKTMBUF_HEADROOM);
		(*rxq->elts)[idx] = rep;
		/* Update pkt information. */
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
		    rxq->crc_present) {
			if (rxq->csum) {
				pkt->packet_type = rxq_cq_to_pkt_type(cqe);
				pkt->ol_flags = rxq_cq_to_ol_flags(rxq, cqe);
			}
			if (cqe->l4_hdr_type_etc & MLX5_CQE_VLAN_STRIPPED) {
				pkt->ol_flags |= PKT_RX_VLAN_PKT |
					PKT_RX_VLAN_STRIPPED;
				pkt->vlan_tci = ntohs(cqe->vlan_info);
			}
			if (rxq->crc_present)
				len -= ETHER_CRC_LEN;
		}
		DATA_LEN(pkt) = len;
		PKT_LEN(pkt) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += len;
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		++pkts_ret;
		++rq_ci;
	}
	if (unlikely((i == 0) && (rq_ci == rxq->rq_ci)))
		return 0;
	/* Repost WRs. */
	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci;
	rte_wmb();
	*rxq->cq_db = htonl(rxq->cq_ci);
	rte_wmb();
	*rxq->rq_db = htonl(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}