X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.c;h=7ca7b49415d006224127c6b377493b4d8b5e0efb;hb=84121f197187;hp=eccbbb954b56e298f18109c871670ba9169c4136;hpb=87011737b715ff7c3d4054777f702315d31b4982;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index eccbbb954b..7ca7b49415 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -55,6 +55,7 @@ #include #include #include +#include #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -62,6 +63,7 @@ #include "mlx5.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" #include "mlx5_defs.h" /** @@ -83,6 +85,7 @@ txq_complete(struct txq *txq) { unsigned int elts_comp = txq->elts_comp; unsigned int elts_tail = txq->elts_tail; + unsigned int elts_free = txq->elts_tail; const unsigned int elts_n = txq->elts_n; int wcs_n; @@ -92,7 +95,7 @@ txq_complete(struct txq *txq) DEBUG("%p: processing %u work requests completions", (void *)txq, elts_comp); #endif - wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp); + wcs_n = txq->poll_cnt(txq->cq, elts_comp); if (unlikely(wcs_n == 0)) return 0; if (unlikely(wcs_n < 0)) { @@ -109,11 +112,98 @@ txq_complete(struct txq *txq) elts_tail += wcs_n * txq->elts_comp_cd_init; if (elts_tail >= elts_n) elts_tail -= elts_n; + + while (elts_free != elts_tail) { + struct txq_elt *elt = &(*txq->elts)[elts_free]; + unsigned int elts_free_next = + (((elts_free + 1) == elts_n) ? 0 : elts_free + 1); + struct rte_mbuf *tmp = elt->buf; + struct txq_elt *elt_next = &(*txq->elts)[elts_free_next]; + +#ifndef NDEBUG + /* Poisoning. */ + memset(elt, 0x66, sizeof(*elt)); +#endif + RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + /* Faster than rte_pktmbuf_free(). */ + do { + struct rte_mbuf *next = NEXT(tmp); + + rte_pktmbuf_free_seg(tmp); + tmp = next; + } while (tmp != NULL); + elts_free = elts_free_next; + } + txq->elts_tail = elts_tail; txq->elts_comp = elts_comp; return 0; } +/* For best performance, this function should not be inlined. */ +struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *) + __attribute__((noinline)); + +/** + * Register mempool as a memory region. + * + * @param pd + * Pointer to protection domain. + * @param mp + * Pointer to memory pool. + * + * @return + * Memory region pointer, NULL in case of error. + */ +struct ibv_mr * +mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + uintptr_t start = (uintptr_t)STAILQ_FIRST(&mp->mem_list)->addr; + uintptr_t end = start + STAILQ_FIRST(&mp->mem_list)->len; + unsigned int i; + + DEBUG("mempool %p area start=%p end=%p size=%zu", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + /* Round start and end to page boundary if found in memory segments. */ + for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { + uintptr_t addr = (uintptr_t)ms[i].addr; + size_t len = ms[i].len; + unsigned int align = ms[i].hugepage_sz; + + if ((start > addr) && (start < addr + len)) + start = RTE_ALIGN_FLOOR(start, align); + if ((end > addr) && (end < addr + len)) + end = RTE_ALIGN_CEIL(end, align); + } + DEBUG("mempool %p using start=%p end=%p size=%zu for MR", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + return ibv_reg_mr(pd, + (void *)start, + end - start, + IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); +} + +/** + * Get Memory Pool (MP) from mbuf. 
If mbuf is indirect, the pool from which + * the cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. + */ +static struct rte_mempool * +txq_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_INDIRECT(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + /** * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, @@ -128,7 +218,7 @@ txq_complete(struct txq *txq) * mr->lkey on success, (uint32_t)-1 on failure. */ static uint32_t -txq_mp2mr(struct txq *txq, struct rte_mempool *mp) +txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) { unsigned int i; struct ibv_mr *mr; @@ -145,11 +235,9 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp) } } /* Add a new entry, register MR first. */ - DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp); - mr = ibv_reg_mr(txq->priv->pd, - (void *)mp->elt_va_start, - (mp->elt_va_end - mp->elt_va_start), - (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); + DEBUG("%p: discovered new memory pool \"%s\" (%p)", + (void *)txq, mp->name, (const void *)mp); + mr = mlx5_mp2mr(txq->priv->pd, mp); if (unlikely(mr == NULL)) { DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", (void *)txq); @@ -160,7 +248,7 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp) DEBUG("%p: MR <-> MP table full, dropping oldest entry.", (void *)txq); --i; - claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr)); + claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr)); memmove(&txq->mp2mr[0], &txq->mp2mr[1], (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); } @@ -168,11 +256,96 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp) txq->mp2mr[i].mp = mp; txq->mp2mr[i].mr = mr; txq->mp2mr[i].lkey = mr->lkey; - DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIu32, - (void *)txq, (void *)mp, txq->mp2mr[i].lkey); + DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, + (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey); return txq->mp2mr[i].lkey; } +struct txq_mp2mr_mbuf_check_data { + int ret; +}; + +/** + * Callback function for rte_mempool_obj_iter() to check whether a given + * mempool object looks like a mbuf. + * + * @param[in] mp + * The mempool pointer + * @param[in] arg + * Context data (struct txq_mp2mr_mbuf_check_data). Contains the + * return value. + * @param[in] obj + * Object address. + * @param index + * Object index, unused. + */ +static void +txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, + uint32_t index __rte_unused) +{ + struct txq_mp2mr_mbuf_check_data *data = arg; + struct rte_mbuf *buf = obj; + + /* Check whether mbuf structure fits element size and whether mempool + * pointer is valid. */ + if (sizeof(*buf) > mp->elt_size || buf->pool != mp) + data->ret = -1; +} + +/** + * Iterator function for rte_mempool_walk() to register existing mempools and + * fill the MP to MR cache of a TX queue. + * + * @param[in] mp + * Memory Pool to register. + * @param *arg + * Pointer to TX queue structure. + */ +void +txq_mp2mr_iter(struct rte_mempool *mp, void *arg) +{ + struct txq *txq = arg; + struct txq_mp2mr_mbuf_check_data data = { + .ret = 0, + }; + + /* Register mempool only if the first element looks like a mbuf. */ + if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || + data.ret == -1) + return; + txq_mp2mr(txq, mp); +} + +/** + * Insert VLAN using mbuf headroom space. 
+ * + * @param buf + * Buffer for VLAN insertion. + * + * @return + * 0 on success, errno value on failure. + */ +static inline int +insert_vlan_sw(struct rte_mbuf *buf) +{ + uintptr_t addr; + uint32_t vlan; + uint16_t head_room_len = rte_pktmbuf_headroom(buf); + + if (head_room_len < 4) + return EINVAL; + + addr = rte_pktmbuf_mtod(buf, uintptr_t); + vlan = htonl(0x81000000 | buf->vlan_tci); + memmove((void *)(addr - 4), (void *)addr, 12); + memcpy((void *)(addr + 8), &vlan, sizeof(vlan)); + + SET_DATA_OFF(buf, head_room_len - 4); + DATA_LEN(buf) += 4; + + return 0; +} + #if MLX5_PMD_SGE_WR_N > 1 /** @@ -254,7 +427,7 @@ tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, uint32_t lkey; /* Retrieve Memory Region key for this memory pool. */ - lkey = txq_mp2mr(txq, buf->pool); + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); if (unlikely(lkey == (uint32_t)-1)) { /* MR does not exist. */ DEBUG("%p: unable to get MP <-> MR association", @@ -307,6 +480,8 @@ tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, sge->length = size; sge->lkey = txq->mr_linear->lkey; sent_size += size; + /* Include last segment. */ + segs++; } return (struct tx_burst_sg_ret){ .length = sent_size, @@ -339,17 +514,19 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct txq *txq = (struct txq *)dpdk_txq; unsigned int elts_head = txq->elts_head; - const unsigned int elts_tail = txq->elts_tail; const unsigned int elts_n = txq->elts_n; unsigned int elts_comp_cd = txq->elts_comp_cd; unsigned int elts_comp = 0; unsigned int i; unsigned int max; int err; + struct rte_mbuf *buf = pkts[0]; assert(elts_comp_cd != 0); + /* Prefetch first packet cacheline. */ + rte_prefetch0(buf); txq_complete(txq); - max = (elts_n - (elts_head - elts_tail)); + max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; assert(max >= 1); @@ -361,75 +538,121 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (max > pkts_n) max = pkts_n; for (i = 0; (i != max); ++i) { - struct rte_mbuf *buf = pkts[i]; + struct rte_mbuf *buf_next = pkts[i + 1]; unsigned int elts_head_next = (((elts_head + 1) == elts_n) ? 0 : elts_head + 1); - struct txq_elt *elt_next = &(*txq->elts)[elts_head_next]; struct txq_elt *elt = &(*txq->elts)[elts_head]; unsigned int segs = NB_SEGS(buf); #ifdef MLX5_PMD_SOFT_COUNTERS unsigned int sent_size = 0; #endif uint32_t send_flags = 0; +#ifdef HAVE_VERBS_VLAN_INSERTION + int insert_vlan = 0; +#endif /* HAVE_VERBS_VLAN_INSERTION */ - /* Clean up old buffer. */ - if (likely(elt->buf != NULL)) { - struct rte_mbuf *tmp = elt->buf; - - /* Faster than rte_pktmbuf_free(). */ - do { - struct rte_mbuf *next = NEXT(tmp); - - rte_pktmbuf_free_seg(tmp); - tmp = next; - } while (tmp != NULL); - } + if (i + 1 < max) + rte_prefetch0(buf_next); /* Request TX completion. */ if (unlikely(--elts_comp_cd == 0)) { elts_comp_cd = txq->elts_comp_cd_init; ++elts_comp; send_flags |= IBV_EXP_QP_BURST_SIGNALED; } + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + send_flags |= IBV_EXP_QP_BURST_IP_CSUM; + /* HW does not support checksum offloads at arbitrary + * offsets but automatically recognizes the packet + * type. For inner L3/L4 checksums, only VXLAN (UDP) + * tunnels are currently supported. 
*/ + if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type)) + send_flags |= IBV_EXP_QP_BURST_TUNNEL; + } + if (buf->ol_flags & PKT_TX_VLAN_PKT) { +#ifdef HAVE_VERBS_VLAN_INSERTION + if (!txq->priv->mps) + insert_vlan = 1; + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + { + err = insert_vlan_sw(buf); + if (unlikely(err)) + goto stop; + } + } if (likely(segs == 1)) { uintptr_t addr; uint32_t length; uint32_t lkey; + uintptr_t buf_next_addr; /* Retrieve buffer information. */ addr = rte_pktmbuf_mtod(buf, uintptr_t); length = DATA_LEN(buf); - /* Retrieve Memory Region key for this memory pool. */ - lkey = txq_mp2mr(txq, buf->pool); - if (unlikely(lkey == (uint32_t)-1)) { - /* MR does not exist. */ - DEBUG("%p: unable to get MP <-> MR" - " association", (void *)txq); - /* Clean up TX element. */ - elt->buf = NULL; - goto stop; - } /* Update element. */ elt->buf = buf; if (txq->priv->vf) rte_prefetch0((volatile void *) (uintptr_t)addr); - RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + /* Prefetch next buffer data. */ + if (i + 1 < max) { + buf_next_addr = + rte_pktmbuf_mtod(buf_next, uintptr_t); + rte_prefetch0((volatile void *) + (uintptr_t)buf_next_addr); + } /* Put packet into send queue. */ #if MLX5_PMD_MAX_INLINE > 0 - if (length <= txq->max_inline) - err = txq->if_qp->send_pending_inline - (txq->qp, - (void *)addr, - length, - send_flags); - else + if (length <= txq->max_inline) { +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_inline_vlan + (txq->qp, + (void *)addr, + length, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending_inline + (txq->qp, + (void *)addr, + length, + send_flags); + } else #endif - err = txq->if_qp->send_pending - (txq->qp, - addr, - length, - lkey, - send_flags); + { + /* Retrieve Memory Region key for this + * memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR" + " association", (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_vlan + (txq->qp, + addr, + length, + lkey, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending + (txq->qp, + addr, + length, + lkey, + send_flags); + } if (unlikely(err)) goto stop; #ifdef MLX5_PMD_SOFT_COUNTERS @@ -444,13 +667,22 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) &sges); if (ret.length == (unsigned int)-1) goto stop; - RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); /* Put SG list into send queue. */ - err = txq->if_qp->send_pending_sg_list - (txq->qp, - sges, - ret.num, - send_flags); +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_sg_list_vlan + (txq->qp, + sges, + ret.num, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending_sg_list + (txq->qp, + sges, + ret.num, + send_flags); if (unlikely(err)) goto stop; #ifdef MLX5_PMD_SOFT_COUNTERS @@ -463,6 +695,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) #endif /* MLX5_PMD_SGE_WR_N > 1 */ } elts_head = elts_head_next; + buf = buf_next; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment sent bytes counter. */ txq->stats.obytes += sent_size; @@ -477,7 +710,7 @@ stop: txq->stats.opackets += i; #endif /* Ring QP doorbell. 
*/ - err = txq->if_qp->send_flush(txq->qp); + err = txq->send_flush(txq->qp); if (unlikely(err)) { /* A nonzero value is not supposed to be returned. * Nothing can be done about it. */ @@ -490,6 +723,97 @@ stop: return i; } +/** + * Translate RX completion flags to packet type. + * + * @param flags + * RX completion flags returned by poll_length_flags(). + * + * @note: fix mlx5_dev_supported_ptypes_get() if any change here. + * + * @return + * Packet type for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_pkt_type(uint32_t flags) +{ + uint32_t pkt_type; + + if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, + RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, + RTE_PTYPE_L3_IPV6) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, + RTE_PTYPE_INNER_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, + RTE_PTYPE_INNER_L3_IPV6); + else + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, + RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, + RTE_PTYPE_L3_IPV6); + return pkt_type; +} + +/** + * Translate RX completion flags to offload flags. + * + * @param[in] rxq + * Pointer to RX queue structure. + * @param flags + * RX completion flags returned by poll_length_flags(). + * + * @return + * Offload flags (ol_flags) for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) +{ + uint32_t ol_flags = 0; + + if (rxq->csum) { + /* Set IP checksum flag only for IPv4/IPv6 packets. */ + if (flags & + (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET)) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD); +#ifdef HAVE_EXP_CQ_RX_TCP_PACKET + /* Set L4 checksum flag only for TCP/UDP packets. */ + if (flags & + (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET)) +#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */ + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + } + /* + * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place + * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional + * (its value is 0). + */ + if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD) | + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + return ol_flags; +} + /** * DPDK callback for RX with scattered packets support. * @@ -510,9 +834,6 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; const unsigned int elts_n = rxq->elts_n; unsigned int elts_head = rxq->elts_head; - struct ibv_recv_wr head; - struct ibv_recv_wr **next = &head.next; - struct ibv_recv_wr *bad_wr; unsigned int i; unsigned int pkts_ret = 0; int ret; @@ -523,8 +844,6 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) return 0; for (i = 0; (i != pkts_n); ++i) { struct rxq_elt_sp *elt = &(*elts)[elts_head]; - struct ibv_recv_wr *wr = &elt->wr; - uint64_t wr_id = wr->wr_id; unsigned int len; unsigned int pkt_buf_len; struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */ @@ -532,18 +851,12 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM; unsigned int j = 0; uint32_t flags; + uint16_t vlan_tci; /* Sanity checks. 
*/ -#ifdef NDEBUG - (void)wr_id; -#endif - assert(wr_id < rxq->elts_n); - assert(wr->sg_list == elt->sges); - assert(wr->num_sge == RTE_DIM(elt->sges)); assert(elts_head < rxq->elts_n); assert(rxq->elts_head < rxq->elts_n); - ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL, - &flags); + ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); if (unlikely(ret < 0)) { struct ibv_wc wc; int wcs_n; @@ -570,20 +883,15 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Increment dropped packets counter. */ ++rxq->stats.idropped; #endif - /* Link completed WRs together for repost. */ - *next = wr; - next = &wr->next; goto repost; } ret = wc.byte_len; } if (ret == 0) break; - len = ret; + assert(ret >= (rxq->crc_present << 2)); + len = ret - (rxq->crc_present << 2); pkt_buf_len = len; - /* Link completed WRs together for repost. */ - *next = wr; - next = &wr->next; /* * Replace spent segments with new ones, concatenate and * return them as pkt_buf. @@ -594,20 +902,20 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) struct rte_mbuf *rep; unsigned int seg_tailroom; + assert(seg != NULL); /* * Fetch initial bytes of packet descriptor into a * cacheline while allocating rep. */ rte_prefetch0(seg); - rep = __rte_mbuf_raw_alloc(rxq->mp); + rep = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(rep == NULL)) { /* * Unable to allocate a replacement mbuf, * repost WR. */ - DEBUG("rxq=%p, wr_id=%" PRIu64 ":" - " can't allocate a new mbuf", - (void *)rxq, wr_id); + DEBUG("rxq=%p: can't allocate a new mbuf", + (void *)rxq); if (pkt_buf != NULL) { *pkt_buf_next = NULL; rte_pktmbuf_free(pkt_buf); @@ -669,6 +977,16 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) NB_SEGS(pkt_buf) = j; PORT(pkt_buf) = rxq->port_id; PKT_LEN(pkt_buf) = pkt_buf_len; + if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { + pkt_buf->packet_type = rxq_cq_to_pkt_type(flags); + pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags); +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { + pkt_buf->ol_flags |= PKT_RX_VLAN_PKT; + pkt_buf->vlan_tci = vlan_tci; + } +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + } /* Return packet. */ *(pkts++) = pkt_buf; @@ -678,26 +996,20 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rxq->stats.ibytes += pkt_buf_len; #endif repost: + ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges)); + if (unlikely(ret)) { + /* Inability to repost WRs is fatal. */ + DEBUG("%p: recv_sg_list(): failed (ret=%d)", + (void *)rxq->priv, + ret); + abort(); + } if (++elts_head >= elts_n) elts_head = 0; continue; } if (unlikely(i == 0)) return 0; - *next = NULL; - /* Repost WRs. */ -#ifdef DEBUG_RECV - DEBUG("%p: reposting %d WRs", (void *)rxq, i); -#endif - ret = ibv_post_recv(rxq->qp, head.next, &bad_wr); - if (unlikely(ret)) { - /* Inability to repost WRs is fatal. */ - DEBUG("%p: ibv_post_recv(): failed for WR %p: %s", - (void *)rxq->priv, - (void *)bad_wr, - strerror(ret)); - abort(); - } rxq->elts_head = elts_head; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. 
*/ @@ -739,18 +1051,14 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n); for (i = 0; (i != pkts_n); ++i) { struct rxq_elt *elt = &(*elts)[elts_head]; - struct ibv_recv_wr *wr = &elt->wr; - uint64_t wr_id = wr->wr_id; unsigned int len; - struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr - - WR_ID(wr_id).offset); + struct rte_mbuf *seg = elt->buf; struct rte_mbuf *rep; uint32_t flags; + uint16_t vlan_tci; /* Sanity checks. */ - assert(WR_ID(wr_id).id < rxq->elts_n); - assert(wr->sg_list == &elt->sge); - assert(wr->num_sge == 1); + assert(seg != NULL); assert(elts_head < rxq->elts_n); assert(rxq->elts_head < rxq->elts_n); /* @@ -759,8 +1067,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) */ rte_prefetch0(seg); rte_prefetch0(&seg->cacheline1); - ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL, - &flags); + ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); if (unlikely(ret < 0)) { struct ibv_wc wc; int wcs_n; @@ -795,16 +1102,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } if (ret == 0) break; - len = ret; - rep = __rte_mbuf_raw_alloc(rxq->mp); + assert(ret >= (rxq->crc_present << 2)); + len = ret - (rxq->crc_present << 2); + rep = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(rep == NULL)) { /* * Unable to allocate a replacement mbuf, * repost WR. */ - DEBUG("rxq=%p, wr_id=%" PRIu32 ":" - " can't allocate a new mbuf", - (void *)rxq, WR_ID(wr_id).id); + DEBUG("rxq=%p: can't allocate a new mbuf", + (void *)rxq); /* Increment out of memory counters. */ ++rxq->stats.rx_nombuf; ++rxq->priv->dev->data->rx_mbuf_alloc_failed; @@ -814,10 +1121,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Reconfigure sge to use rep instead of seg. */ elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM; assert(elt->sge.lkey == rxq->mr->lkey); - WR_ID(wr->wr_id).offset = - (((uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM) - - (uintptr_t)rep); - assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id); + elt->buf = rep; /* Add SGE to array for repost. */ sges[i] = elt->sge; @@ -829,7 +1133,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) NEXT(seg) = NULL; PKT_LEN(seg) = len; DATA_LEN(seg) = len; - + if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { + seg->packet_type = rxq_cq_to_pkt_type(flags); + seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags); +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { + seg->ol_flags |= PKT_RX_VLAN_PKT; + seg->vlan_tci = vlan_tci; + } +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + } /* Return packet. */ *(pkts++) = seg; ++pkts_ret; @@ -848,7 +1161,7 @@ repost: #ifdef DEBUG_RECV DEBUG("%p: reposting %u WRs", (void *)rxq, i); #endif - ret = rxq->if_qp->recv_burst(rxq->qp, sges, i); + ret = rxq->recv(rxq->wq, sges, i); if (unlikely(ret)) { /* Inability to repost WRs is fatal. */ DEBUG("%p: recv_burst(): failed (ret=%d)",
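
The following standalone sketch (not part of the patch) illustrates the byte shuffling performed by the insert_vlan_sw() helper added above: the 12-byte destination/source MAC pair is shifted 4 bytes into the headroom and an 802.1Q tag (TPID 0x8100 followed by the TCI) is written into the gap before the original EtherType. It assumes a plain byte array with a hypothetical 16-byte headroom instead of an rte_mbuf so it compiles without DPDK; insert_vlan_demo() and its parameters are illustrative names only.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Shift DMAC/SMAC into the headroom and write TPID + TCI in the gap. */
static int
insert_vlan_demo(uint8_t *frame, size_t headroom, uint16_t vlan_tci)
{
	uint32_t vlan = htonl(0x81000000 | vlan_tci);

	if (headroom < 4)
		return -1;
	/* Move DMAC + SMAC (12 bytes) 4 bytes towards the headroom. */
	memmove(frame - 4, frame, 12);
	/* Write the 802.1Q tag right after the shifted addresses. */
	memcpy(frame - 4 + 12, &vlan, sizeof(vlan));
	return 0;
}

int
main(void)
{
	uint8_t buf[64] = { 0 };
	uint8_t *frame = buf + 16; /* 16 bytes of headroom before the frame. */

	/* Minimal Ethernet header: DMAC, SMAC, EtherType 0x0800 (IPv4). */
	memset(frame, 0xaa, 6);
	memset(frame + 6, 0xbb, 6);
	frame[12] = 0x08;
	frame[13] = 0x00;
	if (insert_vlan_demo(frame, 16, 0x007b) == 0)
		/* The tagged frame now starts 4 bytes earlier and is 4 bytes
		 * longer, mirroring SET_DATA_OFF()/DATA_LEN() in the driver;
		 * the tag bytes printed here are 81 00 00 7b. */
		printf("tag: %02x %02x %02x %02x\n",
		       frame[8], frame[9], frame[10], frame[11]);
	return 0;
}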