X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.c;h=8399a914436d75c121ab021206a5e9a3ba67a522;hb=b268a3ee3c46833bd692aded0789b35e8d2d1a14;hp=017742859d8ce31e98e6b0eae08bdf63b6232e3e;hpb=9a7fa9f76d9eb94a7986338ae1898cd2cb4b6e4f;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 017742859d..8399a91443 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -69,6 +69,35 @@ #include "mlx5_defs.h" #include "mlx5_prm.h" +static inline int +check_cqe(volatile struct mlx5_cqe *cqe, + unsigned int cqes_n, const uint16_t ci) + __attribute__((always_inline)); + +static inline void +txq_complete(struct txq *txq) __attribute__((always_inline)); + +static inline uint32_t +txq_mp2mr(struct txq *txq, struct rte_mempool *mp) + __attribute__((always_inline)); + +static inline void +mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe) + __attribute__((always_inline)); + +static inline uint32_t +rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) + __attribute__((always_inline)); + +static inline int +mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, + uint16_t cqe_cnt, uint32_t *rss_hash) + __attribute__((always_inline)); + +static inline uint32_t +rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) + __attribute__((always_inline)); + #ifndef NDEBUG /** @@ -98,11 +127,6 @@ check_cqe_seen(volatile struct mlx5_cqe *cqe) #endif /* NDEBUG */ -static inline int -check_cqe(volatile struct mlx5_cqe *cqe, - unsigned int cqes_n, const uint16_t ci) - __attribute__((always_inline)); - /** * Check whether CQE is valid. * @@ -170,9 +194,6 @@ tx_mlx5_wqe(struct txq *txq, uint16_t ci) return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE); } -static inline void -txq_complete(struct txq *txq) __attribute__((always_inline)); - /** * Manage TX completions. * @@ -217,8 +238,9 @@ txq_complete(struct txq *txq) } while (1); if (unlikely(cqe == NULL)) return; + txq->wqe_pi = ntohs(cqe->wqe_counter); ctrl = (volatile struct mlx5_wqe_ctrl *) - tx_mlx5_wqe(txq, ntohs(cqe->wqe_counter)); + tx_mlx5_wqe(txq, txq->wqe_pi); elts_tail = ctrl->ctrl3; assert(elts_tail < (1 << txq->wqe_n)); /* Free buffers. */ @@ -264,10 +286,6 @@ txq_mb2mp(struct rte_mbuf *buf) return buf->pool; } -static inline uint32_t -txq_mp2mr(struct txq *txq, struct rte_mempool *mp) - __attribute__((always_inline)); - /** * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, @@ -310,40 +328,96 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp) * * @param txq * Pointer to TX queue structure. + * @param wqe + * Pointer to the last WQE posted in the NIC. */ static inline void -mlx5_tx_dbrec(struct txq *txq) +mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe) { - uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset); - uint32_t data[4] = { - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s), - 0, - 0, - }; + uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); + volatile uint64_t *src = ((volatile uint64_t *)wqe); + rte_wmb(); *txq->qp_db = htonl(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); - memcpy(dst, (uint8_t *)data, 16); - txq->bf_offset ^= (1 << txq->bf_buf_size); + *dst = *src; } /** - * Prefetch a CQE. + * DPDK callback to check the status of a tx descriptor. * - * @param txq - * Pointer to TX queue structure. 
- * @param cqe_ci - * CQE consumer index. + * @param tx_queue + * The tx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. */ -static inline void -tx_prefetch_cqe(struct txq *txq, uint16_t ci) +int +mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct txq *txq = tx_queue; + const unsigned int elts_n = 1 << txq->elts_n; + const unsigned int elts_cnt = elts_n - 1; + unsigned int used; + + txq_complete(txq); + used = (txq->elts_head - txq->elts_tail) & elts_cnt; + if (offset < used) + return RTE_ETH_TX_DESC_FULL; + return RTE_ETH_TX_DESC_DONE; +} + +/** + * DPDK callback to check the status of a rx descriptor. + * + * @param rx_queue + * The rx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. + */ +int +mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) { + struct rxq *rxq = rx_queue; + struct rxq_zip *zip = &rxq->zip; volatile struct mlx5_cqe *cqe; + const unsigned int cqe_n = (1 << rxq->cqe_n); + const unsigned int cqe_cnt = cqe_n - 1; + unsigned int cq_ci; + unsigned int used; - cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)]; - rte_prefetch0(cqe); + /* if we are processing a compressed cqe */ + if (zip->ai) { + used = zip->cqe_cnt - zip->ca; + cq_ci = zip->cq_ci; + } else { + used = 0; + cq_ci = rxq->cq_ci; + } + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + while (check_cqe(cqe, cqe_n, cq_ci) == 0) { + int8_t op_own; + unsigned int n; + + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) + n = ntohl(cqe->byte_cnt); + else + n = 1; + cq_ci += n; + used += n; + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + } + used = RTE_MIN(used, (1U << rxq->elts_n) - 1); + if (offset < used) + return RTE_ETH_RX_DESC_DONE; + return RTE_ETH_RX_DESC_AVAIL; } /** @@ -368,6 +442,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int i = 0; unsigned int j = 0; unsigned int max; + uint16_t max_wqe; unsigned int comp; volatile struct mlx5_wqe_v *wqe = NULL; unsigned int segs_n = 0; @@ -377,22 +452,23 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); - tx_prefetch_cqe(txq, txq->cq_ci + 1); rte_prefetch0(*pkts); /* Start processing. 
*/ txq_complete(txq); max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); + if (unlikely(!max_wqe)) + return 0; do { volatile rte_v128u32_t *dseg = NULL; uint32_t length; unsigned int ds = 0; uintptr_t addr; uint64_t naddr; - uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE; - uint8_t ehdr[2]; + uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2; + uint16_t ehdr; uint8_t cs_flags = 0; #ifdef MLX5_PMD_SOFT_COUNTERS uint32_t total_length = 0; @@ -412,6 +488,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) --segs_n; if (!segs_n) --pkts_n; + if (unlikely(--max_wqe == 0)) + break; wqe = (volatile struct mlx5_wqe_v *) tx_mlx5_wqe(txq, txq->wqe_ci); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); @@ -419,8 +497,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_prefetch0(*pkts); addr = rte_pktmbuf_mtod(buf, uintptr_t); length = DATA_LEN(buf); - ehdr[0] = ((uint8_t *)addr)[0]; - ehdr[1] = ((uint8_t *)addr)[1]; + ehdr = (((uint8_t *)addr)[1] << 8) | + ((uint8_t *)addr)[0]; #ifdef MLX5_PMD_SOFT_COUNTERS total_length = length; #endif @@ -441,69 +519,85 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; } raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; - /* - * Start by copying the Ethernet header minus the first two - * bytes which will be appended at the end of the Ethernet - * segment. - */ - memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, 16); - length -= MLX5_WQE_DWORD_SIZE; - addr += MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); - - memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE - 2 - - sizeof(vlan)), - &vlan, sizeof(vlan)); - addr -= sizeof(vlan); - length += sizeof(vlan); + unsigned int len = 2 * ETHER_ADDR_LEN - 2; + + addr += 2; + length -= 2; + /* Copy Destination and source mac address. */ + memcpy((uint8_t *)raw, ((uint8_t *)addr), len); + /* Copy VLAN. */ + memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan)); + /* Copy missing two bytes to end the DSeg. */ + memcpy((uint8_t *)raw + len + sizeof(vlan), + ((uint8_t *)addr) + len, 2); + addr += len + 2; + length -= (len + 2); + } else { + memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, + MLX5_WQE_DWORD_SIZE); + length -= pkt_inline_sz; + addr += pkt_inline_sz; } /* Inline if enough room. */ - if (txq->max_inline != 0) { + if (txq->max_inline) { uintptr_t end = (uintptr_t) (((uintptr_t)txq->wqes) + (1 << txq->wqe_n) * MLX5_WQE_SIZE); - uint16_t max_inline = - txq->max_inline * RTE_CACHE_LINE_SIZE; - uint16_t room; - - /* - * raw starts two bytes before the boundary to - * continue the above copy of packet data. - */ - raw += MLX5_WQE_DWORD_SIZE - 2; - room = end - (uintptr_t)raw; - if (room > max_inline) { - uintptr_t addr_end = (addr + max_inline) & - ~(RTE_CACHE_LINE_SIZE - 1); - uint16_t copy_b = ((addr_end - addr) > length) ? - length : - (addr_end - addr); + unsigned int max_inline = txq->max_inline * + RTE_CACHE_LINE_SIZE - + MLX5_WQE_DWORD_SIZE; + uintptr_t addr_end = (addr + max_inline) & + ~(RTE_CACHE_LINE_SIZE - 1); + unsigned int copy_b = (addr_end > addr) ? + RTE_MIN((addr_end - addr), length) : + 0; + + raw += MLX5_WQE_DWORD_SIZE; + if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { + /* + * One Dseg remains in the current WQE. 
To + * keep the computation positive, it is + * removed after the bytes to Dseg conversion. + */ + uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; + if (unlikely(max_wqe < n)) + break; + max_wqe -= n; rte_memcpy((void *)raw, (void *)addr, copy_b); addr += copy_b; length -= copy_b; pkt_inline_sz += copy_b; - /* Sanity check. */ - assert(addr <= addr_end); } /* - * 2 DWORDs consumed by the WQE header + 1 DSEG + + * 2 DWORDs consumed by the WQE header + ETH segment + * the size of the inline part of the packet. */ ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2); if (length > 0) { - dseg = (volatile rte_v128u32_t *) - ((uintptr_t)wqe + - (ds * MLX5_WQE_DWORD_SIZE)); - if ((uintptr_t)dseg >= end) + if (ds % (MLX5_WQE_SIZE / + MLX5_WQE_DWORD_SIZE) == 0) { + if (unlikely(--max_wqe == 0)) + break; + dseg = (volatile rte_v128u32_t *) + tx_mlx5_wqe(txq, txq->wqe_ci + + ds / 4); + } else { dseg = (volatile rte_v128u32_t *) - txq->wqes; + ((uintptr_t)wqe + + (ds * MLX5_WQE_DWORD_SIZE)); + } goto use_dseg; } else if (!segs_n) { goto next_pkt; } else { + /* dseg will be advance as part of next_seg */ + dseg = (volatile rte_v128u32_t *) + ((uintptr_t)wqe + + ((ds - 1) * MLX5_WQE_DWORD_SIZE)); goto next_seg; } } else { @@ -538,12 +632,12 @@ next_seg: */ assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE)); if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) { - unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) & - ((1 << txq->wqe_n) - 1); - + if (unlikely(--max_wqe == 0)) + break; dseg = (volatile rte_v128u32_t *) - tx_mlx5_wqe(txq, n); - rte_prefetch0(tx_mlx5_wqe(txq, n + 1)); + tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4); + rte_prefetch0(tx_mlx5_wqe(txq, + txq->wqe_ci + ds / 4 + 1)); } else { ++dseg; } @@ -583,8 +677,7 @@ next_pkt: 0, cs_flags, 0, - (ehdr[1] << 24) | (ehdr[0] << 16) | - htons(pkt_inline_sz), + (ehdr << 16) | htons(pkt_inline_sz), }; txq->wqe_ci += (ds + 3) / 4; #ifdef MLX5_PMD_SOFT_COUNTERS @@ -614,7 +707,7 @@ next_pkt: txq->stats.opackets += i; #endif /* Ring QP doorbell. */ - mlx5_tx_dbrec(txq); + mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe); txq->elts_head = elts_head; return i; } @@ -709,6 +802,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int i = 0; unsigned int j = 0; unsigned int max; + uint16_t max_wqe; unsigned int comp; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, @@ -717,7 +811,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. */ @@ -725,6 +818,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); + if (unlikely(!max_wqe)) + return 0; do { struct rte_mbuf *buf = *(pkts++); unsigned int elts_head_next; @@ -758,6 +854,14 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) (mpw.wqe->eseg.cs_flags != cs_flags))) mlx5_mpw_close(txq, &mpw); if (mpw.state == MLX5_MPW_STATE_CLOSED) { + /* + * Multi-Packet WQE consumes at most two WQE. + * mlx5_mpw_new() expects to be able to use such + * resources. 
+ */ + if (unlikely(max_wqe < 2)) + break; + max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } @@ -822,7 +926,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Ring QP doorbell. */ if (mpw.state == MLX5_MPW_STATE_OPENED) mlx5_mpw_close(txq, &mpw); - mlx5_tx_dbrec(txq); + mlx5_tx_dbrec(txq, mpw.wqe); txq->elts_head = elts_head; return i; } @@ -913,16 +1017,28 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, unsigned int i = 0; unsigned int j = 0; unsigned int max; + uint16_t max_wqe; unsigned int comp; unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, }; + /* + * Compute the maximum number of WQE which can be consumed by inline + * code. + * - 2 DSEG for: + * - 1 control segment, + * - 1 Ethernet segment, + * - N Dseg from the inline request. + */ + const unsigned int wqe_inl_n = + ((2 * MLX5_WQE_DWORD_SIZE + + txq->max_inline * RTE_CACHE_LINE_SIZE) + + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE; if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. */ @@ -950,6 +1066,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, break; max -= segs_n; --pkts_n; + /* + * Compute max_wqe in case less WQE were consumed in previous + * iteration. + */ + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); /* Should we enable HW CKSUM offload */ if (buf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) @@ -975,9 +1096,20 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (mpw.state == MLX5_MPW_STATE_CLOSED) { if ((segs_n != 1) || (length > inline_room)) { + /* + * Multi-Packet WQE consumes at most two WQE. + * mlx5_mpw_new() expects to be able to use + * such resources. + */ + if (unlikely(max_wqe < 2)) + break; + max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } else { + if (unlikely(max_wqe < wqe_inl_n)) + break; + max_wqe -= wqe_inl_n; mlx5_mpw_inline_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } @@ -1042,12 +1174,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, rte_memcpy((void *)(uintptr_t)mpw.data.raw, (void *)addr, length); - mpw.data.raw += length; + + if (length == max) + mpw.data.raw = + (volatile void *)txq->wqes; + else + mpw.data.raw += length; } - if ((uintptr_t)mpw.data.raw == - (uintptr_t)tx_mlx5_wqe(txq, 1 << txq->wqe_n)) - mpw.data.raw = (volatile void *)txq->wqes; ++mpw.pkts_n; + mpw.total_len += length; ++j; if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) { mlx5_mpw_inline_close(txq, &mpw); @@ -1057,7 +1192,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, inline_room -= length; } } - mpw.total_len += length; elts_head = elts_head_next; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment sent bytes counter. 
*/ @@ -1091,7 +1225,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, mlx5_mpw_inline_close(txq, &mpw); else if (mpw.state == MLX5_MPW_STATE_OPENED) mlx5_mpw_close(txq, &mpw); - mlx5_tx_dbrec(txq); + mlx5_tx_dbrec(txq, mpw.wqe); txq->elts_head = elts_head; return i; } @@ -1111,30 +1245,28 @@ static inline uint32_t rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) { uint32_t pkt_type; - uint8_t flags = cqe->l4_hdr_type_etc; + uint16_t flags = ntohs(cqe->hdr_type_etc); - if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) + if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) { pkt_type = - TRANSPOSE(flags, - MLX5_CQE_RX_OUTER_IPV4_PACKET, - RTE_PTYPE_L3_IPV4) | - TRANSPOSE(flags, - MLX5_CQE_RX_OUTER_IPV6_PACKET, - RTE_PTYPE_L3_IPV6) | TRANSPOSE(flags, MLX5_CQE_RX_IPV4_PACKET, - RTE_PTYPE_INNER_L3_IPV4) | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) | TRANSPOSE(flags, MLX5_CQE_RX_IPV6_PACKET, - RTE_PTYPE_INNER_L3_IPV6); - else + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN); + pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ? + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN); + } else { pkt_type = TRANSPOSE(flags, MLX5_CQE_L3_HDR_TYPE_IPV6, - RTE_PTYPE_L3_IPV6) | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) | TRANSPOSE(flags, MLX5_CQE_L3_HDR_TYPE_IPV4, - RTE_PTYPE_L3_IPV4); + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN); + } return pkt_type; } @@ -1161,6 +1293,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, struct rxq_zip *zip = &rxq->zip; uint16_t cqe_n = cqe_cnt + 1; int len = 0; + uint16_t idx, end; /* Process compressed data in the CQE and mini arrays. */ if (zip->ai) { @@ -1171,6 +1304,14 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, len = ntohl((*mc)[zip->ai & 7].byte_cnt); *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { + /* Invalidate consumed CQEs */ + idx = zip->ca; + end = zip->na; + while (idx != end) { + (*rxq->cqes)[idx & cqe_cnt].op_own = + MLX5_CQE_INVALIDATE; + ++idx; + } /* * Increment consumer index to skip the number of * CQEs consumed. Hardware leaves holes in the CQ @@ -1180,8 +1321,9 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, zip->na += 8; } if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { - uint16_t idx = rxq->cq_ci + 1; - uint16_t end = zip->cq_ci; + /* Invalidate the rest */ + idx = zip->ca; + end = zip->cq_ci; while (idx != end) { (*rxq->cqes)[idx & cqe_cnt].op_own = @@ -1217,7 +1359,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, * special case the second one is located 7 CQEs after * the initial CQE instead of 8 for subsequent ones. */ - zip->ca = rxq->cq_ci & cqe_cnt; + zip->ca = rxq->cq_ci; zip->na = zip->ca + 7; /* Compute the next non compressed CQE. 
*/ --rxq->cq_ci; @@ -1226,6 +1368,13 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, len = ntohl((*mc)[0].byte_cnt); *rss_hash = ntohl((*mc)[0].rx_hash_result); zip->ai = 1; + /* Prefetch all the entries to be invalidated */ + idx = zip->ca; + end = zip->cq_ci; + while (idx != end) { + rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]); + ++idx; + } } else { len = ntohl(cqe->byte_cnt); *rss_hash = ntohl(cqe->rx_hash_res); @@ -1252,28 +1401,22 @@ static inline uint32_t rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK; - uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK; - - if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) || - (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6)) - ol_flags |= TRANSPOSE(cqe->hds_ip_ext, - MLX5_CQE_L3_OK, - PKT_RX_IP_CKSUM_GOOD); - if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) || - (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) || - (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) || - (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP)) - ol_flags |= TRANSPOSE(cqe->hds_ip_ext, - MLX5_CQE_L4_OK, - PKT_RX_L4_CKSUM_GOOD); + uint16_t flags = ntohs(cqe->hdr_type_etc); + + ol_flags = + TRANSPOSE(flags, + MLX5_CQE_RX_L3_HDR_VALID, + PKT_RX_IP_CKSUM_GOOD) | + TRANSPOSE(flags, + MLX5_CQE_RX_L4_HDR_VALID, + PKT_RX_L4_CKSUM_GOOD); if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) ol_flags |= - TRANSPOSE(cqe->l4_hdr_type_etc, - MLX5_CQE_RX_OUTER_IP_CSUM_OK, + TRANSPOSE(flags, + MLX5_CQE_RX_L3_HDR_VALID, PKT_RX_IP_CKSUM_GOOD) | - TRANSPOSE(cqe->l4_hdr_type_etc, - MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK, + TRANSPOSE(flags, + MLX5_CQE_RX_L4_HDR_VALID, PKT_RX_L4_CKSUM_GOOD); return ol_flags; } @@ -1362,15 +1505,17 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt->hash.rss = rss_hash_res; pkt->ol_flags = PKT_RX_RSS_HASH; } - if (rxq->mark && - ((cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_INVALID)) || - (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)))) { - pkt->hash.fdir.hi = - mlx5_flow_mark_get(cqe->sop_drop_qpn); - pkt->ol_flags &= ~PKT_RX_RSS_HASH; - pkt->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + if (rxq->mark && (cqe->sop_drop_qpn != + htonl(MLX5_FLOW_MARK_INVALID))) { + pkt->ol_flags |= PKT_RX_FDIR; + if (cqe->sop_drop_qpn != + htonl(MLX5_FLOW_MARK_DEFAULT)) { + uint32_t mark = cqe->sop_drop_qpn; + + pkt->ol_flags |= PKT_RX_FDIR_ID; + pkt->hash.fdir.hi = + mlx5_flow_mark_get(mark); + } } if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip | rxq->crc_present) { @@ -1380,7 +1525,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); } - if (cqe->l4_hdr_type_etc & + if (ntohs(cqe->hdr_type_etc) & MLX5_CQE_VLAN_STRIPPED) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
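
Among other changes, the hunks above add mlx5_rx_descriptor_status() and mlx5_tx_descriptor_status(), the driver hooks behind the generic ethdev descriptor-status API. A minimal usage sketch, not part of the patch itself: it assumes a DPDK release that already exposes rte_eth_rx_descriptor_status()/rte_eth_tx_descriptor_status(), and the port, queue and offset values are placeholders.

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Poll one RX and one TX ring slot on an already-started port. */
static void
poll_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* Both calls dispatch to the PMD handlers added by this patch. */
	int rx = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
	int tx = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	if (rx == RTE_ETH_RX_DESC_DONE)
		printf("RX slot %u holds a packet waiting to be received\n",
		       offset);
	else if (rx == RTE_ETH_RX_DESC_AVAIL)
		printf("RX slot %u is free for the NIC to fill\n", offset);

	if (tx == RTE_ETH_TX_DESC_FULL)
		printf("TX slot %u is still owned by the NIC\n", offset);
	else if (tx == RTE_ETH_TX_DESC_DONE)
		printf("TX slot %u has completed transmission\n", offset);
}

Offsets past the ring size yield RTE_ETH_RX_DESC_UNAVAIL/RTE_ETH_TX_DESC_UNAVAIL or a negative errno, so callers are expected to stay within the configured descriptor count.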