X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.c;h=5ac63da8039d17cfb5ce4fd1b465dc4d5e3c7575;hb=1b7b9f170fcebbbd0708fab554dcb5a7badef8cf;hp=37a2084b70d732a0c1069c82e414bf38864a3503;hpb=cacb44a09962fb91031f00c8693a09129ce4e9bf;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 37a2084b70..5ac63da803 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -3,7 +3,6 @@ * Copyright 2015-2019 Mellanox Technologies, Ltd */ -#include #include #include #include @@ -537,6 +536,89 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) return RTE_ETH_RX_DESC_AVAIL; } +/** + * DPDK callback to get the RX queue information + * + * @param dev + * Pointer to the device structure. + * + * @param rx_queue_id + * Rx queue identificator. + * + * @param qinfo + * Pointer to the RX queue information structure. + * + * @return + * None. + */ + +void +mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + + if (!rxq) + return; + qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq->mprq_mp : rxq->mp; + qinfo->conf.rx_thresh.pthresh = 0; + qinfo->conf.rx_thresh.hthresh = 0; + qinfo->conf.rx_thresh.wthresh = 0; + qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh; + qinfo->conf.rx_drop_en = 1; + qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1; + qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = 1 << rxq->elts_n; +} + +/** + * DPDK callback to get the RX packet burst mode information + * + * @param dev + * Pointer to the device structure. + * + * @param rx_queue_id + * Rx queue identificatior. + * + * @param mode + * Pointer to the burts mode information. + * + * @return + * 0 as success, -EINVAL as failure. + */ + +int +mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, + uint16_t rx_queue_id __rte_unused, + struct rte_eth_burst_mode *mode) +{ + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + + if (pkt_burst == mlx5_rx_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", "Scalar"); + } else if (pkt_burst == mlx5_rx_burst_mprq) { + snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ"); + } else if (pkt_burst == mlx5_rx_burst_vec) { +#if defined RTE_ARCH_X86_64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE"); +#elif defined RTE_ARCH_ARM64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon"); +#elif defined RTE_ARCH_PPC_64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec"); +#else + return -EINVAL; +#endif + } else { + return -EINVAL; + } + return 0; +} + /** * DPDK callback to get the number of used descriptors in a RX queue * @@ -784,7 +866,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) byte_count = DATA_LEN(buf); } /* scat->addr must be able to store a pointer. 
*/ - assert(sizeof(scat->addr) >= sizeof(uintptr_t)); + MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ .addr = rte_cpu_to_be_64(addr), .byte_count = rte_cpu_to_be_32(byte_count), @@ -1325,7 +1407,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) break; } while (pkt != seg) { - assert(pkt != (*rxq->elts)[idx]); + MLX5_ASSERT(pkt != (*rxq->elts)[idx]); rep = NEXT(pkt); NEXT(pkt) = NULL; NB_SEGS(pkt) = 1; @@ -1342,7 +1424,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) break; } pkt = seg; - assert(len >= (rxq->crc_present << 2)); + MLX5_ASSERT(len >= (rxq->crc_present << 2)); pkt->ol_flags &= EXT_ATTACHED_MBUF; /* If compressed, take hash result from mini-CQE. */ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? @@ -1533,7 +1615,7 @@ mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; void *addr; - assert(rep != NULL); + MLX5_ASSERT(rep != NULL); /* Replace MPRQ buf. */ (*rxq->mprq_bufs)[rq_idx] = rep; /* Replace WQE. */ @@ -1623,7 +1705,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) byte_cnt = ret; strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> MLX5_MPRQ_STRIDE_NUM_SHIFT; - assert(strd_cnt); + MLX5_ASSERT(strd_cnt); consumed_strd += strd_cnt; if (byte_cnt & MLX5_MPRQ_FILLER_MASK) continue; @@ -1634,8 +1716,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /* mini-CQE for MPRQ doesn't have hash result. */ strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); } - assert(strd_idx < strd_n); - assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); + MLX5_ASSERT(strd_idx < strd_n); + MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & + wq_mask)); lro_num_seg = cqe->lro_num_seg; /* * Currently configured to receive a packet per a stride. But if @@ -1654,7 +1737,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) break; } len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; - assert((int)len >= (rxq->crc_present << 2)); + MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); if (rxq->crc_present) len -= RTE_ETHER_CRC_LEN; offset = strd_idx * strd_sz + strd_shift; @@ -1684,8 +1767,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Increment the refcnt of the whole chunk. */ rte_atomic16_add_return(&buf->refcnt, 1); - assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= - strd_n + 1); + MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <= + strd_n + 1); buf_addr = RTE_PTR_SUB(addr, headroom_sz); /* * MLX5 device doesn't use iova but it is necessary in a @@ -1706,7 +1789,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) buf_len, shinfo); /* Set mbuf head-room. */ pkt->data_off = headroom_sz; - assert(pkt->ol_flags == EXT_ATTACHED_MBUF); + MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF); /* * Prevent potential overflow due to MTU change through * kernel interface. @@ -1872,8 +1955,8 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, * copying pointers to temporary array * for rte_mempool_put_bulk() calls. 
*/ - assert(pkts); - assert(pkts_n); + MLX5_ASSERT(pkts); + MLX5_ASSERT(pkts_n); for (;;) { for (;;) { /* @@ -1882,7 +1965,7 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, */ mbuf = rte_pktmbuf_prefree_seg(*pkts); if (likely(mbuf != NULL)) { - assert(mbuf == *pkts); + MLX5_ASSERT(mbuf == *pkts); if (likely(n_free != 0)) { if (unlikely(pool != mbuf->pool)) /* From different pool. */ @@ -1919,9 +2002,9 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, * This loop is implemented to avoid multiple * inlining of rte_mempool_put_bulk(). */ - assert(pool); - assert(p_free); - assert(n_free); + MLX5_ASSERT(pool); + MLX5_ASSERT(p_free); + MLX5_ASSERT(n_free); /* * Free the array of pre-freed mbufs * belonging to the same memory pool. @@ -1969,8 +2052,8 @@ mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq, { uint16_t n_elts = tail - txq->elts_tail; - assert(n_elts); - assert(n_elts <= txq->elts_s); + MLX5_ASSERT(n_elts); + MLX5_ASSERT(n_elts <= txq->elts_s); /* * Implement a loop to support ring buffer wraparound * with single inlining of mlx5_tx_free_mbuf(). @@ -1980,8 +2063,8 @@ mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq, part = txq->elts_s - (txq->elts_tail & txq->elts_m); part = RTE_MIN(part, n_elts); - assert(part); - assert(part <= txq->elts_s); + MLX5_ASSERT(part); + MLX5_ASSERT(part <= txq->elts_s); mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m], part, olx); txq->elts_tail += part; @@ -2012,11 +2095,11 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, unsigned int part; struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts; - assert(pkts); - assert(pkts_n); + MLX5_ASSERT(pkts); + MLX5_ASSERT(pkts_n); part = txq->elts_s - (txq->elts_head & txq->elts_m); - assert(part); - assert(part <= txq->elts_s); + MLX5_ASSERT(part); + MLX5_ASSERT(part <= txq->elts_s); /* This code is a good candidate for vectorizing with SIMD. */ rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)), (void *)pkts, @@ -2052,7 +2135,7 @@ mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq, tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m]; if (likely(tail != txq->elts_tail)) { mlx5_tx_free_elts(txq, tail, olx); - assert(tail == txq->elts_tail); + MLX5_ASSERT(tail == txq->elts_tail); } } } @@ -2090,7 +2173,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { if (likely(ret != MLX5_CQE_STATUS_ERR)) { /* No new CQEs in completion queue. */ - assert(ret == MLX5_CQE_STATUS_HW_OWN); + MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN); break; } /* @@ -2122,8 +2205,9 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, continue; } /* Normal transmit completion. */ - assert(ci != txq->cq_pi); - assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter); + MLX5_ASSERT(ci != txq->cq_pi); + MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) == + cqe->wqe_counter); ++ci; last_cqe = cqe; /* @@ -2178,6 +2262,7 @@ mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) { volatile struct mlx5_wqe *last = loc->wqe_last; + MLX5_ASSERT(last); txq->elts_comp = head; if (MLX5_TXOFF_CONFIG(INLINE)) txq->wqe_comp = txq->wqe_ci; @@ -2185,14 +2270,14 @@ mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET); /* Save elts_head in dedicated free on completion queue. 
*/ -#ifdef NDEBUG - txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head; -#else +#ifdef RTE_LIBRTE_MLX5_DEBUG txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head | - (last->cseg.opcode >> 8) << 16; + (last->cseg.opcode >> 8) << 16; +#else + txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head; #endif /* A CQE slot must always be available. */ - assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s); + MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s); } } @@ -2306,7 +2391,7 @@ mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused, * We should get here only if device support * this feature correctly. */ - assert(txq->vlan_en); + MLX5_ASSERT(txq->vlan_en); es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT | loc->mbuf->vlan_tci); } else { @@ -2384,7 +2469,7 @@ mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused, loc->mbuf->vlan_tci); pdst += sizeof(struct rte_vlan_hdr); /* Copy the rest two bytes from packet data. */ - assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; } else { /* Fill the gap in the title WQEBB with inline data. */ @@ -2477,7 +2562,7 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq, loc->mbuf->vlan_tci); pdst += sizeof(struct rte_vlan_hdr); /* Copy the rest two bytes from packet data. */ - assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; psrc += sizeof(uint16_t); } else { @@ -2486,11 +2571,11 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq, psrc += sizeof(rte_v128u32_t); } pdst = (uint8_t *)(es + 2); - assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); - assert(pdst < (uint8_t *)txq->wqes_end); + MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end); inlen -= MLX5_ESEG_MIN_INLINE_SIZE; if (!inlen) { - assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); return (struct mlx5_wqe_dseg *)pdst; } /* @@ -2553,8 +2638,8 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst, unsigned int part, dlen, copy = 0; uint8_t *psrc; - assert(len); - assert(must <= len); + MLX5_ASSERT(len); + MLX5_ASSERT(must <= len); do { /* Allow zero length packets, must check first. */ dlen = rte_pktmbuf_data_len(loc->mbuf); @@ -2564,8 +2649,8 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst, loc->mbuf = mbuf->next; rte_pktmbuf_free_seg(mbuf); loc->mbuf_off = 0; - assert(loc->mbuf_nseg > 1); - assert(loc->mbuf); + MLX5_ASSERT(loc->mbuf_nseg > 1); + MLX5_ASSERT(loc->mbuf); --loc->mbuf_nseg; if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) { unsigned int diff; @@ -2604,7 +2689,7 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst, loc->mbuf = mbuf->next; rte_pktmbuf_free_seg(mbuf); loc->mbuf_off = 0; - assert(loc->mbuf_nseg >= 1); + MLX5_ASSERT(loc->mbuf_nseg >= 1); --loc->mbuf_nseg; } return copy; @@ -2684,7 +2769,7 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq, sizeof(struct rte_vlan_hdr) + 2 * RTE_ETHER_ADDR_LEN), "invalid Ethernet Segment data size"); - assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); pdst = (uint8_t *)&es->inline_data; if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { /* Implement VLAN tag insertion as part inline data. 
*/ @@ -2698,14 +2783,14 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq, pdst += sizeof(struct rte_vlan_hdr); tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr); } - assert(pdst < (uint8_t *)txq->wqes_end); + MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end); /* * The WQEBB space availability is checked by caller. * Here we should be aware of WQE ring buffer wraparound only. */ part = (uint8_t *)txq->wqes_end - pdst; part = RTE_MIN(part, inlen - tlen); - assert(part); + MLX5_ASSERT(part); do { unsigned int copy; @@ -2753,7 +2838,7 @@ mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq, unsigned int olx __rte_unused) { - assert(len); + MLX5_ASSERT(len); dseg->bcount = rte_cpu_to_be_32(len); dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf); @@ -2789,7 +2874,7 @@ mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq, { uintptr_t dst, src; - assert(len); + MLX5_ASSERT(len); if (len > MLX5_DSEG_MIN_INLINE_SIZE) { dseg->bcount = rte_cpu_to_be_32(len); dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); @@ -2803,7 +2888,7 @@ mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq, src = (uintptr_t)buf; if (len & 0x08) { #ifdef RTE_ARCH_STRICT_ALIGN - assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t))); + MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t))); *(uint32_t *)dst = *(unaligned_uint32_t *)src; dst += sizeof(uint32_t); src += sizeof(uint32_t); @@ -2865,8 +2950,14 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, unsigned int part; uint8_t *pdst; - dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); - pdst = &dseg->inline_data[0]; + if (!MLX5_TXOFF_CONFIG(MPW)) { + /* Store the descriptor byte counter for eMPW sessions. */ + dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); + pdst = &dseg->inline_data[0]; + } else { + /* The entire legacy MPW session counter is stored on close. */ + pdst = (uint8_t *)dseg; + } /* * The WQEBB space availability is checked by caller. * Here we should be aware of WQE ring buffer wraparound only. @@ -2878,7 +2969,8 @@ mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, len -= part; if (likely(!len)) { pdst += part; - pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + if (!MLX5_TXOFF_CONFIG(MPW)) + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); /* Note: no final wraparound check here. */ return (struct mlx5_wqe_dseg *)pdst; } @@ -2922,19 +3014,26 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, unsigned int part; uint8_t *pdst; - assert(len > MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE); static_assert(MLX5_DSEG_MIN_INLINE_SIZE == (2 * RTE_ETHER_ADDR_LEN), "invalid Data Segment data size"); - dseg->bcount = rte_cpu_to_be_32((len + sizeof(struct rte_vlan_hdr)) | - MLX5_ETH_WQE_DATA_INLINE); - pdst = &dseg->inline_data[0]; + if (!MLX5_TXOFF_CONFIG(MPW)) { + /* Store the descriptor byte counter for eMPW sessions. */ + dseg->bcount = rte_cpu_to_be_32 + ((len + sizeof(struct rte_vlan_hdr)) | + MLX5_ETH_WQE_DATA_INLINE); + pdst = &dseg->inline_data[0]; + } else { + /* The entire legacy MPW session counter is stored on close. */ + pdst = (uint8_t *)dseg; + } memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE); buf += MLX5_DSEG_MIN_INLINE_SIZE; pdst += MLX5_DSEG_MIN_INLINE_SIZE; len -= MLX5_DSEG_MIN_INLINE_SIZE; /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. 
*/ - assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) pdst = (uint8_t *)txq->wqes; *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) | @@ -2951,7 +3050,8 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, len -= part; if (likely(!len)) { pdst += part; - pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + if (!MLX5_TXOFF_CONFIG(MPW)) + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); /* Note: no final wraparound check here. */ return (struct mlx5_wqe_dseg *)pdst; } @@ -3002,7 +3102,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, struct mlx5_wqe_dseg *restrict dseg; unsigned int ds; - assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen); + MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen); loc->mbuf_nseg = NB_SEGS(loc->mbuf); loc->mbuf_off = 0; @@ -3023,8 +3123,8 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, * Non-zero offset means there are some data * remained in the packet. */ - assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf)); - assert(rte_pktmbuf_data_len(loc->mbuf)); + MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf)); + MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf)); dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *, loc->mbuf_off); dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off; @@ -3036,7 +3136,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, dseg = (struct mlx5_wqe_dseg *)txq->wqes; mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx); /* Store the mbuf to be freed on completion. */ - assert(loc->elts_free); + MLX5_ASSERT(loc->elts_free); txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; --loc->elts_free; ++dseg; @@ -3062,7 +3162,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, (txq, loc, dseg, rte_pktmbuf_mtod(loc->mbuf, uint8_t *), rte_pktmbuf_data_len(loc->mbuf), olx); - assert(loc->elts_free); + MLX5_ASSERT(loc->elts_free); txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; --loc->elts_free; ++dseg; @@ -3129,7 +3229,7 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq, inlen <= MLX5_ESEG_MIN_INLINE_SIZE || inlen > (dlen + vlan))) return MLX5_TXCMP_CODE_ERROR; - assert(inlen >= txq->inlen_mode); + MLX5_ASSERT(inlen >= txq->inlen_mode); /* * Check whether there are enough free WQEBBs: * - Control Segment @@ -3201,7 +3301,7 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq, struct mlx5_wqe *restrict wqe; unsigned int ds, nseg; - assert(NB_SEGS(loc->mbuf) > 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); /* * No inline at all, it means the CPU cycles saving * is prioritized at configuration, we should not @@ -3308,8 +3408,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, struct mlx5_wqe *restrict wqe; unsigned int ds, inlen, dlen, vlan = 0; - assert(MLX5_TXOFF_CONFIG(INLINE)); - assert(NB_SEGS(loc->mbuf) > 1); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); /* * First calculate data length to be inlined * to estimate the required space for WQE. @@ -3321,7 +3421,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, /* Check against minimal length. 
*/ if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) return MLX5_TXCMP_CODE_ERROR; - assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); if (inlen > txq->inlen_send || loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) { struct rte_mbuf *mbuf; @@ -3334,8 +3434,9 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, * inlining is required. */ if (txq->inlen_mode) { - assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE); - assert(txq->inlen_mode <= txq->inlen_send); + MLX5_ASSERT(txq->inlen_mode >= + MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send); inlen = txq->inlen_mode; } else { if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE || @@ -3368,7 +3469,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, do { smlen = nxlen; mbuf = NEXT(mbuf); - assert(mbuf); + MLX5_ASSERT(mbuf); nxlen = rte_pktmbuf_data_len(mbuf); nxlen += smlen; } while (unlikely(nxlen < inlen)); @@ -3384,7 +3485,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, inlen = nxlen; mbuf = NEXT(mbuf); /* There should be not end of packet. */ - assert(mbuf); + MLX5_ASSERT(mbuf); nxlen = inlen + rte_pktmbuf_data_len(mbuf); } while (unlikely(nxlen < txq->inlen_send)); } @@ -3412,7 +3513,7 @@ do_align: * Estimate the number of Data Segments conservatively, * supposing no any mbufs is being freed during inlining. */ - assert(inlen <= txq->inlen_send); + MLX5_ASSERT(inlen <= txq->inlen_send); ds = NB_SEGS(loc->mbuf) + 2 + (inlen - MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WSEG_SIZE + @@ -3471,14 +3572,14 @@ mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, unsigned int olx) { - assert(loc->elts_free && loc->wqe_free); - assert(pkts_n > loc->pkts_sent); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); pkts += loc->pkts_sent + 1; pkts_n -= loc->pkts_sent; for (;;) { enum mlx5_txcmp_code ret; - assert(NB_SEGS(loc->mbuf) > 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); /* * Estimate the number of free elts quickly but * conservatively. Some segment may be fully inlined @@ -3518,7 +3619,7 @@ mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq, return MLX5_TXCMP_CODE_TSO; return MLX5_TXCMP_CODE_SINGLE; } - assert(false); + MLX5_ASSERT(false); } /** @@ -3560,8 +3661,8 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, unsigned int olx) { - assert(loc->elts_free && loc->wqe_free); - assert(pkts_n > loc->pkts_sent); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); pkts += loc->pkts_sent + 1; pkts_n -= loc->pkts_sent; for (;;) { @@ -3570,7 +3671,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, unsigned int ds, dlen, hlen, ntcp, vlan = 0; uint8_t *dptr; - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); dlen = rte_pktmbuf_data_len(loc->mbuf); if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { @@ -3654,7 +3755,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, return MLX5_TXCMP_CODE_SINGLE; /* Continue with the next TSO packet. */ } - assert(false); + MLX5_ASSERT(false); } /** @@ -3761,7 +3862,7 @@ mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused, return false; /* There must be no VLAN packets in eMPW loop. 
*/ if (MLX5_TXOFF_CONFIG(VLAN)) - assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)); + MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)); return true; } @@ -3793,7 +3894,7 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, unsigned int slen, unsigned int olx __rte_unused) { - assert(!MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes counter. */ txq->stats.obytes += slen; @@ -3821,6 +3922,8 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, * Total size of descriptor/data in bytes. * @param slen * Accumulated statistics, data bytes sent. + * @param wqem + * The base WQE for the eMPW/MPW descriptor. * @param olx * Configured Tx offloads mask. It is fully defined at * compile time and may be used for optimization. @@ -3834,20 +3937,40 @@ mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, unsigned int len, unsigned int slen, + struct mlx5_wqe *restrict wqem, unsigned int olx __rte_unused) { - assert(MLX5_TXOFF_CONFIG(INLINE)); - assert((len % MLX5_WSEG_SIZE) == 0); + struct mlx5_wqe_dseg *dseg = &wqem->dseg[0]; + + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes counter. */ txq->stats.obytes += slen; #else (void)slen; #endif - len = len / MLX5_WSEG_SIZE + 2; - loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len); + if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) { + /* + * If the legacy MPW session contains the inline packets + * we should set the only inline data segment length + * and align the total length to the segment size. + */ + MLX5_ASSERT(len > sizeof(dseg->bcount)); + dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) | + MLX5_ETH_WQE_DATA_INLINE); + len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2; + } else { + /* + * The session is not legacy MPW or contains the + * data buffer pointer segments. + */ + MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0); + len = len / MLX5_WSEG_SIZE + 2; + } + wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len); txq->wqe_ci += (len + 3) / 4; loc->wqe_free -= (len + 3) / 4; + loc->wqe_last = wqem; } /** @@ -3909,10 +4032,10 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, * and sends single-segment packet with eMPW opcode * without data inlining. */ - assert(!MLX5_TXOFF_CONFIG(INLINE)); - assert(MLX5_TXOFF_CONFIG(EMPW)); - assert(loc->elts_free && loc->wqe_free); - assert(pkts_n > loc->pkts_sent); + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW)); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); pkts += loc->pkts_sent + 1; pkts_n -= loc->pkts_sent; @@ -3924,7 +4047,7 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, unsigned int slen = 0; next_empw: - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? 
MLX5_MPW_MAX_PACKETS : MLX5_EMPW_MAX_PACKETS); @@ -3990,7 +4113,7 @@ next_empw: return MLX5_TXCMP_CODE_EXIT; return MLX5_TXCMP_CODE_MULTI; } - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); if (ret == MLX5_TXCMP_CODE_TSO) { part -= loop; mlx5_tx_sdone_empw(txq, loc, part, slen, olx); @@ -4008,7 +4131,7 @@ next_empw: return MLX5_TXCMP_CODE_SINGLE; } if (ret != MLX5_TXCMP_CODE_EMPW) { - assert(false); + MLX5_ASSERT(false); part -= loop; mlx5_tx_sdone_empw(txq, loc, part, slen, olx); return MLX5_TXCMP_CODE_ERROR; @@ -4022,7 +4145,7 @@ next_empw: * - packets length (legacy MPW only) */ if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) { - assert(loop); + MLX5_ASSERT(loop); part -= loop; mlx5_tx_sdone_empw(txq, loc, part, slen, olx); if (unlikely(!loc->elts_free || @@ -4037,8 +4160,8 @@ next_empw: dseg = (struct mlx5_wqe_dseg *)txq->wqes; } /* eMPW is built successfully, update loop parameters. */ - assert(!loop); - assert(pkts_n >= part); + MLX5_ASSERT(!loop); + MLX5_ASSERT(pkts_n >= part); #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes counter. */ txq->stats.obytes += slen; @@ -4056,7 +4179,7 @@ next_empw: return ret; /* Continue sending eMPW batches. */ } - assert(false); + MLX5_ASSERT(false); } /** @@ -4075,21 +4198,21 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, * and sends single-segment packet with eMPW opcode * with data inlining. */ - assert(MLX5_TXOFF_CONFIG(INLINE)); - assert(MLX5_TXOFF_CONFIG(EMPW)); - assert(loc->elts_free && loc->wqe_free); - assert(pkts_n > loc->pkts_sent); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW)); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); pkts += loc->pkts_sent + 1; pkts_n -= loc->pkts_sent; for (;;) { struct mlx5_wqe_dseg *restrict dseg; - struct mlx5_wqe_eseg *restrict eseg; + struct mlx5_wqe *restrict wqem; enum mlx5_txcmp_code ret; unsigned int room, part, nlim; unsigned int slen = 0; - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); /* * Limits the amount of packets in one WQE * to improve CQE latency generation. @@ -4103,26 +4226,34 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, return MLX5_TXCMP_CODE_EXIT; if (likely(pkts_n > 1)) rte_prefetch0(*pkts); - loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m); + wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m); /* * Build eMPW title WQEBB: * - Control Segment, eMPW opcode, zero DS * - Ethernet Segment, no inline */ - mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0, + mlx5_tx_cseg_init(txq, loc, wqem, 0, MLX5_OPCODE_ENHANCED_MPSW, olx); - mlx5_tx_eseg_none(txq, loc, loc->wqe_last, + mlx5_tx_eseg_none(txq, loc, wqem, olx & ~MLX5_TXOFF_CONFIG_VLAN); - eseg = &loc->wqe_last->eseg; - dseg = &loc->wqe_last->dseg[0]; + dseg = &wqem->dseg[0]; /* Store the packet length for legacy MPW. */ if (MLX5_TXOFF_CONFIG(MPW)) - eseg->mss = rte_cpu_to_be_16 - (rte_pktmbuf_data_len(loc->mbuf)); + wqem->eseg.mss = rte_cpu_to_be_16 + (rte_pktmbuf_data_len(loc->mbuf)); room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, loc->wqe_free) * MLX5_WQE_SIZE - MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE; + /* Limit the room for legacy MPW sessions for performance. */ + if (MLX5_TXOFF_CONFIG(MPW)) + room = RTE_MIN(room, + RTE_MAX(txq->inlen_empw + + sizeof(dseg->bcount) + + (MLX5_TXOFF_CONFIG(VLAN) ? 
+ sizeof(struct rte_vlan_hdr) : 0), + MLX5_MPW_INLINE_MAX_PACKETS * + MLX5_WQE_DSEG_SIZE)); /* Build WQE till we have space, packets and resources. */ part = room; for (;;) { @@ -4130,9 +4261,9 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); unsigned int tlen; - assert(room >= MLX5_WQE_DSEG_SIZE); - assert((room % MLX5_WQE_DSEG_SIZE) == 0); - assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end); + MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE); + MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0); + MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end); /* * Some Tx offloads may cause an error if * packet is not long enough, check against @@ -4146,15 +4277,36 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, * We have some successfully built * packet Data Segments to send. */ - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); return MLX5_TXCMP_CODE_ERROR; } /* Inline or not inline - that's the Question. */ if (dlen > txq->inlen_empw || loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) goto pointer_empw; + if (MLX5_TXOFF_CONFIG(MPW)) { + if (dlen > txq->inlen_send) + goto pointer_empw; + tlen = dlen; + if (part == room) { + /* Open new inline MPW session. */ + tlen += sizeof(dseg->bcount); + dseg->bcount = RTE_BE32(0); + dseg = RTE_PTR_ADD + (dseg, sizeof(dseg->bcount)); + } else { + /* + * No pointer and inline descriptor + * intermix for legacy MPW sessions. + */ + if (wqem->dseg[0].bcount) + break; + } + } else { + tlen = sizeof(dseg->bcount) + dlen; + } /* Inline entire packet, optional VLAN insertion. */ - tlen = sizeof(dseg->bcount) + dlen; if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { /* @@ -4162,8 +4314,9 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, * mlx5_tx_able_to_empw() and packet * fits into inline length guaranteed. */ - assert((dlen + sizeof(struct rte_vlan_hdr)) <= - txq->inlen_empw); + MLX5_ASSERT((dlen + + sizeof(struct rte_vlan_hdr)) <= + txq->inlen_empw); tlen += sizeof(struct rte_vlan_hdr); if (room < tlen) break; @@ -4179,8 +4332,9 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, dseg = mlx5_tx_dseg_empw(txq, loc, dseg, dptr, dlen, olx); } - tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE); - assert(room >= tlen); + if (!MLX5_TXOFF_CONFIG(MPW)) + tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE); + MLX5_ASSERT(room >= tlen); room -= tlen; /* * Packet data are completely inlined, @@ -4189,14 +4343,22 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, rte_pktmbuf_free_seg(loc->mbuf); goto next_mbuf; pointer_empw: + /* + * No pointer and inline descriptor + * intermix for legacy MPW sessions. + */ + if (MLX5_TXOFF_CONFIG(MPW) && + part != room && + wqem->dseg[0].bcount == RTE_BE32(0)) + break; /* * Not inlinable VLAN packets are * proceeded outside of this routine. */ - assert(room >= MLX5_WQE_DSEG_SIZE); + MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE); if (MLX5_TXOFF_CONFIG(VLAN)) - assert(!(loc->mbuf->ol_flags & - PKT_TX_VLAN_PKT)); + MLX5_ASSERT(!(loc->mbuf->ol_flags & + PKT_TX_VLAN_PKT)); mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); /* We have to store mbuf in elts.*/ txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; @@ -4217,7 +4379,8 @@ next_mbuf: * continue build descriptors. 
*/ part -= room; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); return MLX5_TXCMP_CODE_EXIT; } loc->mbuf = *pkts++; @@ -4231,16 +4394,18 @@ next_mbuf: */ if (ret == MLX5_TXCMP_CODE_MULTI) { part -= room; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); if (unlikely(!loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; return MLX5_TXCMP_CODE_MULTI; } - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); if (ret == MLX5_TXCMP_CODE_TSO) { part -= room; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); if (unlikely(!loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; @@ -4248,16 +4413,18 @@ next_mbuf: } if (ret == MLX5_TXCMP_CODE_SINGLE) { part -= room; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); if (unlikely(!loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; return MLX5_TXCMP_CODE_SINGLE; } if (ret != MLX5_TXCMP_CODE_EMPW) { - assert(false); + MLX5_ASSERT(false); part -= room; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); return MLX5_TXCMP_CODE_ERROR; } /* Check if we have minimal room left. */ @@ -4272,7 +4439,8 @@ next_mbuf: * - software parser settings * - packets length (legacy MPW only) */ - if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) + if (!mlx5_tx_match_empw(txq, &wqem->eseg, + loc, dlen, olx)) break; /* Packet attributes match, continue the same eMPW. */ if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) @@ -4282,17 +4450,17 @@ next_mbuf: * We get here to close an existing eMPW * session and start the new one. */ - assert(pkts_n); + MLX5_ASSERT(pkts_n); part -= room; if (unlikely(!part)) return MLX5_TXCMP_CODE_EXIT; - mlx5_tx_idone_empw(txq, loc, part, slen, olx); + mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx); if (unlikely(!loc->elts_free || !loc->wqe_free)) return MLX5_TXCMP_CODE_EXIT; /* Continue the loop with new eMPW session. */ } - assert(false); + MLX5_ASSERT(false); } /** @@ -4310,15 +4478,15 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, * Subroutine is the part of mlx5_tx_burst_single() * and sends single-segment packet with SEND opcode. */ - assert(loc->elts_free && loc->wqe_free); - assert(pkts_n > loc->pkts_sent); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); pkts += loc->pkts_sent + 1; pkts_n -= loc->pkts_sent; for (;;) { struct mlx5_wqe *restrict wqe; enum mlx5_txcmp_code ret; - assert(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); if (MLX5_TXOFF_CONFIG(INLINE)) { unsigned int inlen, vlan = 0; @@ -4338,7 +4506,8 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, * Otherwise we would do extra check for data * size to avoid crashes due to length overflow. */ - assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(txq->inlen_send >= + MLX5_ESEG_MIN_INLINE_SIZE); if (inlen <= txq->inlen_send) { unsigned int seg_n, wqe_n; @@ -4424,10 +4593,10 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, * WQE ring buffer to inline partially. 
*/ single_min_inline: - assert(txq->inlen_send >= txq->inlen_mode); - assert(inlen > txq->inlen_mode); - assert(txq->inlen_mode >= - MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode); + MLX5_ASSERT(inlen > txq->inlen_mode); + MLX5_ASSERT(txq->inlen_mode >= + MLX5_ESEG_MIN_INLINE_SIZE); /* * Check whether there are enough free WQEBBs: * - Control Segment @@ -4470,7 +4639,7 @@ single_min_inline: txq->wqe_ci += (ds + 3) / 4; loc->wqe_free -= (ds + 3) / 4; /* We have to store mbuf in elts.*/ - assert(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; --loc->elts_free; @@ -4504,14 +4673,14 @@ single_part_inline: * comparing with txq->inlen_send. We should * not get overflow here. */ - assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE); dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE; mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1], dptr, dlen, olx); ++txq->wqe_ci; --loc->wqe_free; /* We have to store mbuf in elts.*/ - assert(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; --loc->elts_free; @@ -4549,7 +4718,7 @@ single_no_inline: * if no inlining is configured, this is done * by calling routine in a batch copy. */ - assert(!MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); --loc->elts_free; #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes counter. */ @@ -4571,7 +4740,7 @@ single_no_inline: if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE)) return ret; } - assert(false); + MLX5_ASSERT(false); } static __rte_always_inline enum mlx5_txcmp_code @@ -4586,7 +4755,7 @@ mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq, ret = mlx5_tx_able_to_empw(txq, loc, olx, false); if (ret == MLX5_TXCMP_CODE_SINGLE) goto ordinary_send; - assert(ret == MLX5_TXCMP_CODE_EMPW); + MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW); for (;;) { /* Optimize for inline/no inline eMPW send. */ ret = (MLX5_TXOFF_CONFIG(INLINE)) ? @@ -4597,14 +4766,14 @@ mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq, if (ret != MLX5_TXCMP_CODE_SINGLE) return ret; /* The resources to send one packet should remain. */ - assert(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); ordinary_send: ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx); - assert(ret != MLX5_TXCMP_CODE_SINGLE); + MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE); if (ret != MLX5_TXCMP_CODE_EMPW) return ret; /* The resources to send one packet should remain. */ - assert(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); } } @@ -4638,8 +4807,8 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, enum mlx5_txcmp_code ret; unsigned int part; - assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); - assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); + MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); + MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); if (unlikely(!pkts_n)) return 0; loc.pkts_sent = 0; @@ -4665,10 +4834,10 @@ send_loop: * - data inlining into WQEs, one packet may require multiple * WQEBBs, the WQEs become the limiting factor. 
*/ - assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); + MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); loc.elts_free = txq->elts_s - (uint16_t)(txq->elts_head - txq->elts_tail); - assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); + MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); loc.wqe_free = txq->wqe_s - (uint16_t)(txq->wqe_ci - txq->wqe_pi); if (unlikely(!loc.elts_free || !loc.wqe_free)) @@ -4690,7 +4859,7 @@ send_loop: * per WQE, do it in dedicated routine. */ enter_send_multi: - assert(loc.pkts_sent >= loc.pkts_copy); + MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy); part = loc.pkts_sent - loc.pkts_copy; if (!MLX5_TXOFF_CONFIG(INLINE) && part) { /* @@ -4704,7 +4873,7 @@ enter_send_multi: part, olx); loc.pkts_copy = loc.pkts_sent; } - assert(pkts_n > loc.pkts_sent); + MLX5_ASSERT(pkts_n > loc.pkts_sent); ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx); if (!MLX5_TXOFF_CONFIG(INLINE)) loc.pkts_copy = loc.pkts_sent; @@ -4746,7 +4915,7 @@ enter_send_multi: goto enter_send_tso; } /* We must not get here. Something is going wrong. */ - assert(false); + MLX5_ASSERT(false); txq->stats.oerrors++; break; } @@ -4760,8 +4929,8 @@ enter_send_multi: * in dedicated branch. */ enter_send_tso: - assert(NB_SEGS(loc.mbuf) == 1); - assert(pkts_n > loc.pkts_sent); + MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1); + MLX5_ASSERT(pkts_n > loc.pkts_sent); ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx); /* * These returned code checks are supposed @@ -4784,7 +4953,7 @@ enter_send_tso: goto enter_send_multi; } /* We must not get here. Something is going wrong. */ - assert(false); + MLX5_ASSERT(false); txq->stats.oerrors++; break; } @@ -4797,7 +4966,7 @@ enter_send_tso: * offloads are requested at SQ configuration time). */ enter_send_single: - assert(pkts_n > loc.pkts_sent); + MLX5_ASSERT(pkts_n > loc.pkts_sent); ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx); /* * These returned code checks are supposed @@ -4826,7 +4995,7 @@ enter_send_single: goto enter_send_tso; } /* We must not get here. Something is going wrong. */ - assert(false); + MLX5_ASSERT(false); txq->stats.oerrors++; break; } @@ -4836,7 +5005,8 @@ enter_send_single: * - doorbell the hardware * - copy the rest of mbufs to elts (if any) */ - assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) || + loc.pkts_sent >= loc.pkts_copy); /* Take a shortcut if nothing is sent. */ if (unlikely(loc.pkts_sent == loc.pkts_loop)) goto burst_exit; @@ -4889,8 +5059,8 @@ enter_send_single: mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx); loc.pkts_copy = loc.pkts_sent; } - assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); - assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); + MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); + MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); if (pkts_n > loc.pkts_sent) { /* * If burst size is large there might be no enough CQE @@ -5043,7 +5213,7 @@ MLX5_TXOFF_DECL(iv, /* * Generate routines with Legacy Multi-Packet Write support. 
- * This mode is supported by ConnectX-4LX only and imposes + * This mode is supported by ConnectX-4 Lx only and imposes * offload limitations, not supported: * - ACL/Flows (metadata are becoming meaningless) * - WQE Inline headers @@ -5261,7 +5431,7 @@ mlx5_select_tx_function(struct rte_eth_dev *dev) "invalid WQE Data Segment size"); static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE, "invalid WQE size"); - assert(priv); + MLX5_ASSERT(priv); if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) { /* We should support Multi-Segment Packets. */ olx |= MLX5_TXOFF_CONFIG_MULTI; @@ -5409,3 +5579,92 @@ mlx5_select_tx_function(struct rte_eth_dev *dev) } return txoff_func[m].func; } + +/** + * DPDK callback to get the TX queue information + * + * @param dev + * Pointer to the device structure. + * + * @param tx_queue_id + * Tx queue identificator. + * + * @param qinfo + * Pointer to the TX queue information structure. + * + * @return + * None. + */ + +void +mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + + if (!txq) + return; + qinfo->nb_desc = txq->elts_s; + qinfo->conf.tx_thresh.pthresh = 0; + qinfo->conf.tx_thresh.hthresh = 0; + qinfo->conf.tx_thresh.wthresh = 0; + qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.tx_free_thresh = 0; + qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1; + qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; +} + +/** + * DPDK callback to get the TX packet burst mode information + * + * @param dev + * Pointer to the device structure. + * + * @param tx_queue_id + * Tx queue identificatior. + * + * @param mode + * Pointer to the burts mode information. + * + * @return + * 0 as success, -EINVAL as failure. + */ + +int +mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, + uint16_t tx_queue_id __rte_unused, + struct rte_eth_burst_mode *mode) +{ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + unsigned int i, olx; + + for (i = 0; i < RTE_DIM(txoff_func); i++) { + if (pkt_burst == txoff_func[i].func) { + olx = txoff_func[i].olx; + snprintf(mode->info, sizeof(mode->info), + "%s%s%s%s%s%s%s%s", + (olx & MLX5_TXOFF_CONFIG_EMPW) ? + ((olx & MLX5_TXOFF_CONFIG_MPW) ? + "Legacy MPW" : "Enhanced MPW") : "No MPW", + (olx & MLX5_TXOFF_CONFIG_MULTI) ? + " + MULTI" : "", + (olx & MLX5_TXOFF_CONFIG_TSO) ? + " + TSO" : "", + (olx & MLX5_TXOFF_CONFIG_SWP) ? + " + SWP" : "", + (olx & MLX5_TXOFF_CONFIG_CSUM) ? + " + CSUM" : "", + (olx & MLX5_TXOFF_CONFIG_INLINE) ? + " + INLINE" : "", + (olx & MLX5_TXOFF_CONFIG_VLAN) ? + " + VLAN" : "", + (olx & MLX5_TXOFF_CONFIG_METADATA) ? + " + METADATA" : ""); + return 0; + } + } + return -EINVAL; +}
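
The rxq/txq info and burst mode callbacks added by this patch are not called directly by applications; they are reached through the generic ethdev API. Below is a minimal sketch of how an application could query them, assuming a port that is already configured and started; the helper name and the port_id/queue_id values are illustrative placeholders, not part of this patch.

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_modes(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;
	struct rte_eth_burst_mode mode;

	/* Dispatched to mlx5_rxq_info_get() for mlx5 ports. */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: %u descriptors, mempool %s\n",
		       queue_id, rx_qinfo.nb_desc, rx_qinfo.mp->name);
	/* Dispatched to mlx5_txq_info_get(). */
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: %u descriptors\n", queue_id, tx_qinfo.nb_desc);
	/* Dispatched to mlx5_rx_burst_mode_get(), e.g. "Vector SSE". */
	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("rxq %u: burst mode \"%s\"\n", queue_id, mode.info);
	/* Dispatched to mlx5_tx_burst_mode_get(). */
	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("txq %u: burst mode \"%s\"\n", queue_id, mode.info);
}

The reported strings depend on the datapath actually selected: the Rx string reflects the scalar/MPRQ/vector choice, while the Tx string mirrors the offload flags of the entry picked in mlx5_select_tx_function(), for instance "Enhanced MPW + MULTI + TSO + CSUM + INLINE + METADATA".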