diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index a4e15c5193..7174ffc91c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -417,20 +417,17 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 /**
- * DPDK callback to check the status of a rx descriptor.
+ * Internal function to compute the number of used descriptors in an Rx queue.
  *
- * @param rx_queue
- *   The rx queue.
- * @param[in] offset
- *   The index of the descriptor in the ring.
+ * @param rxq
+ *   The Rx queue.
  *
  * @return
- *   The status of the tx descriptor.
+ *   The number of used Rx descriptors.
  */
-int
-mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+static uint32_t
+rx_queue_count(struct mlx5_rxq_data *rxq)
 {
-	struct mlx5_rxq_data *rxq = rx_queue;
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -461,11 +458,72 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	}
 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
-	if (offset < used)
+	return used;
+}
+
+/**
+ * DPDK callback to check the status of an Rx descriptor.
+ *
+ * @param rx_queue
+ *   The Rx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the Rx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct mlx5_rxq_data *rxq = rx_queue;
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
+
+	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	if (offset >= (1 << rxq->elts_n)) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	if (offset < rx_queue_count(rxq))
 		return RTE_ETH_RX_DESC_DONE;
 	return RTE_ETH_RX_DESC_AVAIL;
 }
 
+/**
+ * DPDK callback to get the number of used descriptors in an Rx queue.
+ *
+ * @param dev
+ *   Pointer to the device structure.
+ *
+ * @param rx_queue_id
+ *   The index of the Rx queue.
+ *
+ * @return
+ *   The number of used Rx descriptors.
+ *   -EINVAL if the queue is invalid.
+ */
+uint32_t
+mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq;
+
+	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	rxq = (*priv->rxqs)[rx_queue_id];
+	if (!rxq) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	return rx_queue_count(rxq);
+}
+
 /**
  * DPDK callback for TX.
  *
@@ -495,6 +553,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
 	unsigned int segs_n = 0;
 	const unsigned int max_inline = txq->max_inline;
+	uint64_t addr_64;
 
 	if (unlikely(!pkts_n))
 		return 0;
@@ -503,8 +562,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. 
*/ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -524,6 +581,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); uint32_t swp_offsets = 0; uint8_t swp_types = 0; + rte_be32_t metadata; uint16_t tso_segsz = 0; #ifdef MLX5_PMD_SOFT_COUNTERS uint32_t total_length = 0; @@ -567,11 +625,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) cs_flags = txq_ol_cksum_to_cs(buf); txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types); raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; + /* Copy metadata from mbuf if valid */ + metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata : + 0; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { uint32_t vlan = rte_cpu_to_be_32(0x81000000 | buf->vlan_tci); - unsigned int len = 2 * ETHER_ADDR_LEN - 2; + unsigned int len = 2 * RTE_ETHER_ADDR_LEN - 2; addr += 2; length -= 2; @@ -632,7 +693,8 @@ pkt_inline: RTE_CACHE_LINE_SIZE); copy_b = (addr_end > addr) ? RTE_MIN((addr_end - addr), length) : 0; - if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { + if (copy_b && ((end - (uintptr_t)raw) > + (copy_b + sizeof(inl)))) { /* * One Dseg remains in the current WQE. To * keep the computation positive, it is @@ -711,12 +773,12 @@ pkt_inline: ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - addr = rte_cpu_to_be_64(addr); + addr_64 = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; ++ds; if (!segs_n) @@ -750,12 +812,12 @@ next_seg: total_length += length; #endif /* Store segment information. */ - addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); + addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; (*txq->elts)[++elts_head & elts_m] = buf; if (--segs_n) @@ -782,7 +844,7 @@ next_pkt: swp_offsets, cs_flags | (swp_types << 8) | (rte_cpu_to_be_16(tso_segsz) << 16), - 0, + metadata, (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { @@ -796,7 +858,7 @@ next_pkt: wqe->eseg = (rte_v128u32_t){ swp_offsets, cs_flags | (swp_types << 8), - 0, + metadata, (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } @@ -816,14 +878,13 @@ next_wqe: /* Check whether completion threshold has been reached. */ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -863,7 +924,7 @@ mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length) mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; - mpw->wqe->eseg.rsvd2 = 0; + mpw->wqe->eseg.flow_table_metadata = 0; mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); @@ -942,8 +1003,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Start processing. 
*/ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -952,6 +1011,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) uint32_t length; unsigned int segs_n = buf->nb_segs; uint32_t cs_flags; + rte_be32_t metadata; /* * Make sure there is enough room to store this packet and @@ -968,6 +1028,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) max_elts -= segs_n; --pkts_n; cs_flags = txq_ol_cksum_to_cs(buf); + /* Copy metadata from mbuf if valid */ + metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata : + 0; /* Retrieve packet information. */ length = PKT_LEN(buf); assert(length); @@ -975,6 +1038,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if ((mpw.state == MLX5_MPW_STATE_OPENED) && ((mpw.len != length) || (segs_n != 1) || + (mpw.wqe->eseg.flow_table_metadata != metadata) || (mpw.wqe->eseg.cs_flags != cs_flags))) mlx5_mpw_close(txq, &mpw); if (mpw.state == MLX5_MPW_STATE_CLOSED) { @@ -988,6 +1052,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; + mpw.wqe->eseg.flow_table_metadata = metadata; } /* Multi-segment packets must be alone in their MPW. */ assert((segs_n == 1) || (mpw.pkts_n == 0)); @@ -1032,14 +1097,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1087,7 +1151,7 @@ mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; - mpw->wqe->eseg.rsvd2 = 0; + mpw->wqe->eseg.flow_table_metadata = 0; inl = (struct mlx5_wqe_inl_small *) (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE); mpw->data.raw = (uint8_t *)&inl->raw; @@ -1171,14 +1235,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; uint32_t length; unsigned int segs_n = buf->nb_segs; uint8_t cs_flags; + rte_be32_t metadata; /* * Make sure there is enough room to store this packet and @@ -1200,18 +1263,23 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, */ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); cs_flags = txq_ol_cksum_to_cs(buf); + /* Copy metadata from mbuf if valid */ + metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata : + 0; /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if packet differs. 
*/ if (mpw.state == MLX5_MPW_STATE_OPENED) { if ((mpw.len != length) || (segs_n != 1) || + (mpw.wqe->eseg.flow_table_metadata != metadata) || (mpw.wqe->eseg.cs_flags != cs_flags)) mlx5_mpw_close(txq, &mpw); } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) { if ((mpw.len != length) || (segs_n != 1) || (length > inline_room) || + (mpw.wqe->eseg.flow_table_metadata != metadata) || (mpw.wqe->eseg.cs_flags != cs_flags)) { mlx5_mpw_inline_close(txq, &mpw); inline_room = @@ -1231,12 +1299,14 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; + mpw.wqe->eseg.flow_table_metadata = metadata; } else { if (unlikely(max_wqe < wqe_inl_n)) break; max_wqe -= wqe_inl_n; mlx5_mpw_inline_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; + mpw.wqe->eseg.flow_table_metadata = metadata; } } /* Multi-segment packets must be alone in their MPW. */ @@ -1329,14 +1399,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1450,6 +1519,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, unsigned int mpw_room = 0; unsigned int inl_pad = 0; uint32_t inl_hdr; + uint64_t addr_64; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, }; @@ -1459,8 +1529,6 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -1470,6 +1538,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, unsigned int do_inline = 0; /* Whether inline is possible. */ uint32_t length; uint8_t cs_flags; + rte_be32_t metadata; /* Multi-segmented packet is handled in slow-path outside. */ assert(NB_SEGS(buf) == 1); @@ -1477,6 +1546,9 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, if (max_elts - j == 0) break; cs_flags = txq_ol_cksum_to_cs(buf); + /* Copy metadata from mbuf if valid */ + metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata : + 0; /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if: @@ -1491,6 +1563,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, (length <= txq->inline_max_packet_sz && inl_pad + sizeof(inl_hdr) + length > mpw_room) || + (mpw.wqe->eseg.flow_table_metadata != metadata) || (mpw.wqe->eseg.cs_flags != cs_flags)) max_wqe -= mlx5_empw_close(txq, &mpw); } @@ -1514,6 +1587,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, sizeof(inl_hdr) + length <= mpw_room && !txq->mpw_hdr_dseg; mpw.wqe->eseg.cs_flags = cs_flags; + mpw.wqe->eseg.flow_table_metadata = metadata; } else { /* Evaluate whether the next packet can be inlined. 
		 * Inlining is possible when:
@@ -1586,13 +1660,13 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 				((uintptr_t)mpw.data.raw + inl_pad);
 			(*txq->elts)[elts_head++ & elts_m] = buf;
-			addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
-								 uintptr_t));
+			addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+								    uintptr_t));
 			*dseg = (rte_v128u32_t) {
 				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
-				addr,
-				addr >> 32,
+				addr_64,
+				addr_64 >> 32,
 			};
 			mpw.data.raw = (volatile void *)(dseg + 1);
 			mpw.total_len += (inl_pad + sizeof(*dseg));
@@ -1616,15 +1690,14 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		    (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
 			volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+			/* A CQE slot must always be available. */
+			assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 			/* Request completion on last WQE. */
 			wqe->ctrl[2] = rte_cpu_to_be_32(8);
 			/* Save elts_head in unused "immediate" field of WQE. */
 			wqe->ctrl[3] = elts_head;
 			txq->elts_comp = 0;
 			txq->mpw_comp = txq->wqe_ci;
-#ifndef NDEBUG
-			++txq->cq_pi;
-#endif
 		} else {
 			txq->elts_comp += j;
 		}
@@ -1985,7 +2058,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					mcqe->rx_hash_result);
 			rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
 			if (rxq->crc_present)
-				len -= ETHER_CRC_LEN;
+				len -= RTE_ETHER_CRC_LEN;
 			PKT_LEN(pkt) = len;
 		}
 		DATA_LEN(rep) = DATA_LEN(seg);
@@ -2107,8 +2180,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
 	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
 	unsigned int i = 0;
-	uint16_t rq_ci = rxq->rq_ci;
-	uint16_t strd_idx = rxq->strd_ci;
+	uint32_t rq_ci = rxq->rq_ci;
+	uint16_t consumed_strd = rxq->consumed_strd;
 	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
 
 	while (i < pkts_n) {
@@ -2116,13 +2189,14 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		void *addr;
 		int ret;
 		unsigned int len;
-		uint16_t consumed_strd;
+		uint16_t strd_cnt;
+		uint16_t strd_idx;
 		uint32_t offset;
 		uint32_t byte_cnt;
 		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
-		uint32_t rss_hash_res;
+		uint32_t rss_hash_res = 0;
 
-		if (strd_idx == strd_n) {
+		if (consumed_strd == strd_n) {
 			/* Replace WQE only if the buffer is still in use. */
 			if (rte_atomic16_read(&buf->refcnt) > 1) {
 				mprq_buf_replace(rxq, rq_ci & wq_mask);
@@ -2142,7 +2216,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				rxq->mprq_repl = rep;
 			}
 			/* Advance to the next WQE. */
-			strd_idx = 0;
+			consumed_strd = 0;
 			++rq_ci;
 			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
 		}
@@ -2156,14 +2230,21 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			continue;
 		}
 		byte_cnt = ret;
-		consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
-				MLX5_MPRQ_STRIDE_NUM_SHIFT;
-		assert(consumed_strd);
-		/* Calculate offset before adding up stride index. */
-		offset = strd_idx * strd_sz + strd_shift;
-		strd_idx += consumed_strd;
+		strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+			   MLX5_MPRQ_STRIDE_NUM_SHIFT;
+		assert(strd_cnt);
+		consumed_strd += strd_cnt;
 		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
 			continue;
+		if (mcqe == NULL) {
+			rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
+			strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+		} else {
+			/* mini-CQE for MPRQ doesn't have hash result. 
*/
+			strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
+		}
+		assert(strd_idx < strd_n);
+		assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
 		/*
 		 * Currently configured to receive a packet per stride. But if
 		 * MTU is adjusted through kernel interface, device could
@@ -2171,7 +2252,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		 * case, the packet should be dropped because it is bigger than
 		 * the max_rx_pkt_len.
 		 */
-		if (unlikely(consumed_strd > 1)) {
+		if (unlikely(strd_cnt > 1)) {
 			++rxq->stats.idropped;
 			continue;
 		}
@@ -2183,7 +2264,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
 		assert((int)len >= (rxq->crc_present << 2));
 		if (rxq->crc_present)
-			len -= ETHER_CRC_LEN;
+			len -= RTE_ETHER_CRC_LEN;
+		offset = strd_idx * strd_sz + strd_shift;
 		addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
 		/* Initialize the offload flag. */
 		pkt->ol_flags = 0;
@@ -2206,7 +2288,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		} else {
 			rte_iova_t buf_iova;
 			struct rte_mbuf_ext_shared_info *shinfo;
-			uint16_t buf_len = consumed_strd * strd_sz;
+			uint16_t buf_len = strd_cnt * strd_sz;
 
 			/* Increment the refcnt of the whole chunk. */
 			rte_atomic16_add_return(&buf->refcnt, 1);
@@ -2242,10 +2324,6 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				continue;
 			}
 		}
-		/* If compressed, take hash result from mini-CQE. */
-		rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
-						cqe->rx_hash_res :
-						mcqe->rx_hash_result);
 		rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
 		PKT_LEN(pkt) = len;
 		DATA_LEN(pkt) = len;
@@ -2259,7 +2337,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		++i;
 	}
 	/* Update the consumer indexes. */
-	rxq->strd_ci = strd_idx;
+	rxq->consumed_strd = consumed_strd;
 	rte_cio_wmb();
 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	if (rq_ci != rxq->rq_ci) {
@@ -2295,6 +2373,7 @@ removed_tx_burst(void *dpdk_txq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
 {
+	rte_mb();
 	return 0;
 }
 
@@ -2319,6 +2398,7 @@ removed_rx_burst(void *dpdk_txq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
 {
+	rte_mb();
 	return 0;
 }
 
@@ -2329,7 +2409,7 @@ removed_rx_burst(void *dpdk_txq,
  * (e.g. mlx5_rxtx_vec_sse.c for x86). 
*/ -uint16_t __attribute__((weak)) +__rte_weak uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t pkts_n __rte_unused) @@ -2337,7 +2417,7 @@ mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, return 0; } -uint16_t __attribute__((weak)) +__rte_weak uint16_t mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t pkts_n __rte_unused) @@ -2345,7 +2425,7 @@ mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, return 0; } -uint16_t __attribute__((weak)) +__rte_weak uint16_t mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t pkts_n __rte_unused) @@ -2353,25 +2433,25 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, return 0; } -int __attribute__((weak)) +__rte_weak int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } -int __attribute__((weak)) +__rte_weak int mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP; } -int __attribute__((weak)) +__rte_weak int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { return -ENOTSUP; } -int __attribute__((weak)) +__rte_weak int mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) { return -ENOTSUP;
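
Usage note (not part of the patch): the Rx callbacks added above are meant to be reached through the generic ethdev wrappers rte_eth_rx_queue_count() and rte_eth_rx_descriptor_status(), assuming the driver wires them into its dev_ops elsewhere in this patch set, and the Tx burst routines consume the mbuf metadata they copy into the WQE Ethernet segment. A minimal sketch of how an application might drive both paths; the port/queue ids, the tag value, and the example_* helper names are illustrative assumptions only:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EXAMPLE_PORT  0	/* assumed mlx5 port id */
#define EXAMPLE_QUEUE 0	/* assumed Rx queue id */

/* Poll the Rx queue fill level; on mlx5 this dispatches to
 * mlx5_rx_queue_count() and mlx5_rx_descriptor_status() above. */
static int
example_rx_fill_level(void)
{
	int used = rte_eth_rx_queue_count(EXAMPLE_PORT, EXAMPLE_QUEUE);

	if (used < 0)
		return used; /* e.g. -ENOTSUP when a vectorized Rx burst is active. */
	/* Descriptors below the fill level report DONE. */
	if (used > 0 &&
	    rte_eth_rx_descriptor_status(EXAMPLE_PORT, EXAMPLE_QUEUE,
					 used - 1) != RTE_ETH_RX_DESC_DONE)
		return -1; /* unexpected state */
	return used;
}

/* Tag an outgoing packet; the Tx burst routines patched above copy the
 * value verbatim into eseg.flow_table_metadata when the flag is set. */
static void
example_set_tx_metadata(struct rte_mbuf *m, uint32_t tag)
{
	m->tx_metadata = tag;		/* opaque 32-bit value, example only */
	m->ol_flags |= PKT_TX_METADATA;
}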