-
-/**
- * TX with Enhanced MPW support.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- unsigned int i = 0;
- unsigned int j = 0;
- uint16_t max_elts;
- uint16_t max_wqe;
- unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
- unsigned int mpw_room = 0;
- unsigned int inl_pad = 0;
- uint32_t inl_hdr;
- struct mlx5_mpw mpw = {
- .state = MLX5_MPW_STATE_CLOSED,
- };
-
- if (unlikely(!pkts_n))
- return 0;
- /* Start processing. */
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- if (unlikely(!max_wqe))
- return 0;
- do {
- struct rte_mbuf *buf = *(pkts++);
- uintptr_t addr;
- unsigned int do_inline = 0; /* Whether inline is possible. */
- uint32_t length;
- uint8_t cs_flags;
-
- /* Multi-segmented packets are handled in the slow path outside. */
- assert(NB_SEGS(buf) == 1);
- /* Make sure there is enough room to store this packet. */
- if (max_elts - j == 0)
- break;
- cs_flags = txq_ol_cksum_to_cs(buf);
- /* Retrieve packet information. */
- length = PKT_LEN(buf);
- /* Close the current session and start a new one if:
- * - no space is left even for a dseg
- * - the next packet can be inlined, but does not fit
- *   the remaining session space
- * - cs_flags differ
- */
- if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
- if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
- mpw_room) ||
- (length <= txq->inline_max_packet_sz &&
- inl_pad + sizeof(inl_hdr) + length >
- mpw_room) ||
- (mpw.wqe->eseg.cs_flags != cs_flags))
- max_wqe -= mlx5_empw_close(txq, &mpw);
- }
- if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
- /* In Enhanced MPW, inline as much as the budget
- * allows. The remaining space is filled with dsegs.
- * If the title WQEBB isn't padded, it holds two
- * dsegs.
- */
- mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
- (max_inline ? max_inline :
- pkts_n * MLX5_WQE_DWORD_SIZE) +
- MLX5_WQE_SIZE);
- if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
- break;
- /* Don't pad the title WQEBB, to avoid wasting WQ space. */
- mlx5_empw_new(txq, &mpw, 0);
- mpw_room -= mpw.total_len;
- inl_pad = 0;
- do_inline = length <= txq->inline_max_packet_sz &&
- sizeof(inl_hdr) + length <= mpw_room &&
- !txq->mpw_hdr_dseg;
- mpw.wqe->eseg.cs_flags = cs_flags;
- } else {
- /* Evaluate whether the next packet can be inlined.
- * Inlining is possible when:
- * - the length is below the configured maximum
- * - the length fits in the remaining space
- * - it is not required to fill the title WQEBB with
- *   dsegs
- */
- do_inline =
- length <= txq->inline_max_packet_sz &&
- inl_pad + sizeof(inl_hdr) + length <=
- mpw_room &&
- (!txq->mpw_hdr_dseg ||
- mpw.total_len >= MLX5_WQE_SIZE);
- }
- if (max_inline && do_inline) {
- /* Inline packet into WQE. */
- unsigned int max;
-
- assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
- assert(length == DATA_LEN(buf));
- inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- mpw.data.raw = (volatile void *)
- ((uintptr_t)mpw.data.raw + inl_pad);
- max = tx_mlx5_wq_tailroom(txq,
- (void *)(uintptr_t)mpw.data.raw);
- /* Copy inline header. */
- mpw.data.raw = (volatile void *)
- mlx5_copy_to_wq(
- (void *)(uintptr_t)mpw.data.raw,
- &inl_hdr,
- sizeof(inl_hdr),
- (void *)(uintptr_t)txq->wqes,
- max);
- max = tx_mlx5_wq_tailroom(txq,
- (void *)(uintptr_t)mpw.data.raw);
- /* Copy packet data. */
- mpw.data.raw = (volatile void *)
- mlx5_copy_to_wq(
- (void *)(uintptr_t)mpw.data.raw,
- (void *)addr,
- length,
- (void *)(uintptr_t)txq->wqes,
- max);
- ++mpw.pkts_n;
- mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
- /* No completion is needed since the entire packet
- * has been copied to the WQ. Free the mbuf right
- * away.
- */
- rte_pktmbuf_free_seg(buf);
- mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
- /* Compute padding to apply before the next packet, if any. */
- inl_pad = (((uintptr_t)mpw.data.raw +
- (MLX5_WQE_DWORD_SIZE - 1)) &
- ~(MLX5_WQE_DWORD_SIZE - 1)) -
- (uintptr_t)mpw.data.raw;
- } else {
- /* Not inlining. Write a dseg pointing to the packet buffer. */
- volatile rte_v128u32_t *dseg;
-
- assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
- assert((inl_pad + sizeof(*dseg)) <= mpw_room);
- assert(length == DATA_LEN(buf));
- if (!tx_mlx5_wq_tailroom(txq,
- (void *)((uintptr_t)mpw.data.raw
- + inl_pad)))
- dseg = (volatile void *)txq->wqes;
- else
- dseg = (volatile void *)
- ((uintptr_t)mpw.data.raw +
- inl_pad);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t));
- *dseg = (rte_v128u32_t) {
- rte_cpu_to_be_32(length),
- mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
- };
- mpw.data.raw = (volatile void *)(dseg + 1);
- mpw.total_len += (inl_pad + sizeof(*dseg));
- ++j;
- ++mpw.pkts_n;
- mpw_room -= (inl_pad + sizeof(*dseg));
- inl_pad = 0;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += length;
-#endif
- ++i;
- } while (i < pkts_n);
- /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
- return 0;
- /* Check whether completion threshold has been reached. */
- if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
- (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
- (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
- volatile struct mlx5_wqe *wqe = mpw.wqe;
-
- /* Request completion on last WQE. */
- wqe->ctrl[2] = rte_cpu_to_be_32(8);
- /* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
- txq->elts_comp = 0;
- txq->mpw_comp = txq->wqe_ci;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
- } else {
- txq->elts_comp += j;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
- mlx5_empw_close(txq, &mpw);
- /* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, mpw.wqe);
- txq->elts_head = elts_head;
- return i;
-}
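-
-/*
- * A minimal sketch of the align-up arithmetic used for "inl_pad" above.
- * "example_align_up_pad" is a hypothetical helper for illustration only;
- * it is not part of the datapath.
- */
-static inline uintptr_t
-example_align_up_pad(uintptr_t ptr, uintptr_t align)
-{
- /* Distance from ptr to the next multiple of align (0 if aligned). */
- return ((ptr + align - 1) & ~(align - 1)) - ptr;
-}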
-
-/**
- * DPDK callback for TX with Enhanced MPW support.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint16_t n;
- uint16_t ret;
-
- n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
- if (n) {
- ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
- if (!ret)
- break;
- nb_tx += ret;
- }
- n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
- if (n) {
- ret = txq_burst_empw(txq, &pkts[nb_tx], n);
- if (!ret)
- break;
- nb_tx += ret;
- }
- }
- return nb_tx;
-}
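-
-/*
- * Hedged usage sketch, not part of this file: once installed as the
- * device tx_pkt_burst callback, the function above is reached through
- * rte_eth_tx_burst(). Port id, queue id and the retry policy below are
- * hypothetical; rte_ethdev.h is assumed to be included.
- */
-static void
-example_tx_usage(struct rte_mbuf **pkts, uint16_t n)
-{
- uint16_t sent = 0;
-
- while (sent < n) {
-  uint16_t done = rte_eth_tx_burst(0, 0, pkts + sent, n - sent);
-
-  if (done == 0)
-   break; /* Queue is full; the caller may retry later. */
-  sent += done;
- }
-}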
-
-/**
- * Translate RX completion flags to packet type.
- *
- * @param[in] rxq
- * Pointer to RX queue structure.
- * @param[in] cqe
- * Pointer to CQE.
- *
- * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
- *
- * @return
- * Packet type for struct rte_mbuf.
- */
-static inline uint32_t
-rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
-{
- uint8_t idx;
- uint8_t pinfo = cqe->pkt_info;
- uint16_t ptype = cqe->hdr_type_etc;
-
- /*
- * The index to the array should have:
- * bit[1:0] = l3_hdr_type
- * bit[4:2] = l4_hdr_type
- * bit[5] = ip_frag
- * bit[6] = tunneled
- * bit[7] = outer_l3_type
- */
- idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
- return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
-}
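-
-/*
- * Worked example for the index composition above; the values are
- * illustrative. With pinfo = 0x1 (tunneled) and ptype = 0x2400:
- *   idx = ((0x1 & 0x3) << 6) | ((0x2400 & 0xfc00) >> 10)
- *       = 0x40 | 0x09 = 0x49
- */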
-
-/**
- * Get the size of the next packet for a given CQE. For compressed CQEs, the
- * consumer index is updated only once all packets of the compressed session
- * have been processed.
- *
- * @param rxq
- * Pointer to RX queue.
- * @param cqe
- * CQE to process.
- * @param cqe_cnt
- * Size of the CQ ring minus one, used as an index mask.
- * @param[out] mcqe
- * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
- * written.
- *
- * @return
- * Packet size in bytes (0 if there is none), -1 in case of completion
- * with error.
- */
-static inline int
-mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
-{
- struct rxq_zip *zip = &rxq->zip;
- uint16_t cqe_n = cqe_cnt + 1;
- int len = 0;
- uint16_t idx, end;
-
- /* Process compressed data in the CQE and mini arrays. */
- if (zip->ai) {
- volatile struct mlx5_mini_cqe8 (*mc)[8] =
- (volatile struct mlx5_mini_cqe8 (*)[8])
- (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
-
- len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
- *mcqe = &(*mc)[zip->ai & 7];
- if ((++zip->ai & 7) == 0) {
- /* Invalidate consumed CQEs. */
- idx = zip->ca;
- end = zip->na;
- while (idx != end) {
- (*rxq->cqes)[idx & cqe_cnt].op_own =
- MLX5_CQE_INVALIDATE;
- ++idx;
- }
- /*
- * Increment consumer index to skip the number of
- * CQEs consumed. Hardware leaves holes in the CQ
- * ring for software use.
- */
- zip->ca = zip->na;
- zip->na += 8;
- }
- if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
- /* Invalidate the rest. */
- idx = zip->ca;
- end = zip->cq_ci;
-
- while (idx != end) {
- (*rxq->cqes)[idx & cqe_cnt].op_own =
- MLX5_CQE_INVALIDATE;
- ++idx;
- }
- rxq->cq_ci = zip->cq_ci;
- zip->ai = 0;
- }
- /* No compressed data, get next CQE and verify if it is compressed. */
- } else {
- int ret;
- int8_t op_own;
-
- ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
- if (unlikely(ret == 1))
- return 0;
- ++rxq->cq_ci;
- op_own = cqe->op_own;
- rte_cio_rmb();
- if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
- volatile struct mlx5_mini_cqe8 (*mc)[8] =
- (volatile struct mlx5_mini_cqe8 (*)[8])
- (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
- cqe_cnt].pkt_info);
-
- /* Fix endianness. */
- zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
- /*
- * The current mini array position is the one returned
- * by check_cqe().
- *
- * If the completion comprises several mini arrays, as a
- * special case the second one is located 7 CQEs after
- * the initial CQE instead of 8 for subsequent ones.
- */
- zip->ca = rxq->cq_ci;
- zip->na = zip->ca + 7;
- /* Compute the next non-compressed CQE. */
- --rxq->cq_ci;
- zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
- /* Get packet size to return. */
- len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
- *mcqe = &(*mc)[0];
- zip->ai = 1;
- /* Prefetch all the entries to be invalidated. */
- idx = zip->ca;
- end = zip->cq_ci;
- while (idx != end) {
- rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
- ++idx;
- }
- } else {
- len = rte_be_to_cpu_32(cqe->byte_cnt);
- }
- /* Error while receiving packet. */
- if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
- return -1;
- }
- return len;
-}
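-
-/*
- * Hedged walk-through of the compression bookkeeping above, with
- * illustrative numbers: a compressed CQE at index 100 announcing 11
- * mini-CQEs keeps its first mini array in the following CQE slot, so
- * zip->ca = 101, zip->na = 108 and zip->cq_ci = 111. zip->ai then walks
- * 1..11, CQEs [ca, na) are invalidated whenever an 8-entry mini array
- * is drained, and rxq->cq_ci finally jumps to 111.
- */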
-
-/**
- * Translate RX completion flags to offload flags.
- *
- * @param[in] cqe
- * Pointer to CQE.
- *
- * @return
- * Offload flags (ol_flags) for struct rte_mbuf.
- */
-static inline uint32_t
-rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
-{
- uint32_t ol_flags = 0;
- uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
-
- ol_flags =
- TRANSPOSE(flags,
- MLX5_CQE_RX_L3_HDR_VALID,
- PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_L4_HDR_VALID,
- PKT_RX_L4_CKSUM_GOOD);
- return ol_flags;
-}
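-
-/*
- * A minimal sketch of the flag transposition idea used above, assuming
- * TRANSPOSE() rescales a masked flag bit between positions.
- * "example_transpose" is a hypothetical helper, not the driver macro,
- * and only handles single-bit (power-of-two) masks.
- */
-static inline uint32_t
-example_transpose(uint32_t val, uint32_t from, uint32_t to)
-{
- /* Move the bit selected by "from" to the position of "to". */
- return (from >= to) ? (val & from) / (from / to) :
-        (val & from) * (to / from);
-}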
-
-/**
- * Fill in mbuf fields from RX completion flags.
- * Note that pkt->ol_flags should be initialized outside of this function.
- *
- * @param rxq
- * Pointer to RX queue.
- * @param pkt
- * mbuf to fill.
- * @param cqe
- * CQE to process.
- * @param rss_hash_res
- * Packet RSS hash result.
- */
-static inline void
-rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
- volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
-{
- /* Update packet information. */
- pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
- if (rss_hash_res && rxq->rss_hash) {
- pkt->hash.rss = rss_hash_res;
- pkt->ol_flags |= PKT_RX_RSS_HASH;
- }
- if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
- pkt->ol_flags |= PKT_RX_FDIR;
- if (cqe->sop_drop_qpn !=
- rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
- uint32_t mark = cqe->sop_drop_qpn;
-
- pkt->ol_flags |= PKT_RX_FDIR_ID;
- pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
- }
- }
- if (rxq->csum)
- pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
- if (rxq->vlan_strip &&
- (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
- pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
- }
- if (rxq->hw_timestamp) {
- pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
- pkt->ol_flags |= PKT_RX_TIMESTAMP;
- }
-}
-
-/**
- * DPDK callback for RX.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_rxq_data *rxq = dpdk_rxq;
- const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
- const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
- const unsigned int sges_n = rxq->sges_n;
- struct rte_mbuf *pkt = NULL;
- struct rte_mbuf *seg = NULL;
- volatile struct mlx5_cqe *cqe =
- &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
- unsigned int i = 0;
- unsigned int rq_ci = rxq->rq_ci << sges_n;
- int len = 0; /* keep its value across iterations. */
-
- while (pkts_n) {
- unsigned int idx = rq_ci & wqe_cnt;
- volatile struct mlx5_wqe_data_seg *wqe =
- &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
- struct rte_mbuf *rep = (*rxq->elts)[idx];
- volatile struct mlx5_mini_cqe8 *mcqe = NULL;
- uint32_t rss_hash_res;
-
- if (pkt)
- NEXT(seg) = rep;
- seg = rep;
- rte_prefetch0(seg);
- rte_prefetch0(cqe);
- rte_prefetch0(wqe);
- rep = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(rep == NULL)) {
- ++rxq->stats.rx_nombuf;
- if (!pkt) {
- /*
- * No buffers before we even started,
- * bail out silently.
- */
- break;
- }
- while (pkt != seg) {
- assert(pkt != (*rxq->elts)[idx]);
- rep = NEXT(pkt);
- NEXT(pkt) = NULL;
- NB_SEGS(pkt) = 1;
- rte_mbuf_raw_free(pkt);
- pkt = rep;
- }
- break;
- }
- if (!pkt) {
- cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
- if (!len) {
- rte_mbuf_raw_free(rep);
- break;
- }
- if (unlikely(len == -1)) {
- /* RX error, packet is likely too large. */
- rte_mbuf_raw_free(rep);
- ++rxq->stats.idropped;
- goto skip;
- }
- pkt = seg;
- assert(len >= (rxq->crc_present << 2));
- pkt->ol_flags = 0;
- /* If compressed, take hash result from mini-CQE. */
- rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
- cqe->rx_hash_res :
- mcqe->rx_hash_result);
- rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
- if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
- PKT_LEN(pkt) = len;
- }
- DATA_LEN(rep) = DATA_LEN(seg);
- PKT_LEN(rep) = PKT_LEN(seg);
- SET_DATA_OFF(rep, DATA_OFF(seg));
- PORT(rep) = PORT(seg);
- (*rxq->elts)[idx] = rep;
- /*
- * Fill NIC descriptor with the new buffer. The lkey and size
- * of the buffers are already known, only the buffer address
- * changes.
- */
- wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
- /* If there's only one MR, no need to replace LKey in WQE. */
- if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
- wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
- if (len > DATA_LEN(seg)) {
- len -= DATA_LEN(seg);
- ++NB_SEGS(pkt);
- ++rq_ci;
- continue;
- }
- DATA_LEN(seg) = len;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment bytes counter. */
- rxq->stats.ibytes += PKT_LEN(pkt);
-#endif
- /* Return packet. */
- *(pkts++) = pkt;
- pkt = NULL;
- --pkts_n;
- ++i;
-skip:
- /* Align consumer index to the next stride. */
- rq_ci >>= sges_n;
- ++rq_ci;
- rq_ci <<= sges_n;
- }
- if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
- return 0;
- /* Update the consumer index. */
- rxq->rq_ci = rq_ci >> sges_n;
- rte_cio_wmb();
- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- rte_cio_wmb();
- *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment packets counter. */
- rxq->stats.ipackets += i;
-#endif
- return i;
-}
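-
-/*
- * Hedged usage sketch, not part of this file: the callback above is
- * normally reached through rte_eth_rx_burst() once installed as the
- * device rx_pkt_burst callback. Port id, queue id and burst size are
- * hypothetical; rte_ethdev.h is assumed to be included.
- */
-static void
-example_rx_usage(void)
-{
- struct rte_mbuf *pkts[32];
- uint16_t nb = rte_eth_rx_burst(0, 0, pkts, 32);
- uint16_t k;
-
- /* A real application would process the packets before freeing. */
- for (k = 0; k < nb; ++k)
-  rte_pktmbuf_free(pkts[k]);
-}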
-
-void
-mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
-{
- struct mlx5_mprq_buf *buf = opaque;
-
- if (rte_atomic16_read(&buf->refcnt) == 1) {
- rte_mempool_put(buf->mp, buf);
- } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
- rte_atomic16_set(&buf->refcnt, 1);
- rte_mempool_put(buf->mp, buf);
- }
-}
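-
-/*
- * Illustrative lifecycle for the callback above: an MPRQ buffer starts
- * with refcnt == 1, held by the Rx queue; each mbuf attached to one of
- * its strides increments it. The buffer returns to its mempool either
- * when the queue drops the last reference (refcnt == 1 branch) or when
- * the final attached mbuf is freed (decrement-to-zero branch, which
- * also resets refcnt to 1 for reuse).
- */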
-
-void
-mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
-{
- mlx5_mprq_buf_free_cb(NULL, buf);
-}
-
-static inline void
-mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
-{
- struct mlx5_mprq_buf *rep = rxq->mprq_repl;
- volatile struct mlx5_wqe_data_seg *wqe =
- &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
- void *addr;
-
- assert(rep != NULL);
- /* Replace MPRQ buf. */
- (*rxq->mprq_bufs)[rq_idx] = rep;
- /* Replace WQE. */
- addr = mlx5_mprq_buf_addr(rep);
- wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
- /* If there's only one MR, no need to replace LKey in WQE. */
- if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
- wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
- /* Stash a buffer for the next replacement. */
- if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
- rxq->mprq_repl = rep;
- else
- rxq->mprq_repl = NULL;
-}
-
-/**
- * DPDK callback for RX with Multi-Packet RQ support.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_rxq_data *rxq = dpdk_rxq;
- const unsigned int strd_n = 1 << rxq->strd_num_n;
- const unsigned int strd_sz = 1 << rxq->strd_sz_n;
- const unsigned int strd_shift =
- MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
- const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
- const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
- volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
- unsigned int i = 0;
- uint16_t rq_ci = rxq->rq_ci;
- uint16_t strd_idx = rxq->strd_ci;
- struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
-
- while (i < pkts_n) {
- struct rte_mbuf *pkt;
- void *addr;
- int ret;
- unsigned int len;
- uint16_t consumed_strd;
- uint32_t offset;
- uint32_t byte_cnt;
- volatile struct mlx5_mini_cqe8 *mcqe = NULL;
- uint32_t rss_hash_res;
-
- if (strd_idx == strd_n) {
- /* Replace WQE only if the buffer is still in use. */
- if (rte_atomic16_read(&buf->refcnt) > 1) {
- mprq_buf_replace(rxq, rq_ci & wq_mask);
- /* Release the old buffer. */
- mlx5_mprq_buf_free(buf);
- } else if (unlikely(rxq->mprq_repl == NULL)) {
- struct mlx5_mprq_buf *rep;
-
- /*
- * The MPRQ mempool is currently out of buffers, so
- * packets are being memcpy'd regardless of their
- * size. Retry the allocation to get back to normal
- * operation.
- */
- if (!rte_mempool_get(rxq->mprq_mp,
- (void **)&rep))
- rxq->mprq_repl = rep;
- }
- /* Advance to the next WQE. */
- strd_idx = 0;
- ++rq_ci;
- buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
- }
- cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
- ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
- if (!ret)
- break;
- if (unlikely(ret == -1)) {
- /* RX error, packet is likely too large. */
- ++rxq->stats.idropped;
- continue;
- }
- byte_cnt = ret;
- consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
- MLX5_MPRQ_STRIDE_NUM_SHIFT;
- assert(consumed_strd);
- /* Calculate the offset before advancing the stride index. */
- offset = strd_idx * strd_sz + strd_shift;
- strd_idx += consumed_strd;
- if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
- continue;
- /*
- * The queue is configured to receive one packet per
- * stride. But if the MTU is adjusted through the
- * kernel interface, the device could consume multiple
- * strides without raising an error. In that case the
- * packet should be dropped because it is bigger than
- * max_rx_pkt_len.
- */
- if (unlikely(consumed_strd > 1)) {
- ++rxq->stats.idropped;
- continue;
- }
- pkt = rte_pktmbuf_alloc(rxq->mp);
- if (unlikely(pkt == NULL)) {
- ++rxq->stats.rx_nombuf;
- break;
- }
- len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
- assert((int)len >= (rxq->crc_present << 2));
- if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
- addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
- /* Initialize the offload flag. */
- pkt->ol_flags = 0;
- /*
- * Memcpy the packet to the target mbuf if:
- * - the packet size is smaller than mprq_max_memcpy_len, or
- * - the mempool for Multi-Packet RQ is out of buffers.
- */
- if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
- /*
- * When memcpy'ing a packet due to buffer exhaustion,
- * it must fit into the target mbuf.
- */
- if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
- rte_pktmbuf_free_seg(pkt);
- ++rxq->stats.idropped;
- continue;
- }
- rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
- } else {
- rte_iova_t buf_iova;
- struct rte_mbuf_ext_shared_info *shinfo;
- uint16_t buf_len = consumed_strd * strd_sz;
-
- /* Increment the refcnt of the whole chunk. */
- rte_atomic16_add_return(&buf->refcnt, 1);
- assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
- strd_n + 1);
- addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
- /*
- * The MLX5 device doesn't use the IOVA itself, but it
- * is needed when the Rx packet is later transmitted
- * via a different PMD.
- */
- buf_iova = rte_mempool_virt2iova(buf) +
- RTE_PTR_DIFF(addr, buf);
- shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
- &buf_len, mlx5_mprq_buf_free_cb, buf);
- /*
- * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
- * the stride is attached to the mbuf; more offload
- * flags will be added below by calling
- * rxq_cq_to_mbuf(). Other fields will be overwritten.
- */
- rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
- shinfo);
- rte_pktmbuf_reset_headroom(pkt);
- assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
- /*
- * Prevent potential overflow due to MTU change through
- * kernel interface.
- */
- if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
- rte_pktmbuf_free_seg(pkt);
- ++rxq->stats.idropped;
- continue;
- }
- }
- /* If compressed, take hash result from mini-CQE. */
- rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
- cqe->rx_hash_res :
- mcqe->rx_hash_result);
- rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
- PKT_LEN(pkt) = len;
- DATA_LEN(pkt) = len;
- PORT(pkt) = rxq->port_id;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment bytes counter. */
- rxq->stats.ibytes += PKT_LEN(pkt);
-#endif
- /* Return packet. */
- *(pkts++) = pkt;
- ++i;
- }
- /* Update the consumer indexes. */
- rxq->strd_ci = strd_idx;
- rte_cio_wmb();
- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- if (rq_ci != rxq->rq_ci) {
- rxq->rq_ci = rq_ci;
- rte_cio_wmb();
- *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment packets counter. */
- rxq->stats.ipackets += i;
-#endif
- return i;
-}
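-
-/*
- * Worked example of the stride addressing above; the values are
- * illustrative. With strd_sz = 2048, strd_shift = 0 and strd_idx = 3,
- * the packet starts at offset = 3 * 2048 + 0 = 6144 bytes into the
- * MPRQ buffer.
- */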
-
-/**
- * Dummy DPDK callback for TX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-removed_tx_burst(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-/**
- * Dummy DPDK callback for RX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-removed_rx_burst(void *dpdk_rxq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-/*
- * Vectorized Rx/Tx routines are not compiled in when the required vector
- * instructions are not supported on the target architecture. The following
- * null stubs are needed for linkage when those routines are not built
- * outside of this file (e.g. mlx5_rxtx_vec_sse.c for x86).
- */
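-
-/*
- * A minimal sketch of the weak-symbol mechanism relied upon below, with
- * hypothetical symbol names: a weak stub is overridden by any strong
- * definition found at link time, so the sketch keeps the two definitions
- * in separate translation units.
- *
- *   // in this file: fallback used when no vectorized unit is built
- *   uint16_t __attribute__((weak)) example_burst(void) { return 0; }
- *
- *   // in example_vec.c, compiled only when SIMD is available
- *   uint16_t example_burst(void) { return 1; }
- */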
-
-uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- return 0;
-}
-
-int __attribute__((weak))
-mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
-{
- return -ENOTSUP;
-}
-
-int __attribute__((weak))
-mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
-{
- return -ENOTSUP;
-}
-
-int __attribute__((weak))
-mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
-{
- return -ENOTSUP;
-}
-
-int __attribute__((weak))
-mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
-{
- return -ENOTSUP;
-}