X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.h;h=534aaeb467f67c5def152eed9755daf7ea8488bd;hb=a1c1746d9964cedaabd6f47335de59622c75c73b;hp=fde0ca2515a648e092d9209e3be5fc0fd673e55c;hpb=f3db9489188a71529788dc90f363c310a7cffb34;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index fde0ca2515..534aaeb467 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -40,27 +40,30 @@ /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ #ifdef PEDANTIC -#pragma GCC diagnostic ignored "-pedantic" +#pragma GCC diagnostic ignored "-Wpedantic" #endif #include +#include #ifdef PEDANTIC -#pragma GCC diagnostic error "-pedantic" +#pragma GCC diagnostic error "-Wpedantic" #endif /* DPDK headers don't like -pedantic. */ #ifdef PEDANTIC -#pragma GCC diagnostic ignored "-pedantic" +#pragma GCC diagnostic ignored "-Wpedantic" #endif #include #include +#include #ifdef PEDANTIC -#pragma GCC diagnostic error "-pedantic" +#pragma GCC diagnostic error "-Wpedantic" #endif #include "mlx5_utils.h" #include "mlx5.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" +#include "mlx5_prm.h" struct mlx5_rxq_stats { unsigned int idx; /**< Mapping index. */ @@ -81,55 +84,65 @@ struct mlx5_txq_stats { uint64_t odropped; /**< Total of packets not sent when TX ring full. */ }; -/* RX element (scattered packets). */ -struct rxq_elt_sp { - struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */ - struct rte_mbuf *bufs[MLX5_PMD_SGE_WR_N]; /* SGEs buffers. */ -}; - -/* RX element. */ -struct rxq_elt { - struct ibv_sge sge; /* Scatter/Gather Element. */ - struct rte_mbuf *buf; /* SGE buffer. */ -}; - /* Flow director queue structure. */ struct fdir_queue { struct ibv_qp *qp; /* Associated RX QP. */ struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */ + struct ibv_exp_wq *wq; /* Work queue. */ + struct ibv_cq *cq; /* Completion queue. */ }; struct priv; +/* Compressed CQE context. */ +struct rxq_zip { + uint16_t ai; /* Array index. */ + uint16_t ca; /* Current array index. */ + uint16_t na; /* Next array index. */ + uint16_t cq_ci; /* The next CQE. */ + uint32_t cqe_cnt; /* Number of CQEs. */ +}; + /* RX queue descriptor. */ struct rxq { - struct priv *priv; /* Back pointer to private data. */ - struct rte_mempool *mp; /* Memory Pool for allocations. */ - struct ibv_mr *mr; /* Memory Region (for mp). */ - struct ibv_cq *cq; /* Completion Queue. */ - struct ibv_exp_wq *wq; /* Work Queue. */ - struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */ -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS - struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */ -#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - struct ibv_exp_cq_family *if_cq; /* CQ interface. */ -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - unsigned int port_id; /* Port ID for incoming packets. */ - unsigned int elts_n; /* (*elts)[] length. */ - unsigned int elts_head; /* Current index in (*elts)[]. */ - union { - struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */ - struct rxq_elt (*no_sp)[]; /* RX elements. */ - } elts; - unsigned int sp:1; /* Use scattered RX elements. */ unsigned int csum:1; /* Enable checksum offloading. */ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int vlan_strip:1; /* Enable VLAN stripping. */ - uint32_t mb_len; /* Length of a mp-issued mbuf. */ - struct mlx5_rxq_stats stats; /* RX queue counters. */ + unsigned int crc_present:1; /* CRC must be subtracted. 
*/ + unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */ + unsigned int cqe_n:4; /* Log 2 of CQ elements. */ + unsigned int elts_n:4; /* Log 2 of Mbufs. */ + unsigned int port_id:8; + unsigned int rss_hash:1; /* RSS hash result is enabled. */ + unsigned int mark:1; /* Marked flow available on the queue. */ + unsigned int pending_err:1; /* CQE error needs to be handled. */ + unsigned int trim_elts:1; /* Whether elts needs clean-up. */ + unsigned int :6; /* Remaining bits. */ + volatile uint32_t *rq_db; + volatile uint32_t *cq_db; + uint16_t rq_ci; + uint16_t rq_pi; + uint16_t cq_ci; + volatile struct mlx5_wqe_data_seg(*wqes)[]; + volatile struct mlx5_cqe(*cqes)[]; + struct rxq_zip zip; /* Compressed context. */ + struct rte_mbuf *(*elts)[]; + struct rte_mempool *mp; + struct mlx5_rxq_stats stats; + uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */ + struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */ +} __rte_cache_aligned; + +/* RX queue control descriptor. */ +struct rxq_ctrl { + struct priv *priv; /* Back pointer to private data. */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_exp_wq *wq; /* Work Queue. */ + struct fdir_queue *fdir_queue; /* Flow director queue. */ + struct ibv_mr *mr; /* Memory Region (for mp). */ + struct ibv_comp_channel *channel; unsigned int socket; /* CPU socket ID for allocations. */ - struct ibv_exp_res_domain *rd; /* Resource Domain. */ - struct fdir_queue fdir_queue; /* Flow director queue. */ + struct rxq rxq; /* Data path structure. */ }; /* Hash RX queue types. */ @@ -137,11 +150,9 @@ enum hash_rxq_type { HASH_RXQ_TCPV4, HASH_RXQ_UDPV4, HASH_RXQ_IPV4, -#ifdef HAVE_FLOW_SPEC_IPV6 HASH_RXQ_TCPV6, HASH_RXQ_UDPV6, HASH_RXQ_IPV6, -#endif /* HAVE_FLOW_SPEC_IPV6 */ HASH_RXQ_ETH, }; @@ -172,9 +183,7 @@ struct hash_rxq_init { } hdr; struct ibv_exp_flow_spec_tcp_udp tcp_udp; struct ibv_exp_flow_spec_ipv4 ipv4; -#ifdef HAVE_FLOW_SPEC_IPV6 struct ibv_exp_flow_spec_ipv6 ipv6; -#endif /* HAVE_FLOW_SPEC_IPV6 */ struct ibv_exp_flow_spec_eth eth; } flow_spec; /* Flow specification template. */ const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */ @@ -193,6 +202,7 @@ struct special_flow_init { uint8_t dst_mac_val[6]; uint8_t dst_mac_mask[6]; unsigned int hash_types; + unsigned int per_vlan:1; }; enum hash_rxq_flow_type { @@ -228,50 +238,58 @@ struct hash_rxq { struct ibv_qp *qp; /* Hash RX QP. */ enum hash_rxq_type type; /* Hash RX queue type. */ /* MAC flow steering rules, one per VLAN ID. */ - struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS]; - struct ibv_exp_flow *special_flow[MLX5_MAX_SPECIAL_FLOWS]; -}; - -/* TX element. */ -struct txq_elt { - struct rte_mbuf *buf; + struct ibv_exp_flow *mac_flow + [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS]; + struct ibv_exp_flow *special_flow + [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS]; }; -/* Linear buffer type. It is used when transmitting buffers with too many - * segments that do not fit the hardware queue (see max_send_sge). - * Extra segments are copied (linearized) in such buffers, replacing the - * last SGE during TX. - * The size is arbitrary but large enough to hold a jumbo frame with - * 8 segments considering mbuf.buf_len is about 2048 bytes. */ -typedef uint8_t linear_t[16384]; - /* TX queue descriptor. */ +__extension__ struct txq { - struct priv *priv; /* Back pointer to private data. */ + uint16_t elts_head; /* Current counter in (*elts)[]. */ + uint16_t elts_tail; /* Counter of first element awaiting completion. 
*/
+	uint16_t elts_comp; /* Counter since last completion request. */
+	uint16_t mpw_comp; /* WQ index since last completion request. */
+	uint16_t cq_ci; /* Consumer index for completion queue. */
+	uint16_t cq_pi; /* Producer index for completion queue. */
+	uint16_t wqe_ci; /* Consumer index for work queue. */
+	uint16_t wqe_pi; /* Producer index for work queue. */
+	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
+	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
+	uint16_t inline_en:1; /* When set, inline is enabled. */
+	uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
+	uint16_t tunnel_en:1;
+	/* When set, TX offloads for tunneled packets are supported. */
+	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
+	uint32_t qp_num_8s; /* QP number shifted by 8. */
+	uint32_t flags; /* Flags for Tx Queue. */
+	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
+	volatile void *wqes; /* Work queue (use volatile to write into). */
+	volatile uint32_t *qp_db; /* Work queue doorbell. */
+	volatile uint32_t *cq_db; /* Completion queue doorbell. */
+	volatile void *bf_reg; /* Blueflame register. */
 	struct {
-		const struct rte_mempool *mp; /* Cached Memory Pool. */
+		uintptr_t start; /* Start address of MR. */
+		uintptr_t end; /* End address of MR. */
 		struct ibv_mr *mr; /* Memory Region (for mp). */
-		uint32_t lkey; /* mr->lkey */
+		uint32_t lkey; /* htonl(mr->lkey) */
 	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+	uint16_t mr_cache_idx; /* Index of last hit entry. */
+	struct rte_mbuf *(*elts)[]; /* TX elements. */
+	struct mlx5_txq_stats stats; /* TX queue counters. */
+} __rte_cache_aligned;
+
+/* TX queue control descriptor. */
+struct txq_ctrl {
+	struct priv *priv; /* Back pointer to private data. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
-	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
-	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX5_PMD_MAX_INLINE > 0
-	uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
-#endif
-	unsigned int elts_n; /* (*elts)[] length. */
-	struct txq_elt (*elts)[]; /* TX elements. */
-	unsigned int elts_head; /* Current index in (*elts)[]. */
-	unsigned int elts_tail; /* First element awaiting completion. */
-	unsigned int elts_comp; /* Number of completion requests. */
-	unsigned int elts_comp_cd; /* Countdown for next completion request. */
-	unsigned int elts_comp_cd_init; /* Initial value for countdown. */
-	struct mlx5_txq_stats stats; /* TX queue counters. */
-	linear_t (*elts_linear)[]; /* Linearized buffers. */
-	struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
 	unsigned int socket; /* CPU socket ID for allocations. */
-	struct ibv_exp_res_domain *rd; /* Resource Domain. */
+	struct txq txq; /* Data path structure.
*/ }; /* mlx5_rxq.c */ @@ -288,28 +306,322 @@ int priv_create_hash_rxqs(struct priv *); void priv_destroy_hash_rxqs(struct priv *); int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type); int priv_rehash_flows(struct priv *); -void rxq_cleanup(struct rxq *); -int rxq_rehash(struct rte_eth_dev *, struct rxq *); -int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); +void rxq_cleanup(struct rxq_ctrl *); +int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *); +int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t, + unsigned int, const struct rte_eth_rxconf *, + struct rte_mempool *); int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, const struct rte_eth_rxconf *, struct rte_mempool *); void mlx5_rx_queue_release(void *); +uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); +int priv_rx_intr_vec_enable(struct priv *priv); +void priv_rx_intr_vec_disable(struct priv *priv); +#ifdef HAVE_UPDATE_CQ_CI +int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); +#endif /* HAVE_UPDATE_CQ_CI */ /* mlx5_txq.c */ -void txq_cleanup(struct txq *); +void txq_cleanup(struct txq_ctrl *); +int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t, + unsigned int, const struct rte_eth_txconf *); int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, const struct rte_eth_txconf *); void mlx5_tx_queue_release(void *); +uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); /* mlx5_rxtx.c */ -void txq_mp2mr_iter(const struct rte_mempool *, void *); +extern const uint32_t mlx5_ptype_table[]; + uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); +int mlx5_rx_descriptor_status(void *, uint16_t); +int mlx5_tx_descriptor_status(void *, uint16_t); + +/* Vectorized version of mlx5_rxtx.c */ +int priv_check_raw_vec_tx_support(struct priv *); +int priv_check_vec_tx_support(struct priv *); +int rxq_check_vec_support(struct rxq *); +int priv_check_vec_rx_support(struct priv *); +void priv_prep_vec_rx_function(struct priv *); +uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t); + +/* mlx5_mr.c */ + +struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *); +void txq_mp2mr_iter(struct rte_mempool *, void *); +uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int); + +#ifndef NDEBUG +/** + * Verify or set magic value in CQE. + * + * @param cqe + * Pointer to CQE. + * + * @return + * 0 the first time. 
+ */ +static inline int +check_cqe_seen(volatile struct mlx5_cqe *cqe) +{ + static const uint8_t magic[] = "seen"; + volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0; + int ret = 1; + unsigned int i; + + for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i) + if (!ret || (*buf)[i] != magic[i]) { + ret = 0; + (*buf)[i] = magic[i]; + } + return ret; +} +#endif /* NDEBUG */ + +/** + * Check whether CQE is valid. + * + * @param cqe + * Pointer to CQE. + * @param cqes_n + * Size of completion queue. + * @param ci + * Consumer index. + * + * @return + * 0 on success, 1 on failure. + */ +static __rte_always_inline int +check_cqe(volatile struct mlx5_cqe *cqe, + unsigned int cqes_n, const uint16_t ci) +{ + uint16_t idx = ci & cqes_n; + uint8_t op_own = cqe->op_own; + uint8_t op_owner = MLX5_CQE_OWNER(op_own); + uint8_t op_code = MLX5_CQE_OPCODE(op_own); + + if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID))) + return 1; /* No CQE. */ +#ifndef NDEBUG + if ((op_code == MLX5_CQE_RESP_ERR) || + (op_code == MLX5_CQE_REQ_ERR)) { + volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe; + uint8_t syndrome = err_cqe->syndrome; + + if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) || + (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) + return 0; + if (!check_cqe_seen(cqe)) + ERROR("unexpected CQE error %u (0x%02x)" + " syndrome 0x%02x", + op_code, op_code, syndrome); + return 1; + } else if ((op_code != MLX5_CQE_RESP_SEND) && + (op_code != MLX5_CQE_REQ)) { + if (!check_cqe_seen(cqe)) + ERROR("unexpected CQE opcode %u (0x%02x)", + op_code, op_code); + return 1; + } +#endif /* NDEBUG */ + return 0; +} + +/** + * Return the address of the WQE. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe_ci + * WQE consumer index. + * + * @return + * WQE address. + */ +static inline uintptr_t * +tx_mlx5_wqe(struct txq *txq, uint16_t ci) +{ + ci &= ((1 << txq->wqe_n) - 1); + return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE); +} + +/** + * Manage TX completions. + * + * When sending a burst, mlx5_tx_burst() posts several WRs. + * + * @param txq + * Pointer to TX queue structure. + */ +static __rte_always_inline void +mlx5_tx_complete(struct txq *txq) +{ + const uint16_t elts_n = 1 << txq->elts_n; + const uint16_t elts_m = elts_n - 1; + const unsigned int cqe_n = 1 << txq->cqe_n; + const unsigned int cqe_cnt = cqe_n - 1; + uint16_t elts_free = txq->elts_tail; + uint16_t elts_tail; + uint16_t cq_ci = txq->cq_ci; + volatile struct mlx5_cqe *cqe = NULL; + volatile struct mlx5_wqe_ctrl *ctrl; + struct rte_mbuf *m, *free[elts_n]; + struct rte_mempool *pool = NULL; + unsigned int blk_n = 0; + + do { + volatile struct mlx5_cqe *tmp; + + tmp = &(*txq->cqes)[cq_ci & cqe_cnt]; + if (check_cqe(tmp, cqe_n, cq_ci)) + break; + cqe = tmp; +#ifndef NDEBUG + if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) { + if (!check_cqe_seen(cqe)) + ERROR("unexpected compressed CQE, TX stopped"); + return; + } + if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || + (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { + if (!check_cqe_seen(cqe)) + ERROR("unexpected error CQE, TX stopped"); + return; + } +#endif /* NDEBUG */ + ++cq_ci; + } while (1); + if (unlikely(cqe == NULL)) + return; + txq->wqe_pi = ntohs(cqe->wqe_counter); + ctrl = (volatile struct mlx5_wqe_ctrl *) + tx_mlx5_wqe(txq, txq->wqe_pi); + elts_tail = ctrl->ctrl3; + assert((elts_tail & elts_m) < (1 << txq->wqe_n)); + /* Free buffers. 
*/ + while (elts_free != elts_tail) { + m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]); + if (likely(m != NULL)) { + if (likely(m->pool == pool)) { + free[blk_n++] = m; + } else { + if (likely(pool != NULL)) + rte_mempool_put_bulk(pool, + (void *)free, + blk_n); + free[0] = m; + pool = m->pool; + blk_n = 1; + } + } + } + if (blk_n) + rte_mempool_put_bulk(pool, (void *)free, blk_n); +#ifndef NDEBUG + elts_free = txq->elts_tail; + /* Poisoning. */ + while (elts_free != elts_tail) { + memset(&(*txq->elts)[elts_free & elts_m], + 0x66, + sizeof((*txq->elts)[elts_free & elts_m])); + ++elts_free; + } +#endif + txq->cq_ci = cq_ci; + txq->elts_tail = elts_tail; + /* Update the consumer index. */ + rte_wmb(); + *txq->cq_db = htonl(cq_ci); +} + +/** + * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which + * the cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. + */ +static struct rte_mempool * +mlx5_tx_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_INDIRECT(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + +/** + * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[]. + * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, + * remove an entry first. + * + * @param txq + * Pointer to TX queue structure. + * @param[in] mp + * Memory Pool for which a Memory Region lkey must be returned. + * + * @return + * mr->lkey on success, (uint32_t)-1 on failure. + */ +static __rte_always_inline uint32_t +mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb) +{ + uint16_t i = txq->mr_cache_idx; + uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t); + + assert(i < RTE_DIM(txq->mp2mr)); + if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr)) + return txq->mp2mr[i].lkey; + for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { + if (unlikely(txq->mp2mr[i].mr == NULL)) { + /* Unknown MP, add a new MR for it. */ + break; + } + if (txq->mp2mr[i].start <= addr && + txq->mp2mr[i].end >= addr) { + assert(txq->mp2mr[i].lkey != (uint32_t)-1); + assert(htonl(txq->mp2mr[i].mr->lkey) == + txq->mp2mr[i].lkey); + txq->mr_cache_idx = i; + return txq->mp2mr[i].lkey; + } + } + txq->mr_cache_idx = 0; + return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i); +} + +/** + * Ring TX queue doorbell. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the last WQE posted in the NIC. + */ +static __rte_always_inline void +mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe) +{ + uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); + volatile uint64_t *src = ((volatile uint64_t *)wqe); + + rte_wmb(); + *txq->qp_db = htonl(txq->wqe_ci); + /* Ensure ordering between DB record and BF copy. */ + rte_wmb(); + *dst = *src; +} #endif /* RTE_PMD_MLX5_RXTX_H_ */
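
Note: the inline helpers added above (mlx5_tx_complete(), mlx5_tx_mb2mr() and mlx5_tx_dbrec()) are meant to be shared by the mlx5_tx_burst*() implementations in mlx5_rxtx.c. The sketch below is not part of the patch; it is a minimal, hypothetical example of how a burst function chains them, assuming one data segment per packet and omitting the control/Ethernet segments, completion requests and ring-overflow checks that a real implementation must handle. The function name example_tx_burst() is made up for illustration; it only relies on fields and helpers declared in this header.

/* Illustrative only: simplified use of the TX helpers declared above. */
#include <rte_byteorder.h>
#include <rte_mbuf.h>

#include "mlx5_rxtx.h" /* struct txq, mlx5_tx_*() helpers, mlx5_prm.h types. */

static uint16_t
example_tx_burst(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t elts_m = (1 << txq->elts_n) - 1;
	uint16_t i;

	/* Reclaim mbufs whose completions have already been reported. */
	mlx5_tx_complete(txq);
	for (i = 0; i != pkts_n; ++i) {
		struct rte_mbuf *mb = pkts[i];
		volatile struct mlx5_wqe_data_seg *dseg =
			(volatile struct mlx5_wqe_data_seg *)
			tx_mlx5_wqe(txq, txq->wqe_ci);

		/* Hypothetical one-data-segment WQE; a real burst function
		 * also fills the control segment and checks for free space. */
		dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
		dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(mb));
		/* mp2mr[] caches the lkey already in network byte order. */
		dseg->lkey = mlx5_tx_mb2mr(txq, mb);
		/* Keep the mbuf until its completion is consumed. */
		(*txq->elts)[txq->elts_head++ & elts_m] = mb;
		++txq->wqe_ci;
	}
	if (i)
		/* Ring the doorbell once, pointing at the last WQE posted. */
		mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)
			      tx_mlx5_wqe(txq, txq->wqe_ci - 1));
	return i;
}

Deferring mlx5_tx_dbrec() until after the loop keeps the write barriers, doorbell record update and BlueFlame copy to one per burst instead of one per packet, which is the usage pattern the real burst functions follow.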