X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.h;h=2ca0a4797988a3e66c1d95f651cdc6e8fc94200c;hb=b8cc58c140cce5601d97272d33b0bb701104b2b9;hp=75eedff8adbcf4c11103ec2c5407a823523f86a0;hpb=675911d0330b0802e845657c3f4edc5886cf8685;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 75eedff8ad..2ca0a47979 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -30,6 +30,7 @@
 #include "mlx5_utils.h"
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
+#include "mlx5_mr.h"
 
 /* Support tunnel matching. */
 #define MLX5_FLOW_TUNNEL 10
@@ -68,7 +69,7 @@ struct rxq_zip {
 /* Multi-Packet RQ buffer header. */
 struct mlx5_mprq_buf {
 	struct rte_mempool *mp;
-	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+	uint16_t refcnt; /* Atomically accessed refcnt. */
 	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
 	struct rte_mbuf_ext_shared_info shinfos[];
 	/*
@@ -94,6 +95,12 @@ enum mlx5_rxq_err_state {
 	MLX5_RXQ_ERR_STATE_NEED_READY,
 };
 
+enum mlx5_rqx_code {
+	MLX5_RXQ_CODE_EXIT = 0,
+	MLX5_RXQ_CODE_NOMBUF,
+	MLX5_RXQ_CODE_DROPPED,
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -116,6 +123,7 @@ struct mlx5_rxq_data {
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
+	uint32_t elts_ci;
 	uint32_t rq_ci;
 	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
 	uint32_t rq_pi;
@@ -130,11 +138,8 @@ struct mlx5_rxq_data {
 	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
 	volatile void *wqes;
 	volatile struct mlx5_cqe(*cqes)[];
-	RTE_STD_C11
-	union {
-		struct rte_mbuf *(*elts)[];
-		struct mlx5_mprq_buf *(*mprq_bufs)[];
-	};
+	struct rte_mbuf *(*elts)[];
+	struct mlx5_mprq_buf *(*mprq_bufs)[];
 	struct rte_mempool *mp;
 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
@@ -151,6 +156,8 @@ struct mlx5_rxq_data {
 	/* CQ (UAR) access lock required for 32bit implementations */
 #endif
 	uint32_t tunnel; /* Tunnel information. */
+	int timestamp_offset; /* Dynamic mbuf field for timestamp. */
+	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
 } __rte_cache_aligned;
@@ -165,7 +172,7 @@ enum mlx5_rxq_type {
 struct mlx5_rxq_ctrl {
 	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
 	enum mlx5_rxq_type type; /* Rxq type. */
@@ -186,43 +193,6 @@ struct mlx5_rxq_ctrl {
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 };
 
-enum mlx5_ind_tbl_type {
-	MLX5_IND_TBL_TYPE_IBV,
-	MLX5_IND_TBL_TYPE_DEVX,
-};
-
-/* Indirection table. */
-struct mlx5_ind_table_obj {
-	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
-	enum mlx5_ind_tbl_type type;
-	RTE_STD_C11
-	union {
-		void *ind_table; /**< Indirection table. */
-		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
-	};
-	uint32_t queues_n; /**< Number of queues in the list. */
-	uint16_t queues[]; /**< Queue list. */
-};
-
-/* Hash Rx queue. */
-struct mlx5_hrxq {
-	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
-	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
-	RTE_STD_C11
-	union {
-		void *qp; /* Verbs queue pair. */
-		struct mlx5_devx_obj *tir; /* DevX TIR object. */
-	};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	void *action; /* DV QP action pointer. */
-#endif
-	uint64_t hash_fields; /* Verbs Hash fields. */
-	uint32_t rss_key_len; /* Hash key length in bytes. */
-	uint8_t rss_key[]; /* Hash key. */
-};
-
 /* TX queue send local data. */
 __extension__
 struct mlx5_txq_local {
@@ -298,55 +268,15 @@ struct mlx5_txq_data {
 	/* Storage for queued packets, must be the last field. */
 } __rte_cache_aligned;
 
-enum mlx5_txq_obj_type {
-	MLX5_TXQ_OBJ_TYPE_IBV, /* mlx5_txq_obj with ibv_wq. */
-	MLX5_TXQ_OBJ_TYPE_DEVX_SQ, /* mlx5_txq_obj with mlx5_devx_sq. */
-	MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN,
-	/* mlx5_txq_obj with mlx5_devx_tq and hairpin support. */
-};
-
 enum mlx5_txq_type {
 	MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
 	MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
 };
 
-/* Verbs/DevX Tx queue elements. */
-struct mlx5_txq_obj {
-	LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
-	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
-	enum mlx5_txq_obj_type type; /* The txq object type. */
-	RTE_STD_C11
-	union {
-		struct {
-			void *cq; /* Completion Queue. */
-			void *qp; /* Queue Pair. */
-		};
-		struct {
-			struct mlx5_devx_obj *sq;
-			/* DevX object for Sx queue. */
-			struct mlx5_devx_obj *tis; /* The TIS object. */
-		};
-		struct {
-			struct rte_eth_dev *dev;
-			struct mlx5_devx_obj *cq_devx;
-			void *cq_umem;
-			void *cq_buf;
-			int64_t cq_dbrec_offset;
-			struct mlx5_devx_dbr_page *cq_dbrec_page;
-			struct mlx5_devx_obj *sq_devx;
-			void *sq_umem;
-			void *sq_buf;
-			int64_t sq_dbrec_offset;
-			struct mlx5_devx_dbr_page *sq_dbrec_page;
-		};
-	};
-};
-
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	enum mlx5_txq_type type; /* The txq ctrl type. */
 	unsigned int max_inline_data; /* Max inline data. */
@@ -402,11 +332,16 @@ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
+						  const uint16_t *queues,
+						  uint32_t queues_n);
+int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+			       struct mlx5_ind_table_obj *ind_tbl);
 uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
 		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel __rte_unused);
+		       int tunnel, bool shared);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
@@ -414,12 +349,15 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
-struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
-void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
+void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
 void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
-
+int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
+		     const uint8_t *rss_key, uint32_t rss_key_len,
+		     uint64_t hash_fields,
+		     const uint16_t *queues, uint32_t queues_n);
 
 /* mlx5_txq.c */
 
@@ -433,12 +371,9 @@ int mlx5_tx_hairpin_queue_setup
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
 void mlx5_tx_queue_release(void *dpdk_txq);
+void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
-struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
-				      enum mlx5_txq_obj_type type);
-struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx);
-int mlx5_txq_obj_release(struct mlx5_txq_obj *txq_ibv);
 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
@@ -496,6 +431,8 @@ int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
 int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
 uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
 			   uint16_t pkts_n);
+uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+				uint16_t pkts_n);
 
 /* mlx5_mr.c */
 
@@ -659,7 +596,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
 	uint64_t *dst = MLX5_TX_BFREG(txq);
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);
 
-	rte_cio_wmb();
+	rte_io_wmb();
 	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
 	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
@@ -756,4 +693,204 @@ mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts)
 	return ci;
 }
 
+/**
+ * Set timestamp in mbuf dynamic field.
+ *
+ * @param mbuf
+ *   Structure to write into.
+ * @param offset
+ *   Dynamic field offset in mbuf structure.
+ * @param timestamp
+ *   Value to write.
+ */
+static __rte_always_inline void
+mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
+		rte_mbuf_timestamp_t timestamp)
+{
+	*RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) = timestamp;
+}
+
+/**
+ * Replace MPRQ buffer.
+ *
+ * @param rxq
+ *   Pointer to Rx queue structure.
+ * @param rq_idx
+ *   RQ index to replace.
+ */
+static __rte_always_inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+{
+	const uint32_t strd_n = 1 << rxq->strd_num_n;
+	struct mlx5_mprq_buf *rep = rxq->mprq_repl;
+	volatile struct mlx5_wqe_data_seg *wqe =
+		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
+	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
+	void *addr;
+
+	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+		MLX5_ASSERT(rep != NULL);
+		/* Replace MPRQ buf. */
+		(*rxq->mprq_bufs)[rq_idx] = rep;
+		/* Replace WQE. */
+		addr = mlx5_mprq_buf_addr(rep, strd_n);
+		wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
+		/* If there's only one MR, no need to replace LKey in WQE. */
+		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+			wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
+		/* Stash a mbuf for next replacement. */
+		if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
+			rxq->mprq_repl = rep;
+		else
+			rxq->mprq_repl = NULL;
+		/* Release the old buffer. */
+		mlx5_mprq_buf_free(buf);
+	} else if (unlikely(rxq->mprq_repl == NULL)) {
+		struct mlx5_mprq_buf *rep;
+
+		/*
+		 * Currently, the MPRQ mempool is out of buffer
+		 * and doing memcpy regardless of the size of Rx
+		 * packet. Retry allocation to get back to
+		 * normal.
+		 */
+		if (!rte_mempool_get(rxq->mprq_mp, (void **)&rep))
+			rxq->mprq_repl = rep;
+	}
+}
+
+/**
+ * Attach or copy MPRQ buffer content to a packet.
+ *
+ * @param rxq
+ *   Pointer to Rx queue structure.
+ * @param pkt
+ *   Pointer to a packet to fill.
+ * @param len
+ *   Packet length.
+ * @param buf
+ *   Pointer to a MPRQ buffer to take the data from.
+ * @param strd_idx
+ *   Stride index to start from.
+ * @param strd_cnt
+ *   Number of strides to consume.
+ */
+static __rte_always_inline enum mlx5_rqx_code
+mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
+		struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt)
+{
+	const uint32_t strd_n = 1 << rxq->strd_num_n;
+	const uint16_t strd_sz = 1 << rxq->strd_sz_n;
+	const uint16_t strd_shift =
+		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+	const int32_t hdrm_overlap =
+		len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
+	const uint32_t offset = strd_idx * strd_sz + strd_shift;
+	void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
+
+	/*
+	 * Memcpy packets to the target mbuf if:
+	 * - The size of packet is smaller than mprq_max_memcpy_len.
+	 * - Out of buffer in the Mempool for Multi-Packet RQ.
+	 * - The packet's stride overlaps a headroom and scatter is off.
+	 */
+	if (len <= rxq->mprq_max_memcpy_len ||
+	    rxq->mprq_repl == NULL ||
+	    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
+		if (likely(len <=
+			   (uint32_t)(pkt->buf_len - RTE_PKTMBUF_HEADROOM))) {
+			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
+				   addr, len);
+			DATA_LEN(pkt) = len;
+		} else if (rxq->strd_scatter_en) {
+			struct rte_mbuf *prev = pkt;
+			uint32_t seg_len = RTE_MIN(len, (uint32_t)
+				(pkt->buf_len - RTE_PKTMBUF_HEADROOM));
+			uint32_t rem_len = len - seg_len;
+
+			rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
+				   addr, seg_len);
+			DATA_LEN(pkt) = seg_len;
+			while (rem_len) {
+				struct rte_mbuf *next =
+					rte_pktmbuf_alloc(rxq->mp);
+
+				if (unlikely(next == NULL))
+					return MLX5_RXQ_CODE_NOMBUF;
+				NEXT(prev) = next;
+				SET_DATA_OFF(next, 0);
+				addr = RTE_PTR_ADD(addr, seg_len);
+				seg_len = RTE_MIN(rem_len, (uint32_t)
+					(next->buf_len - RTE_PKTMBUF_HEADROOM));
+				rte_memcpy
+					(rte_pktmbuf_mtod(next, void *),
+					 addr, seg_len);
+				DATA_LEN(next) = seg_len;
+				rem_len -= seg_len;
+				prev = next;
+				++NB_SEGS(pkt);
+			}
+		} else {
+			return MLX5_RXQ_CODE_DROPPED;
+		}
+	} else {
+		rte_iova_t buf_iova;
+		struct rte_mbuf_ext_shared_info *shinfo;
+		uint16_t buf_len = strd_cnt * strd_sz;
+		void *buf_addr;
+
+		/* Increment the refcnt of the whole chunk. */
+		__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
+		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
+			    __ATOMIC_RELAXED) <= strd_n + 1);
+		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+		/*
+		 * MLX5 device doesn't use iova but it is necessary in a
+		 * case where the Rx packet is transmitted via a
+		 * different PMD.
+		 */
+		buf_iova = rte_mempool_virt2iova(buf) +
+			   RTE_PTR_DIFF(buf_addr, buf);
+		shinfo = &buf->shinfos[strd_idx];
+		rte_mbuf_ext_refcnt_set(shinfo, 1);
+		/*
+		 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
+		 * attaching the stride to mbuf and more offload flags
+		 * will be added below by calling rxq_cq_to_mbuf().
+		 * Other fields will be overwritten.
+		 */
+		rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
+					  buf_len, shinfo);
+		/* Set mbuf head-room. */
+		SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
+		MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
+		MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
+			len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
+		DATA_LEN(pkt) = len;
+		/*
+		 * Copy the last fragment of a packet (up to headroom
+		 * size bytes) in case there is a stride overlap with
+		 * a next packet's headroom. Allocate a separate mbuf
+		 * to store this fragment and link it. Scatter is on.
+		 */
+		if (hdrm_overlap > 0) {
+			MLX5_ASSERT(rxq->strd_scatter_en);
+			struct rte_mbuf *seg =
+				rte_pktmbuf_alloc(rxq->mp);
+
+			if (unlikely(seg == NULL))
+				return MLX5_RXQ_CODE_NOMBUF;
+			SET_DATA_OFF(seg, 0);
+			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
+				   RTE_PTR_ADD(addr, len - hdrm_overlap),
+				   hdrm_overlap);
+			DATA_LEN(seg) = hdrm_overlap;
+			DATA_LEN(pkt) = len - hdrm_overlap;
+			NEXT(pkt) = seg;
+			NB_SEGS(pkt) = 2;
+		}
+	}
+	return MLX5_RXQ_CODE_EXIT;
+}
+
 #endif /* RTE_PMD_MLX5_RXTX_H_ */