/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef MLX4_RXTX_H_
#define MLX4_RXTX_H_

#include <stdint.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev_driver.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_prm.h"
#include "mlx4_mr.h"

/** Rx queue counters. */
struct mlx4_rxq_stats {
	unsigned int idx; /**< Mapping index. */
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
	uint64_t idropped; /**< Total of packets dropped when Rx ring full. */
	uint64_t rx_nombuf; /**< Total of Rx mbuf allocation failures. */
};

/** Rx queue descriptor. */
struct rxq {
	struct priv *priv; /**< Back pointer to private data. */
	struct rte_mempool *mp; /**< Memory pool for allocations. */
	struct ibv_cq *cq; /**< Completion queue. */
	struct ibv_wq *wq; /**< Work queue. */
	struct ibv_comp_channel *channel; /**< Rx completion channel. */
	uint16_t rq_ci; /**< Saved RQ consumer index. */
	uint16_t port_id; /**< Port ID for incoming packets. */
	uint16_t sges_n; /**< Number of segments per packet (log2 value). */
	uint16_t elts_n; /**< Mbuf queue size (log2 value). */
	struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
	struct rte_mbuf *(*elts)[]; /**< Rx elements. */
	volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
	volatile uint32_t *rq_db; /**< RQ doorbell record. */
	uint32_t csum:1; /**< Enable checksum offloading. */
	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
	uint32_t crc_present:1; /**< CRC must be subtracted. */
	uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
	struct mlx4_rxq_stats stats; /**< Rx queue counters. */
	unsigned int socket; /**< CPU socket ID for allocations. */
	uint32_t usecnt; /**< Number of users relying on queue resources. */
	uint8_t data[]; /**< Remaining queue resources. */
};
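
/*
 * Illustration only (not part of the original header): sges_n and elts_n are
 * stored as log2 values, so the actual ring dimensions are recovered by
 * shifting. A minimal sketch of decoding them, assuming a fully initialized
 * Rx queue; the helper name is hypothetical.
 */
static inline unsigned int
mlx4_rxq_pkts_n_example(const struct rxq *rxq)
{
	unsigned int elts_n = 1u << rxq->elts_n; /* Total mbuf slots. */
	unsigned int sges_n = 1u << rxq->sges_n; /* Segments per packet. */

	/* Packets the ring can hold at once. */
	return elts_n / sges_n;
}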

/** Shared flow target for Rx queues. */
struct mlx4_rss {
	LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
	struct priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count for this object. */
	uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
	struct ibv_qp *qp; /**< Queue pair. */
	struct ibv_rwq_ind_table *ind; /**< Indirection table. */
	uint64_t fields; /**< Fields for RSS processing (Verbs format). */
	uint8_t key[MLX4_RSS_HASH_KEY_SIZE]; /**< Hash key to use. */
	uint16_t queues; /**< Number of target queues. */
	uint16_t queue_id[]; /**< Target queues. */
};

/** Tx element. */
struct txq_elt {
	struct rte_mbuf *buf; /**< Buffer. */
	union {
		volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */
		volatile uint32_t *eocb; /**< End of completion burst. */
	};
};

/** Tx queue counters. */
struct mlx4_txq_stats {
	unsigned int idx; /**< Mapping index. */
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
	uint64_t odropped; /**< Total of packets not sent when Tx ring full. */
};

/** Tx queue descriptor. */
struct txq {
	struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */
	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
	unsigned int elts_head; /**< Current index in (*elts)[]. */
	unsigned int elts_tail; /**< First element awaiting completion. */
	int elts_comp_cd; /**< Countdown for next completion. */
	unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
	unsigned int elts_n; /**< (*elts)[] length. */
	struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
	struct txq_elt (*elts)[]; /**< Tx elements. */
	struct mlx4_txq_stats stats; /**< Tx queue counters. */
	uint32_t max_inline; /**< Max inline send size. */
	uint32_t csum:1; /**< Enable checksum offloading. */
	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
	uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
	uint8_t *bounce_buf;
	/**< Memory used for storing the first DWORD of data TXBBs. */
	struct priv *priv; /**< Back pointer to private data. */
	unsigned int socket; /**< CPU socket ID for allocations. */
	struct ibv_cq *cq; /**< Completion queue. */
	struct ibv_qp *qp; /**< Queue pair. */
	uint8_t data[]; /**< Remaining queue resources. */
};

/* mlx4_rxq.c */

extern uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
int mlx4_rss_init(struct priv *priv);
void mlx4_rss_deinit(struct priv *priv);
struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
			      const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
			      uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx4_rx_queue_release(void *dpdk_rxq);
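
/*
 * Illustration only (not part of the original header): a shared RSS context
 * is reference-counted, so users pair mlx4_rss_get() with mlx4_rss_put() and
 * mlx4_rss_attach() with mlx4_rss_detach(). A minimal sketch, assuming priv,
 * fields, queues and queue_id are prepared by the caller:
 *
 *	struct mlx4_rss *rss =
 *		mlx4_rss_get(priv, fields, mlx4_rss_hash_key_default,
 *			     queues, queue_id);
 *
 *	if (rss == NULL)
 *		return -rte_errno;
 *	if (mlx4_rss_attach(rss)) {
 *		mlx4_rss_put(rss);
 *		return -rte_errno;
 *	}
 *	// ... use rss->qp as a flow target ...
 *	mlx4_rss_detach(rss);
 *	mlx4_rss_put(rss);
 */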

/* mlx4_rxtx.c */

uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);

/* mlx4_txq.c */

uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
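
/*
 * Illustration only (an assumption mirroring how DPDK PMDs normally expose
 * these callbacks): the queue setup/release functions above are meant to be
 * plugged into the device operations table, e.g.:
 *
 *	static const struct eth_dev_ops mlx4_dev_ops = {
 *		.rx_queue_setup = mlx4_rx_queue_setup,
 *		.rx_queue_release = mlx4_rx_queue_release,
 *		.tx_queue_setup = mlx4_tx_queue_setup,
 *		.tx_queue_release = mlx4_tx_queue_release,
 *		// ...
 *	};
 */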

/* mlx4_mr.c */

void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX4_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx4_rx_addr2mr_bh(rxq, addr);
}

#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
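
/*
 * Illustration only (an assumption mirroring typical usage; exact byte-order
 * handling in the real data path may differ): during Rx ring refill, the LKey
 * returned by mlx4_rx_mb2mr() accompanies the buffer address in a WQE data
 * segment, e.g. for element i:
 *
 *	volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[i];
 *	struct rte_mbuf *rep = (*rxq->elts)[i];
 *
 *	scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
 *	scat->lkey = rte_cpu_to_be_32(mlx4_rx_mb2mr(rxq, rep));
 */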

/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx4_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX4_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx4_tx_addr2mr_bh(txq, addr);
}

#define mlx4_tx_mb2mr(txq, mb) mlx4_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))
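
/*
 * Illustration only (an assumption mirroring typical usage; byte-order
 * handling is not specified by this header): on Tx, the LKey fetched for each
 * mbuf segment "sbuf" goes into the send WQE data segment, and a miss even
 * after the bottom half means the address has no registered MR:
 *
 *	uint32_t lkey = mlx4_tx_mb2mr(txq, sbuf);
 *
 *	if (unlikely(lkey == UINT32_MAX))
 *		break; // No MR for this address: stop the burst.
 *	dseg->lkey = rte_cpu_to_be_32(lkey);
 */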

#endif /* MLX4_RXTX_H_ */