/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5_hw.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};

/* Flow director queue structure. */
struct fdir_queue {
	struct ibv_qp *qp; /* Associated RX QP. */
	struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
	struct ibv_exp_wq *wq; /* Work queue. */
	struct ibv_cq *cq; /* Completion queue. */
};

struct priv;

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

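/*
 * Note (informal, not from the original header): with CQE compression
 * enabled, the device writes an uncompressed "title" CQE followed by
 * arrays of mini-CQEs carrying only per-packet fields such as the byte
 * count. The context above tracks the datapath's position inside such a
 * session; see the RX burst code in mlx5_rxtx.c for the exact semantics.
 */
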
/* RX queue descriptor. */
struct rxq {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	uint16_t rq_ci; /* Consumer index for receive queue. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t elts_n; /* (*elts)[] length. */
	uint16_t cqe_n; /* Number of CQ elements. */
	uint16_t port_id; /* Port ID for incoming packets. */
	volatile struct mlx5_wqe_data_seg(*wqes)[]; /* Work queue. */
	volatile struct mlx5_cqe(*cqes)[]; /* Completion queue. */
	struct rxq_zip zip; /* Compressed context. */
	volatile uint32_t *rq_db; /* Receive queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	struct rte_mbuf *(*elts)[]; /* RX elements. */
	struct rte_mempool *mp; /* Memory pool for allocations. */
	struct mlx5_rxq_stats stats; /* RX queue counters. */
} __rte_cache_aligned;

/* RX queue control descriptor. */
struct rxq_ctrl {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_exp_wq *wq; /* Work Queue. */
	struct ibv_exp_res_domain *rd; /* Resource Domain. */
	struct fdir_queue *fdir_queue; /* Flow director queue. */
	struct ibv_mr *mr; /* Memory Region (for mp). */
	struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
	struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
	unsigned int socket; /* CPU socket ID for allocations. */
	struct rxq rxq; /* Data path structure. */
};

/* Hash RX queue types. */
enum hash_rxq_type {
	HASH_RXQ_TCPV4,
	HASH_RXQ_UDPV4,
	HASH_RXQ_IPV4,
	HASH_RXQ_TCPV6,
	HASH_RXQ_UDPV6,
	HASH_RXQ_IPV6,
	HASH_RXQ_ETH,
};

/* Flow structure with Ethernet specification. It is packed to prevent padding
 * between attr and spec as this layout is expected by libibverbs. */
struct flow_attr_spec_eth {
	struct ibv_exp_flow_attr attr;
	struct ibv_exp_flow_spec_eth spec;
} __attribute__((packed));

/* Define a struct flow_attr_spec_eth object as an array of at least
 * "size" bytes. Room after the first index is normally used to store
 * extra flow specifications. */
#define FLOW_ATTR_SPEC_ETH(name, size) \
	struct flow_attr_spec_eth name \
		[((size) / sizeof(struct flow_attr_spec_eth)) + \
		 !!((size) % sizeof(struct flow_attr_spec_eth))]

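/*
 * Illustrative use (a sketch, not part of the original header): size the
 * buffer with a first priv_flow_attr() call, then let a second call fill
 * it in. The names "data", "priv" and "type" are placeholders.
 *
 *	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, type));
 *	struct ibv_exp_flow_attr *attr = &data->attr;
 *
 *	priv_flow_attr(priv, attr, sizeof(data), type);
 */
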
/* Initialization data for hash RX queue. */
struct hash_rxq_init {
	uint64_t hash_fields; /* Fields that participate in the hash. */
	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
	unsigned int flow_priority; /* Flow priority to use. */
	union {
		struct {
			enum ibv_exp_flow_spec_type type;
			uint16_t size;
		} hdr;
		struct ibv_exp_flow_spec_tcp_udp tcp_udp;
		struct ibv_exp_flow_spec_ipv4 ipv4;
		struct ibv_exp_flow_spec_ipv6 ipv6;
		struct ibv_exp_flow_spec_eth eth;
	} flow_spec; /* Flow specification template. */
	const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
};

/* Initialization data for indirection table. */
struct ind_table_init {
	unsigned int max_size; /* Maximum number of WQs. */
	/* Hash RX queues using this table. */
	unsigned int hash_types;
	unsigned int hash_types_n;
};

/* Initialization data for special flows. */
struct special_flow_init {
	uint8_t dst_mac_val[6];
	uint8_t dst_mac_mask[6];
	unsigned int hash_types;
	unsigned int per_vlan:1;
};

enum hash_rxq_flow_type {
	HASH_RXQ_FLOW_TYPE_PROMISC,
	HASH_RXQ_FLOW_TYPE_ALLMULTI,
	HASH_RXQ_FLOW_TYPE_BROADCAST,
	HASH_RXQ_FLOW_TYPE_IPV6MULTI,
	HASH_RXQ_FLOW_TYPE_MAC,
};

#ifndef NDEBUG
static inline const char *
hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
{
	switch (flow_type) {
	case HASH_RXQ_FLOW_TYPE_PROMISC:
		return "promiscuous";
	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
		return "allmulticast";
	case HASH_RXQ_FLOW_TYPE_BROADCAST:
		return "broadcast";
	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
		return "IPv6 multicast";
	case HASH_RXQ_FLOW_TYPE_MAC:
		return "MAC";
	}
	return NULL;
}
#endif /* NDEBUG */

struct hash_rxq {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_qp *qp; /* Hash RX QP. */
	enum hash_rxq_type type; /* Hash RX queue type. */
	/* MAC flow steering rules, one per VLAN ID. */
	struct ibv_exp_flow *mac_flow
		[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
	struct ibv_exp_flow *special_flow
		[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
};

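/*
 * Note (informal, not from the original header): mac_flow[] is indexed
 * by MAC address slot first and VLAN index second; special_flow[] uses
 * the same layout with enum hash_rxq_flow_type values as first index.
 */
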
/* TX queue descriptor. */
struct txq {
	uint16_t elts_head; /* Current index in (*elts)[]. */
	uint16_t elts_tail; /* First element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t elts_n; /* (*elts)[] length. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
	uint16_t cqe_n; /* Number of CQ elements. */
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_n; /* Number of WQ elements. */
	uint16_t bf_offset; /* Blueflame offset. */
	uint16_t bf_buf_size; /* Blueflame size. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile union mlx5_wqe (*wqes)[]; /* Work queue. */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register. */
	struct {
		const struct rte_mempool *mp; /* Cached Memory Pool. */
		struct ibv_mr *mr; /* Memory Region (for mp). */
		uint32_t lkey; /* htonl(mr->lkey) */
	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;

/* TX queue control descriptor. */
struct txq_ctrl {
	struct priv *priv; /* Back pointer to private data. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
	struct ibv_exp_res_domain *rd; /* Resource Domain. */
	unsigned int socket; /* CPU socket ID for allocations. */
	struct txq txq; /* Data path structure. */
};

/* mlx5_rxq.c */

extern const struct hash_rxq_init hash_rxq_init[];
extern const unsigned int hash_rxq_init_n;

extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;

size_t priv_flow_attr(struct priv *, struct ibv_exp_flow_attr *,
		      size_t, enum hash_rxq_type);
int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
		   unsigned int, const struct rte_eth_rxconf *,
		   struct rte_mempool *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);

/* mlx5_txq.c */

void txq_cleanup(struct txq_ctrl *);
int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
		   unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);

/* mlx5_rxtx.c */

uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);

/* mlx5_mr.c */

struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
void txq_mp2mr_iter(struct rte_mempool *, void *);
uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);

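/*
 * Illustrative only (a sketch, not part of the original file): one way
 * the TX datapath can translate a mempool into its cached lkey through
 * the mp2mr[] table of struct txq, falling back to txq_mp2mr_reg() for
 * mempools seen for the first time. The authoritative helper lives in
 * mlx5_rxtx.c; the name txq_mp2mr_sketch() is made up for this example.
 */
static inline uint32_t
txq_mp2mr_sketch(struct txq *txq, struct rte_mempool *mp)
{
	unsigned int i;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (txq->mp2mr[i].mp == NULL) {
			/* Unknown MP: register a new MR on the fly. */
			break;
		}
		if (txq->mp2mr[i].mp == mp)
			/* lkey is already in network byte order. */
			return txq->mp2mr[i].lkey;
	}
	return txq_mp2mr_reg(txq, mp, i);
}
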
#endif /* RTE_PMD_MLX5_RXTX_H_ */