1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_H_
7 #define RTE_PMD_MLX5_H_
13 #include <sys/queue.h>
16 #include <rte_ether.h>
17 #include <ethdev_driver.h>
18 #include <rte_rwlock.h>
19 #include <rte_interrupts.h>
20 #include <rte_errno.h>
24 #include <mlx5_glue.h>
25 #include <mlx5_devx_cmds.h>
27 #include <mlx5_common_mp.h>
28 #include <mlx5_common_mr.h>
29 #include <mlx5_common_devx.h>
30 #include <mlx5_common_defs.h>
32 #include "mlx5_defs.h"
33 #include "mlx5_utils.h"
35 #include "mlx5_autoconf.h"
36 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
40 #define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)
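/*
 * Illustrative usage (not part of the driver): fetching the shared device
 * context from an ethdev inside a PMD callback; "dev" is assumed to be a
 * struct rte_eth_dev * whose dev_private points to a struct mlx5_priv.
 *
 *   struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
 */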
43 * Number of modification commands.
44 * The maximal number of actions supported by FW is a constant: it is 16 in
45 * the latest releases, while some older releases limit it to 8.
46 * Since there is no interface to query this capacity, the maximal value is
47 * used so that the PMD can create the flow. The validation is done in the
48 * lower driver layer or FW; a failure is returned if the number of actions
49 * exceeds the maximum supported on the root table.
50 * On non-root tables there is no limitation, but 32 is enough for now.
52 #define MLX5_MAX_MODIFY_NUM 32
53 #define MLX5_ROOT_TBL_MODIFY_NUM 16
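/*
 * Illustrative sketch (not the driver's actual validation code): the lower
 * layer or FW is expected to apply a check of roughly this shape when a flow
 * on the root table carries too many modify-header commands; the names
 * "on_root_table" and "actions_num" are hypothetical:
 *
 *   if (on_root_table && actions_num > MLX5_ROOT_TBL_MODIFY_NUM)
 *       return -EINVAL; // exceeds the FW limit on the root table
 */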
55 /* Maximal number of flex items created on the port. */
56 #define MLX5_PORT_FLEX_ITEM_NUM 4
58 /* Maximal number of fields/field parts to map into the sample registers. */
59 #define MLX5_FLEX_ITEM_MAPPING_NUM 32
61 enum mlx5_ipool_index {
62 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
63 MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
64 MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
65 MLX5_IPOOL_TAG, /* Pool for tag resource. */
66 MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
67 MLX5_IPOOL_JUMP, /* Pool for SWS jump resource. */
68 /* Pool for HWS group. Jump action will be created internally. */
69 MLX5_IPOOL_HW_GRP = MLX5_IPOOL_JUMP,
70 MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
71 MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
72 MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
73 MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
75 MLX5_IPOOL_MTR, /* Pool for meter resource. */
76 MLX5_IPOOL_MCP, /* Pool for metadata resource. */
77 MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
78 MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
79 MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
80 MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
81 MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
82 MLX5_IPOOL_MTR_POLICY, /* Pool for meter policy resource. */
87 * There are three memory reclaim modes supported:
88 * 0 (none) means no memory reclaim.
89 * 1 (light) means PMD level reclaim only.
90 * 2 (aggressive) means both PMD and rdma-core level reclaim.
92 enum mlx5_reclaim_mem_mode {
93 MLX5_RCM_NONE, /* Don't reclaim memory. */
94 MLX5_RCM_LIGHT, /* Reclaim PMD level. */
95 MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
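/*
 * Illustrative example (assuming the "reclaim_mem_mode" devargs key exposed
 * by this PMD): selecting the aggressive reclaim mode when starting testpmd:
 *
 *   dpdk-testpmd -a <pci_addr>,reclaim_mem_mode=2 -- -i
 */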
98 /* The type of flow. */
100 MLX5_FLOW_TYPE_CTL, /* Control flow. */
101 MLX5_FLOW_TYPE_GEN, /* General flow. */
102 MLX5_FLOW_TYPE_MCP, /* MCP flow. */
106 /* The mode of delay drop for Rx queues. */
107 enum mlx5_delay_drop_mode {
108 MLX5_DELAY_DROP_NONE = 0, /* All disabled. */
109 MLX5_DELAY_DROP_STANDARD = RTE_BIT32(0), /* Standard queues enable. */
110 MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Hairpin queues enable. */
113 /* The HWS action type root/non-root. */
114 enum mlx5_hw_action_flag_type {
115 MLX5_HW_ACTION_FLAG_ROOT, /* Root action. */
116 MLX5_HW_ACTION_FLAG_NONE_ROOT, /* Non-root action. */
117 MLX5_HW_ACTION_FLAG_MAX, /* Maximum action flag. */
120 /* Hlist and list callback context. */
121 struct mlx5_flow_cb_ctx {
122 struct rte_eth_dev *dev;
123 struct rte_flow_error *error;
128 /* Device capabilities structure which isn't changed in any stage. */
129 struct mlx5_dev_cap {
130 int max_cq; /* Maximum number of supported CQs */
131 int max_qp; /* Maximum number of supported QPs. */
132 int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
134 /* Maximum number of s/g per WR for SQ & RQ of QP for non RDMA Read
137 int mps; /* Multi-packet send supported mode. */
138 uint32_t vf:1; /* This is a VF. */
139 uint32_t sf:1; /* This is a SF. */
140 uint32_t txpp_en:1; /* Tx packet pacing is supported. */
141 uint32_t mpls_en:1; /* MPLS over GRE/UDP is supported. */
142 uint32_t cqe_comp:1; /* CQE compression is supported. */
143 uint32_t hw_csum:1; /* Checksum offload is supported. */
144 uint32_t hw_padding:1; /* End alignment padding is supported. */
145 uint32_t dest_tir:1; /* Whether advanced DR API is available. */
146 uint32_t dv_esw_en:1; /* E-Switch DV flow is supported. */
147 uint32_t dv_flow_en:1; /* DV flow is supported. */
148 uint32_t swp:3; /* Tx generic tunnel checksum and TSO offload. */
149 uint32_t hw_vlan_strip:1; /* VLAN stripping is supported. */
150 uint32_t scatter_fcs_w_decap_disable:1;
151 /* HW has bug working with tunnel packet decap and scatter FCS. */
152 uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
153 uint32_t rt_timestamp:1; /* Realtime timestamp format. */
154 uint32_t lro_supported:1; /* Whether LRO is supported. */
155 uint32_t rq_delay_drop_en:1; /* Enable RxQ delay drop. */
156 uint32_t tunnel_en:3;
157 /* Whether tunnel stateless offloads are supported. */
158 uint32_t ind_table_max_size;
159 /* Maximum receive WQ indirection table size. */
160 uint32_t tso:1; /* Whether TSO is supported. */
161 uint32_t tso_max_payload_sz; /* Maximum TCP payload for TSO. */
163 uint32_t enabled:1; /* Whether MPRQ is enabled. */
164 uint32_t log_min_stride_size; /* Log min size of a stride. */
165 uint32_t log_max_stride_size; /* Log max size of a stride. */
166 uint32_t log_min_stride_num; /* Log min num of strides. */
167 uint32_t log_max_stride_num; /* Log max num of strides. */
168 uint32_t log_min_stride_wqe_size;
169 /* Log min WQE size, (size of single stride)*(num of strides).*/
170 } mprq; /* Capability for Multi-Packet RQ. */
171 char fw_ver[64]; /* Firmware version of this device. */
174 /** Data associated with devices to spawn. */
175 struct mlx5_dev_spawn_data {
176 uint32_t ifindex; /**< Network interface index. */
177 uint32_t max_port; /**< Device maximal port index. */
178 uint32_t phys_port; /**< Device physical port index. */
179 int pf_bond; /**< Bonding device PF index; < 0 - no bonding. */
180 struct mlx5_switch_info info; /**< Switch information. */
181 const char *phys_dev_name; /**< Name of physical device. */
182 struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
183 struct rte_pci_device *pci_dev; /**< Backend PCI device. */
184 struct mlx5_common_device *cdev; /**< Backend common device. */
185 struct mlx5_bond_info *bond_info;
188 /** Data associated with socket messages. */
189 struct mlx5_flow_dump_req {
190 uint32_t port_id; /**< There are plans in DPDK to extend port_id. */
194 struct mlx5_flow_dump_ack {
195 int rc; /**< Return code. */
198 LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
200 /* Shared data between primary and secondary processes. */
201 struct mlx5_shared_data {
203 /* Global spinlock for primary and secondary processes. */
204 int init_done; /* Whether primary has done initialization. */
205 unsigned int secondary_cnt; /* Number of secondary processes init'd. */
208 /* Per-process data structure, not visible to other processes. */
209 struct mlx5_local_data {
210 int init_done; /* Whether a secondary has done initialization. */
213 extern struct mlx5_shared_data *mlx5_shared_data;
215 /* Dev ops structs */
216 extern const struct eth_dev_ops mlx5_dev_ops;
217 extern const struct eth_dev_ops mlx5_dev_sec_ops;
218 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
220 struct mlx5_counter_ctrl {
221 /* Name of the counter. */
222 char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
223 /* Name of the counter on the device table. */
224 char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
225 uint32_t dev:1; /**< Nonzero for dev counters. */
228 struct mlx5_xstats_ctrl {
229 /* Number of device stats. */
231 /* Number of device stats identified by PMD. */
232 uint16_t mlx5_stats_n;
233 /* Index in the device counters table. */
234 uint16_t dev_table_idx[MLX5_MAX_XSTATS];
235 uint64_t base[MLX5_MAX_XSTATS];
236 uint64_t xstats[MLX5_MAX_XSTATS];
237 uint64_t hw_stats[MLX5_MAX_XSTATS];
238 struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
241 struct mlx5_stats_ctrl {
242 /* Base for imissed counter. */
243 uint64_t imissed_base;
247 /* Maximal size of coalesced segment for LRO is set in chunks of 256 Bytes. */
248 #define MLX5_LRO_SEG_CHUNK_SIZE 256u
250 /* Maximal size of aggregated LRO packet. */
251 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
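/*
 * Worked example: with MLX5_LRO_SEG_CHUNK_SIZE of 256 bytes,
 * MLX5_MAX_LRO_SIZE = UINT8_MAX * 256 = 255 * 256 = 65280 bytes.
 */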
253 /* Maximal number of segments to split. */
254 #define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
257 * Port configuration structure.
258 * User device parameters may disable features.
259 * This structure contains all port-oriented configurations coming from
260 * devargs. When probing again, the devargs do not have to be compatible
261 * with the primary ones. It is updated for each port in the spawn function.
263 struct mlx5_port_config {
264 unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
265 unsigned int hw_padding:1; /* End alignment padding is supported. */
266 unsigned int cqe_comp:1; /* CQE compression is enabled. */
267 unsigned int cqe_comp_fmt:3; /* CQE compression format. */
268 unsigned int rx_vec_en:1; /* Rx vector is enabled. */
269 unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
270 unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
272 unsigned int enabled:1; /* Whether MPRQ is enabled. */
273 unsigned int log_stride_num; /* Log number of strides. */
274 unsigned int log_stride_size; /* Log size of a stride. */
275 unsigned int max_memcpy_len;
276 /* Maximum packet size to memcpy Rx packets. */
277 unsigned int min_rxqs_num;
278 /* Rx queue count threshold to enable MPRQ. */
279 } mprq; /* Configurations for Multi-Packet RQ. */
280 int mps; /* Multi-packet send supported mode. */
281 unsigned int max_dump_files_num; /* Maximum dump files per queue. */
282 unsigned int log_hp_size; /* Single hairpin queue data size in total. */
283 unsigned int lro_timeout; /* LRO user configuration. */
284 int txqs_inline; /* Queue number threshold for inlining. */
285 int txq_inline_min; /* Minimal amount of data bytes to inline. */
286 int txq_inline_max; /* Max packet size for inlining with SEND. */
287 int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
291 * Shared context device configuration structure.
292 * User device parameters may disable features.
293 * This structure is updated once per device in the mlx5_alloc_shared_dev_ctx()
294 * function and cannot change even when probing again.
296 struct mlx5_sh_config {
297 int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
298 int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
299 uint32_t reclaim_mode:2; /* Memory reclaim mode. */
300 uint32_t dv_esw_en:1; /* Enable E-Switch DV flow. */
301 /* Enable DV flow. 1 means SW steering, 2 means HW steering. */
302 unsigned int dv_flow_en:2;
303 uint32_t dv_xmeta_en:2; /* Enable extensive flow metadata. */
304 uint32_t dv_miss_info:1; /* Restore packet after partial hw miss. */
305 uint32_t l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
306 uint32_t vf_nl_en:1; /* Enable Netlink requests in VF mode. */
307 uint32_t lacp_by_user:1; /* Enable user to manage LACP traffic. */
308 uint32_t decap_en:1; /* Whether decap will be used or not. */
309 uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
310 uint32_t allow_duplicate_pattern:1;
311 /* Allow/Prevent the duplicate rules pattern. */
315 /* Structure for VF VLAN workaround. */
316 struct mlx5_vf_vlan {
321 /* Flow drop context necessary due to Verbs API. */
323 struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
324 struct mlx5_rxq_priv *rxq; /* Rx queue. */
327 /* Loopback dummy queue resources required due to Verbs API. */
329 struct ibv_qp *qp; /* QP object. */
330 void *ibv_cq; /* Completion queue. */
331 uint16_t refcnt; /* Reference count for representors. */
334 /* HW steering queue job descriptor type. */
336 MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
337 MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
340 /* HW steering flow management job descriptor. */
341 struct mlx5_hw_q_job {
342 uint32_t type; /* Job type. */
343 struct rte_flow_hw *flow; /* Flow attached to the job. */
344 void *user_data; /* Job user data. */
345 uint8_t *encap_data; /* Encap data. */
348 /* HW steering job descriptor LIFO pool. */
350 uint32_t job_idx; /* Free job index. */
351 uint32_t size; /* LIFO size. */
352 struct mlx5_hw_q_job **job; /* LIFO header. */
353 } __rte_cache_aligned;
355 #define MLX5_COUNTERS_PER_POOL 512
356 #define MLX5_MAX_PENDING_QUERIES 4
357 #define MLX5_CNT_CONTAINER_RESIZE 64
358 #define MLX5_CNT_SHARED_OFFSET 0x80000000
359 #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \
360 MLX5_CNT_BATCH_OFFSET)
361 #define MLX5_CNT_SIZE (sizeof(struct mlx5_flow_counter))
362 #define MLX5_AGE_SIZE (sizeof(struct mlx5_age_param))
364 #define MLX5_CNT_LEN(pool) \
366 ((pool)->is_aged ? MLX5_AGE_SIZE : 0))
367 #define MLX5_POOL_GET_CNT(pool, index) \
368 ((struct mlx5_flow_counter *) \
369 ((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
370 #define MLX5_CNT_ARRAY_IDX(pool, cnt) \
371 ((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
373 #define MLX5_TS_MASK_SECS 8ull
374 /* Timestamp wrapping in seconds, must be a power of 2. */
377 * The pool index and the offset of the counter in the pool array make up
378 * the counter index. 1 is added so that a counter from pool 0 at offset 0
379 * does not map to index 0, since 0 means an invalid counter index
382 #define MLX5_MAKE_CNT_IDX(pi, offset) \
383 ((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
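/*
 * Worked example: a counter at offset 5 in pool 2 gets index
 * MLX5_MAKE_CNT_IDX(2, 5) = 2 * 512 + 5 + 1 = 1030, so index 0 is never
 * produced and can keep meaning "invalid counter".
 */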
384 #define MLX5_CNT_TO_AGE(cnt) \
385 ((struct mlx5_age_param *)((cnt) + 1))
387 * The maximum single counter is 0x800000 as MLX5_CNT_BATCH_OFFSET
388 * defines. The pool size is 512, pool index should never reach
391 #define POOL_IDX_INVALID UINT16_MAX
395 AGE_FREE, /* Initialized state. */
396 AGE_CANDIDATE, /* Counter assigned to flows. */
397 AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
400 enum mlx5_counter_type {
401 MLX5_COUNTER_TYPE_ORIGIN,
402 MLX5_COUNTER_TYPE_AGE,
403 MLX5_COUNTER_TYPE_MAX,
406 /* Counter age parameter. */
407 struct mlx5_age_param {
408 uint16_t state; /**< Age state (atomically accessed). */
409 uint16_t port_id; /**< Port id of the counter. */
410 uint32_t timeout:24; /**< Aging timeout in seconds. */
411 uint32_t sec_since_last_hit;
412 /**< Time in seconds since last hit (atomically accessed). */
413 void *context; /**< Flow counter age context. */
416 struct flow_counter_stats {
421 /* Shared counter information. */
422 struct mlx5_flow_counter_shared {
424 uint32_t refcnt; /* Only for shared action management. */
425 uint32_t id; /* User counter ID for legacy sharing. */
429 struct mlx5_flow_counter_pool;
430 /* Generic counters information. */
431 struct mlx5_flow_counter {
434 * User-defined counter shared info is only used while the
435 * counter is active. Aging counter sharing is not
436 * supported, so an active shared counter is not chained
437 * to the aging list. For a shared counter, the TAILQ entry
438 * memory is used only after the counter is released; at
439 * that point the shared info is not used anymore.
441 * Similarly, the non-batch counter dcs does not support
442 * aging, so while the counter is allocated the entry
443 * memory is not used. Thus the bytes memory is used only
444 * when the counter is allocated, and the entry memory is
445 * used only when the counter is free; the dcs pointer can
446 * be saved in these two different places at different
447 * stages. This eliminates the need for an individual
448 * counter extension struct.
450 TAILQ_ENTRY(mlx5_flow_counter) next;
451 /**< Pointer to the next flow counter structure. */
453 struct mlx5_flow_counter_shared shared_info;
454 /**< Shared counter information. */
455 void *dcs_when_active;
457 * For non-batch mode, the dcs will be saved
458 * here when the counter is free.
463 uint64_t hits; /**< Reset value of hits packets. */
464 struct mlx5_flow_counter_pool *pool; /**< Counter pool. */
467 uint64_t bytes; /**< Reset value of bytes. */
470 * For non-batch mode, the dcs will be saved here
471 * when the counter is free.
474 void *action; /**< Pointer to the dv action. */
477 TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
479 /* Generic counter pool structure - query is in pool resolution. */
480 struct mlx5_flow_counter_pool {
481 TAILQ_ENTRY(mlx5_flow_counter_pool) next;
482 struct mlx5_counters counters[2]; /* Free counter list. */
483 struct mlx5_devx_obj *min_dcs;
484 /* The devx object of the minimum counter ID. */
485 uint64_t time_of_last_age_check;
486 /* System time (from rte_rdtsc()) read in the last aging check. */
487 uint32_t index:30; /* Pool index in container. */
488 uint32_t is_aged:1; /* Pool with aging counter. */
489 volatile uint32_t query_gen:1; /* Query round. */
490 rte_spinlock_t sl; /* The pool lock. */
491 rte_spinlock_t csl; /* The pool counter free list lock. */
492 struct mlx5_counter_stats_raw *raw;
493 struct mlx5_counter_stats_raw *raw_hw;
494 /* The raw on HW working. */
497 /* Memory management structure for group of counter statistics raws. */
498 struct mlx5_counter_stats_mem_mng {
499 LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
500 struct mlx5_counter_stats_raw *raws;
501 struct mlx5_pmd_wrapped_mr wm;
504 /* Raw memory structure for the counter statistics values of a pool. */
505 struct mlx5_counter_stats_raw {
506 LIST_ENTRY(mlx5_counter_stats_raw) next;
507 struct mlx5_counter_stats_mem_mng *mem_mng;
508 volatile struct flow_counter_stats *data;
511 TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
513 /* Counter global management structure. */
514 struct mlx5_flow_counter_mng {
515 volatile uint16_t n_valid; /* Number of valid pools. */
516 uint16_t n; /* Number of pools. */
517 uint16_t last_pool_idx; /* Last used pool index */
518 int min_id; /* The minimum counter ID in the pools. */
519 int max_id; /* The maximum counter ID in the pools. */
520 rte_spinlock_t pool_update_sl; /* The pool update lock. */
521 rte_spinlock_t csl[MLX5_COUNTER_TYPE_MAX];
522 /* The counter free list lock. */
523 struct mlx5_counters counters[MLX5_COUNTER_TYPE_MAX];
524 /* Free counter list. */
525 struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
526 struct mlx5_counter_stats_mem_mng *mem_mng;
527 /* Hold the memory management for the next allocated pools raws. */
528 struct mlx5_counters flow_counters; /* Legacy flow counter list. */
529 uint8_t pending_queries;
531 uint8_t query_thread_on;
532 bool counter_fallback; /* Use counter fallback management. */
533 LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
534 LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
537 /* ASO structures. */
538 #define MLX5_ASO_QUEUE_LOG_DESC 10
543 struct mlx5_devx_cq cq_obj;
547 struct mlx5_aso_sq_elem {
550 struct mlx5_aso_age_pool *pool;
553 struct mlx5_aso_mtr *mtr;
555 struct mlx5_aso_ct_action *ct;
564 struct mlx5_aso_cq cq;
565 struct mlx5_devx_sq sq_obj;
566 struct mlx5_pmd_mr mr;
571 struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC];
572 uint16_t next; /* Pool index of the next pool to query. */
575 struct mlx5_aso_age_action {
576 LIST_ENTRY(mlx5_aso_age_action) next;
579 /* Following fields relevant only when action is active. */
580 uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
581 struct mlx5_age_param age_params;
584 #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512
586 struct mlx5_aso_age_pool {
587 struct mlx5_devx_obj *flow_hit_aso_obj;
588 uint16_t index; /* Pool index in pools array. */
589 uint64_t time_of_last_age_check; /* In seconds. */
590 struct mlx5_aso_age_action actions[MLX5_ASO_AGE_ACTIONS_PER_POOL];
593 LIST_HEAD(aso_age_list, mlx5_aso_age_action);
595 struct mlx5_aso_age_mng {
596 struct mlx5_aso_age_pool **pools;
597 uint16_t n; /* Total number of pools. */
598 uint16_t next; /* Number of pools in use, index of next free pool. */
599 rte_rwlock_t resize_rwl; /* Lock for resize objects. */
600 rte_spinlock_t free_sl; /* Lock for free list access. */
601 struct aso_age_list free; /* Free age actions list - ready to use. */
602 struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
605 /* Management structure for GENEVE TLV option. */
606 struct mlx5_geneve_tlv_option_resource {
607 struct mlx5_devx_obj *obj; /* Pointer to the GENEVE TLV opt object. */
608 rte_be16_t option_class; /* GENEVE TLV opt class. */
609 uint8_t option_type; /* GENEVE TLV opt type. */
610 uint8_t length; /* GENEVE TLV opt length. */
611 uint32_t refcnt; /* GENEVE TLV object reference counter. */
615 #define MLX5_AGE_EVENT_NEW 1
616 #define MLX5_AGE_TRIGGER 2
617 #define MLX5_AGE_SET(age_info, BIT) \
618 ((age_info)->flags |= (1 << (BIT)))
619 #define MLX5_AGE_UNSET(age_info, BIT) \
620 ((age_info)->flags &= ~(1 << (BIT)))
621 #define MLX5_AGE_GET(age_info, BIT) \
622 ((age_info)->flags & (1 << (BIT)))
623 #define GET_PORT_AGE_INFO(priv) \
624 (&((priv)->sh->port[(priv)->dev_port - 1].age_info))
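/*
 * Illustrative usage (not taken from the driver): raising and testing the
 * aging event flags on the per-port aging info; "priv" is assumed to be a
 * valid struct mlx5_priv *.
 *
 *   struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
 *
 *   MLX5_AGE_SET(info, MLX5_AGE_EVENT_NEW);
 *   if (MLX5_AGE_GET(info, MLX5_AGE_TRIGGER))
 *       MLX5_AGE_UNSET(info, MLX5_AGE_TRIGGER);
 */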
625 /* Current time in seconds. */
626 #define MLX5_CURR_TIME_SEC (rte_rdtsc() / rte_get_tsc_hz())
628 /* Per-port aging information. */
629 struct mlx5_age_info {
630 uint8_t flags; /* Indicate if is new event or need to be triggered. */
631 struct mlx5_counters aged_counters; /* Aged counter list. */
632 struct aso_age_list aged_aso; /* Aged ASO actions list. */
633 rte_spinlock_t aged_sl; /* Aged flow list lock. */
636 /* Per port data of shared IB device. */
637 struct mlx5_dev_shared_port {
639 uint32_t devx_ih_port_id;
640 uint32_t nl_ih_port_id;
642 * Interrupt handler port_id. Used by the shared interrupt
643 * handler to find the corresponding rte_eth device
644 * by IB port index. If the value is equal to or greater
645 * than RTE_MAX_ETHPORTS, no subhandler is installed for
646 * the specified IB port index.
648 struct mlx5_age_info age_info;
649 /* Per-port aging information. */
653 * Max number of actions per DV flow.
654 * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
655 * in rdma-core file providers/mlx5/verbs.c.
657 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
659 /* ASO flow meter structures */
660 /* Modify this value if enum rte_mtr_color changes. */
661 #define RTE_MTR_DROPPED RTE_COLORS
662 /* Yellow is now supported. */
663 #define MLX5_MTR_RTE_COLORS (RTE_COLOR_YELLOW + 1)
664 /* The table_id field is 22 bits in mlx5_flow_tbl_key, which limits the policy number. */
665 #define MLX5_MAX_SUB_POLICY_TBL_NUM 0x3FFFFF
666 #define MLX5_INVALID_POLICY_ID UINT32_MAX
667 /* Suffix table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
668 #define MLX5_MTR_TABLE_ID_SUFFIX 1
669 /* Drop table_id on MLX5_FLOW_TABLE_LEVEL_METER. */
670 #define MLX5_MTR_TABLE_ID_DROP 2
671 /* Priority of the meter policy matcher. */
672 #define MLX5_MTR_POLICY_MATCHER_PRIO 0
673 /* Green & yellow color valid for now. */
674 #define MLX5_MTR_POLICY_MODE_ALL 0
675 /* Default policy. */
676 #define MLX5_MTR_POLICY_MODE_DEF 1
677 /* Only green color valid. */
678 #define MLX5_MTR_POLICY_MODE_OG 2
679 /* Only yellow color valid. */
680 #define MLX5_MTR_POLICY_MODE_OY 3
682 enum mlx5_meter_domain {
683 MLX5_MTR_DOMAIN_INGRESS,
684 MLX5_MTR_DOMAIN_EGRESS,
685 MLX5_MTR_DOMAIN_TRANSFER,
688 #define MLX5_MTR_DOMAIN_INGRESS_BIT (1 << MLX5_MTR_DOMAIN_INGRESS)
689 #define MLX5_MTR_DOMAIN_EGRESS_BIT (1 << MLX5_MTR_DOMAIN_EGRESS)
690 #define MLX5_MTR_DOMAIN_TRANSFER_BIT (1 << MLX5_MTR_DOMAIN_TRANSFER)
691 #define MLX5_MTR_ALL_DOMAIN_BIT (MLX5_MTR_DOMAIN_INGRESS_BIT | \
692 MLX5_MTR_DOMAIN_EGRESS_BIT | \
693 MLX5_MTR_DOMAIN_TRANSFER_BIT)
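/*
 * Worked example: a meter valid in the ingress and transfer domains carries
 * the mask MLX5_MTR_DOMAIN_INGRESS_BIT | MLX5_MTR_DOMAIN_TRANSFER_BIT =
 * 0x1 | 0x4 = 0x5, while MLX5_MTR_ALL_DOMAIN_BIT evaluates to 0x7.
 */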
695 /* The color tag rule structure. */
696 struct mlx5_sub_policy_color_rule {
698 /* The color rule. */
699 struct mlx5_flow_dv_matcher *matcher;
700 /* The color matcher. */
701 TAILQ_ENTRY(mlx5_sub_policy_color_rule) next_port;
702 /**< Pointer to the next color rule structure. */
704 /* Source port to which this rule applies. */
707 TAILQ_HEAD(mlx5_sub_policy_color_rules, mlx5_sub_policy_color_rule);
710 * Meter sub-policy structure.
711 * Each RSS TIR in the meter policy needs its own sub-policy resource.
713 struct mlx5_flow_meter_sub_policy {
714 uint32_t main_policy_id:1;
715 /* Main policy id is the same as this sub_policy id. */
717 /* Index to sub_policy ipool entity. */
719 /* Point to struct mlx5_flow_meter_policy. */
720 struct mlx5_flow_tbl_resource *tbl_rsc;
721 /* The sub-policy table resource. */
722 uint32_t rix_hrxq[MLX5_MTR_RTE_COLORS];
723 /* Index to TIR resource. */
724 struct mlx5_flow_tbl_resource *jump_tbl[MLX5_MTR_RTE_COLORS];
725 /* Meter jump/drop table. */
726 struct mlx5_sub_policy_color_rules color_rules[RTE_COLORS];
727 /* List for the color rules. */
730 struct mlx5_meter_policy_acts {
732 /* Number of actions. */
733 void *dv_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
737 struct mlx5_meter_policy_action_container {
739 /* Index to the mark action. */
740 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
741 /* Pointer to modify header resource in cache. */
743 /* Fate action type. */
745 struct rte_flow_action *rss;
746 /* RSS action configuration. */
747 uint32_t rix_port_id_action;
748 /* Index to port ID action resource. */
749 void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
750 /* Jump/drop action per color. */
752 /* Queue action configuration. */
754 uint32_t next_mtr_id;
755 /* The next meter id. */
756 void *next_sub_policy;
757 /* Next meter's sub-policy. */
762 /* Flow meter policy parameter structure. */
763 struct mlx5_flow_meter_policy {
764 struct rte_eth_dev *dev;
765 /* The port dev on which policy is created. */
767 /* Is RSS policy table. */
769 /* Rule applies to ingress domain. */
771 /* Rule applies to egress domain. */
773 /* Rule applies to transfer domain. */
775 /* Is queue action in policy table. */
776 uint32_t is_hierarchy:1;
777 /* Is meter action in policy table. */
779 /* If yellow color policy is skipped. */
781 /* If green color policy is skipped. */
783 /* If policy contains mark action. */
787 struct mlx5_meter_policy_action_container act_cnt[MLX5_MTR_RTE_COLORS];
788 /* Policy actions container. */
789 void *dr_drop_action[MLX5_MTR_DOMAIN_MAX];
790 /* drop action for red color. */
791 uint16_t sub_policy_num;
792 /* Count sub policy tables, 3 bits per domain. */
793 struct mlx5_flow_meter_sub_policy **sub_policys[MLX5_MTR_DOMAIN_MAX];
794 /* Sub-policy table array must be at the end of the struct. */
797 /* The maximum number of sub-policies is related to struct mlx5_rss_hash_fields[]. */
798 #define MLX5_MTR_RSS_MAX_SUB_POLICY 7
799 #define MLX5_MTR_SUB_POLICY_NUM_SHIFT 3
800 #define MLX5_MTR_SUB_POLICY_NUM_MASK 0x7
801 #define MLX5_MTRS_DEFAULT_RULE_PRIORITY 0xFFFF
802 #define MLX5_MTR_CHAIN_MAX_NUM 8
804 /* Flow meter default policy parameter structure.
805 * Policy index 0 is reserved for the default policy table.
806 * Actions per color are as follows:
807 * green - do nothing, yellow - do nothing, red - drop.
809 struct mlx5_flow_meter_def_policy {
810 struct mlx5_flow_meter_sub_policy sub_policy;
811 /* Policy rules jump to other tables. */
812 void *dr_jump_action[RTE_COLORS];
813 /* Jump action per color. */
816 /* Meter parameter structure. */
817 struct mlx5_flow_meter_info {
821 /* Policy id, the first sub_policy idx. */
822 struct mlx5_flow_meter_profile *profile;
823 /**< Meter profile parameters. */
824 rte_spinlock_t sl; /**< Meter action spinlock. */
825 /** Set of stats counters to be enabled.
826 * @see enum rte_mtr_stats_type
828 uint32_t bytes_dropped:1;
829 /** Set bytes dropped stats to be enabled. */
830 uint32_t pkts_dropped:1;
831 /** Set packets dropped stats to be enabled. */
832 uint32_t active_state:1;
833 /**< Meter hw active state. */
835 /**< Meter shared or not. */
836 uint32_t is_enable:1;
837 /**< Meter disable/enable state. */
839 /**< Rule applies to egress traffic. */
842 * Instead of simply matching the properties of traffic as it would
843 * appear on a given DPDK port ID, enabling this attribute transfers
844 * a flow rule to the lowest possible level of any device endpoints
845 * found in the pattern.
847 * When supported, this effectively enables an application to
848 * re-route traffic not necessarily intended for it (e.g. coming
849 * from or addressed to different physical ports, VFs or
850 * applications) at the device level.
852 * It complements the behavior of some pattern items such as
853 * RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them.
855 * When transferring flow rules, ingress and egress attributes keep
856 * their original meaning, as if processing traffic emitted or
857 * received by the application.
860 uint32_t def_policy:1;
861 /* Meter points to default policy. */
862 uint32_t color_aware:1;
863 /* Meter is color aware mode. */
864 void *drop_rule[MLX5_MTR_DOMAIN_MAX];
865 /* Meter drop rule in drop table. */
867 /**< Color counter for drop. */
870 struct mlx5_indexed_pool *flow_ipool;
871 /**< Index pool for flow id. */
872 void *meter_action_g;
873 /**< Flow meter action. */
874 void *meter_action_y;
875 /**< Flow meter action for yellow init_color. */
878 /* PPS (packets per second) maps to BPS (bytes per second).
879 * HW treats a packet as 128 bytes in PPS mode.
881 #define MLX5_MTRS_PPS_MAP_BPS_SHIFT 7
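/*
 * Worked example: with the shift of 7, a packet-based rate is converted to a
 * byte-based one by multiplying by 128, e.g. 1000 packets per second maps to
 * 1000 << MLX5_MTRS_PPS_MAP_BPS_SHIFT = 128000 bytes per second.
 */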
883 /* RFC2697 parameter structure. */
884 struct mlx5_flow_meter_srtcm_rfc2697_prm {
887 * bits 24-28: cbs_exponent, bits 16-23: cbs_mantissa,
888 * bits 8-12: cir_exponent, bits 0-7: cir_mantissa.
892 * bits 24-28: ebs_exponent, bits 16-23: ebs_mantissa,
893 * bits 8-12: eir_exponent, bits 0-7: eir_mantissa.
897 /* Flow meter profile structure. */
898 struct mlx5_flow_meter_profile {
899 TAILQ_ENTRY(mlx5_flow_meter_profile) next;
900 /**< Pointer to the next flow meter structure. */
901 uint32_t id; /**< Profile id. */
902 struct rte_mtr_meter_profile profile; /**< Profile detail. */
904 struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
905 /**< srtcm_rfc2697 struct. */
907 uint32_t ref_cnt; /**< Use count. */
908 uint32_t g_support:1; /**< If G color will be generated. */
909 uint32_t y_support:1; /**< If Y color will be generated. */
912 /* 2 meters in each ASO cache line */
913 #define MLX5_MTRS_CONTAINER_RESIZE 64
915 * The pool index and the offset of the meter in the pool array make up
916 * the meter index. 1 is added so that a meter from pool 0 at offset 0
917 * does not map to index 0, since 0 means an invalid meter index
920 #define MLX5_MAKE_MTR_IDX(pi, offset) \
921 ((pi) * MLX5_ASO_MTRS_PER_POOL + (offset) + 1)
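/*
 * Illustrative example: a meter at offset 0 in pool "pi" gets index
 * MLX5_MAKE_MTR_IDX(pi, 0) = pi * MLX5_ASO_MTRS_PER_POOL + 1, so index 0
 * never appears and keeps meaning "invalid meter".
 */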
923 /* ASO flow meter state. */
924 enum mlx5_aso_mtr_state {
925 ASO_METER_FREE, /* In free list. */
926 ASO_METER_WAIT, /* ACCESS_ASO WQE in progress. */
927 ASO_METER_READY, /* CQE received. */
930 /* Generic aso_flow_meter information. */
931 struct mlx5_aso_mtr {
932 LIST_ENTRY(mlx5_aso_mtr) next;
933 struct mlx5_flow_meter_info fm;
934 /**< ASO flow meter information. */
935 uint8_t state; /**< ASO flow meter state. */
939 /* Generic aso_flow_meter pool structure. */
940 struct mlx5_aso_mtr_pool {
941 struct mlx5_aso_mtr mtrs[MLX5_ASO_MTRS_PER_POOL];
942 /* Must be the first in the pool. */
943 struct mlx5_devx_obj *devx_obj;
944 /* The devx object of the minimum aso flow meter ID. */
945 uint32_t index; /* Pool index in management structure. */
948 LIST_HEAD(aso_meter_list, mlx5_aso_mtr);
949 /* Pools management structure for ASO flow meter pools. */
950 struct mlx5_aso_mtr_pools_mng {
951 volatile uint16_t n_valid; /* Number of valid pools. */
952 uint16_t n; /* Number of pools. */
953 rte_spinlock_t mtrsl; /* The ASO flow meter free list lock. */
954 rte_rwlock_t resize_mtrwl; /* Lock for resize objects. */
955 struct aso_meter_list meters; /* Free ASO flow meter list. */
956 struct mlx5_aso_sq sq; /* SQ used by ASO flow meter. */
957 struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
960 /* Meter management structure for global flow meter resource. */
961 struct mlx5_flow_mtr_mng {
962 struct mlx5_aso_mtr_pools_mng pools_mng;
963 /* Pools management structure for ASO flow meter pools. */
964 struct mlx5_flow_meter_def_policy *def_policy[MLX5_MTR_DOMAIN_MAX];
965 /* Default policy table. */
966 uint32_t def_policy_id;
967 /* Default policy id. */
968 uint32_t def_policy_ref_cnt;
969 /** def_policy meter use count. */
970 struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
971 /* Meter drop table. */
972 struct mlx5_flow_dv_matcher *
973 drop_matcher[MLX5_MTR_DOMAIN_MAX][MLX5_REG_BITS];
974 /* Matcher meter in drop table. */
975 struct mlx5_flow_dv_matcher *def_matcher[MLX5_MTR_DOMAIN_MAX];
976 /* Default matcher in drop table. */
977 void *def_rule[MLX5_MTR_DOMAIN_MAX];
978 /* Default rule in drop table. */
979 uint8_t max_mtr_bits;
980 /* Indicate how many bits are used by meter id at the most. */
981 uint8_t max_mtr_flow_bits;
982 /* Indicate how many bits are used by meter flow id at the most. */
985 /* Table key of the hash organization. */
986 union mlx5_flow_tbl_key {
988 /* Table ID should be at the lowest address. */
989 uint32_t level; /**< Level of the table. */
990 uint32_t id:22; /**< ID of the table. */
991 uint32_t dummy:1; /**< Dummy table for DV API. */
992 uint32_t is_fdb:1; /**< 1 - FDB, 0 - NIC TX/RX. */
993 uint32_t is_egress:1; /**< 1 - egress, 0 - ingress. */
994 uint32_t reserved:7; /**< must be zero for comparison. */
996 uint64_t v64; /**< Full 64-bit value of the key. */
999 /* Table structure. */
1000 struct mlx5_flow_tbl_resource {
1001 void *obj; /**< Pointer to DR table object. */
1004 #define MLX5_MAX_TABLES UINT16_MAX
1005 #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
1006 /* Reserve the last two tables for metadata register copy. */
1007 #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
1008 #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
1009 /* Tables for metering splits should be added here. */
1010 #define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 3)
1011 #define MLX5_FLOW_TABLE_LEVEL_POLICY (MLX5_MAX_TABLES - 4)
1012 #define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_POLICY
1013 #define MLX5_MAX_TABLES_FDB UINT16_MAX
1014 #define MLX5_FLOW_TABLE_FACTOR 10
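/*
 * Worked example of the reserved group layout: with MLX5_MAX_TABLES equal to
 * UINT16_MAX (65535), MLX5_FLOW_MREG_ACT_TABLE_GROUP is 65534,
 * MLX5_FLOW_MREG_CP_TABLE_GROUP is 65533, MLX5_FLOW_TABLE_LEVEL_METER is
 * 65532 and MLX5_FLOW_TABLE_LEVEL_POLICY = MLX5_MAX_TABLES_EXTERNAL is 65531.
 */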
1016 /* ID generation structure. */
1017 struct mlx5_flow_id_pool {
1018 uint32_t *free_arr; /**< Pointer to the array of free values. */
1019 uint32_t base_index;
1020 /**< The next index that can be used without any free elements. */
1021 uint32_t *curr; /**< Pointer to the index to pop. */
1022 uint32_t *last; /**< Pointer to the last element in the empty array. */
1023 uint32_t max_id; /**< Maximum id can be allocated from the pool. */
1026 /* Tx pacing queue structure - for Clock and Rearm queues. */
1027 struct mlx5_txpp_wq {
1028 /* Completion Queue related data.*/
1029 struct mlx5_devx_cq cq_obj;
1032 /* Send Queue related data.*/
1033 struct mlx5_devx_sq sq_obj;
1034 uint16_t sq_size; /* Number of WQEs in the queue. */
1035 uint16_t sq_ci; /* Next WQE to execute. */
1038 /* Tx packet pacing internal timestamp. */
1039 struct mlx5_txpp_ts {
1044 /* Tx packet pacing structure. */
1045 struct mlx5_dev_txpp {
1046 pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
1047 uint32_t refcnt; /* Pacing reference counter. */
1048 uint32_t freq; /* Timestamp frequency, Hz. */
1049 uint32_t tick; /* Completion tick duration in nanoseconds. */
1050 uint32_t test; /* Packet pacing test mode. */
1051 int32_t skew; /* Scheduling skew. */
1052 struct rte_intr_handle *intr_handle; /* Periodic interrupt. */
1053 void *echan; /* Event Channel. */
1054 struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
1055 struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
1056 void *pp; /* Packet pacing context. */
1057 uint16_t pp_id; /* Packet pacing context index. */
1058 uint16_t ts_n; /* Number of captured timestamps. */
1059 uint16_t ts_p; /* Pointer to statistics timestamp. */
1060 struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
1061 struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
1062 uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
1063 /* Statistics counters. */
1064 uint64_t err_miss_int; /* Missed service interrupt. */
1065 uint64_t err_rearm_queue; /* Rearm Queue errors. */
1066 uint64_t err_clock_queue; /* Clock Queue errors. */
1067 uint64_t err_ts_past; /* Timestamp in the past. */
1068 uint64_t err_ts_future; /* Timestamp in the distant future. */
1071 /* Sample ID information of eCPRI flex parser structure. */
1072 struct mlx5_ecpri_parser_profile {
1073 uint32_t num; /* Actual number of samples. */
1074 uint32_t ids[8]; /* Sample IDs for this profile. */
1075 uint8_t offset[8]; /* Byte offset of each parser. */
1076 void *obj; /* Flex parser node object. */
1079 /* Max member ports per bonding device. */
1080 #define MLX5_BOND_MAX_PORTS 2
1082 /* Bonding device information. */
1083 struct mlx5_bond_info {
1084 int n_port; /* Number of bond member ports. */
1086 char ifname[MLX5_NAMESIZE + 1];
1088 char ifname[MLX5_NAMESIZE + 1];
1090 struct rte_pci_addr pci_addr;
1091 } ports[MLX5_BOND_MAX_PORTS];
1094 /* Number of connection tracking objects per pool: must be a power of 2. */
1095 #define MLX5_ASO_CT_ACTIONS_PER_POOL 64
1097 /* Generate incremental and unique CT index from pool and offset. */
1098 #define MLX5_MAKE_CT_IDX(pool, offset) \
1099 ((pool) * MLX5_ASO_CT_ACTIONS_PER_POOL + (offset) + 1)
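/*
 * Worked example: with 64 CT actions per pool, the action at offset 10 in
 * pool 3 gets index MLX5_MAKE_CT_IDX(3, 10) = 3 * 64 + 10 + 1 = 203;
 * adding 1 keeps index 0 free to mean "invalid".
 */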
1101 /* ASO Conntrack state. */
1102 enum mlx5_aso_ct_state {
1103 ASO_CONNTRACK_FREE, /* Inactive, in the free list. */
1104 ASO_CONNTRACK_WAIT, /* WQE sent in the SQ. */
1105 ASO_CONNTRACK_READY, /* CQE received w/o error. */
1106 ASO_CONNTRACK_QUERY, /* WQE for query sent. */
1107 ASO_CONNTRACK_MAX, /* Guard. */
1110 /* Generic ASO connection tracking structure. */
1111 struct mlx5_aso_ct_action {
1112 LIST_ENTRY(mlx5_aso_ct_action) next; /* Pointer to the next ASO CT. */
1113 void *dr_action_orig; /* General action object for original dir. */
1114 void *dr_action_rply; /* General action object for reply dir. */
1115 uint32_t refcnt; /* Action used count in device flows. */
1116 uint16_t offset; /* Offset of ASO CT in DevX objects bulk. */
1117 uint16_t peer; /* The only peer port index that may also use this CT. */
1118 enum mlx5_aso_ct_state state; /* ASO CT state. */
1119 bool is_original; /* The direction of the DR action to be used. */
1122 /* CT action object state update. */
1123 #define MLX5_ASO_CT_UPDATE_STATE(c, s) \
1124 __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
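/*
 * Illustrative usage (not taken from the driver): marking a CT action ready
 * once its CQE has been processed; "ct" is assumed to be a valid
 * struct mlx5_aso_ct_action *.
 *
 *   MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
 */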
1126 /* ASO connection tracking software pool definition. */
1127 struct mlx5_aso_ct_pool {
1128 uint16_t index; /* Pool index in pools array. */
1129 struct mlx5_devx_obj *devx_obj;
1130 /* The first devx object in the bulk, used for freeing (not yet). */
1131 struct mlx5_aso_ct_action actions[MLX5_ASO_CT_ACTIONS_PER_POOL];
1132 /* CT action structures bulk. */
1135 LIST_HEAD(aso_ct_list, mlx5_aso_ct_action);
1137 /* Pools management structure for ASO connection tracking pools. */
1138 struct mlx5_aso_ct_pools_mng {
1139 struct mlx5_aso_ct_pool **pools;
1140 uint16_t n; /* Total number of pools. */
1141 uint16_t next; /* Number of pools in use, index of next free pool. */
1142 rte_spinlock_t ct_sl; /* The ASO CT free list lock. */
1143 rte_rwlock_t resize_rwl; /* The ASO CT pool resize lock. */
1144 struct aso_ct_list free_cts; /* Free ASO CT objects list. */
1145 struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
1150 uint8_t tx_remap_affinity[16]; /* The PF port number of affinity. */
1151 uint8_t affinity_mode; /* TIS or hash based affinity. */
1154 /* DevX flex parser context. */
1155 struct mlx5_flex_parser_devx {
1156 struct mlx5_list_entry entry; /* List element at the beginning. */
1157 uint32_t num_samples;
1159 struct mlx5_devx_graph_node_attr devx_conf;
1160 uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
1163 /* Pattern field descriptor - how to translate flex pattern into samples. */
1165 struct mlx5_flex_pattern_field {
1170 #define MLX5_INVALID_SAMPLE_REG_ID 0x1F
1172 /* Port flex item context. */
1173 struct mlx5_flex_item {
1174 struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
1175 uint32_t refcnt; /* Atomically accessed refcnt by flows. */
1176 enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
1177 uint32_t mapnum; /* Number of pattern translation entries. */
1178 struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
1182 * Shared Infiniband device context for Master/Representors
1183 * which belong to the same IB device with multiple IB ports.
1185 struct mlx5_dev_ctx_shared {
1186 LIST_ENTRY(mlx5_dev_ctx_shared) next;
1188 uint32_t esw_mode:1; /* Whether is E-Switch mode. */
1189 uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
1190 uint32_t steering_format_version:4;
1191 /* Indicates the device steering logic format. */
1192 uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
1193 uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
1194 uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
1195 uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */
1196 uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
1197 uint32_t dr_drop_action_en:1; /* Use DR drop action. */
1198 uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */
1199 uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */
1200 uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */
1201 uint32_t max_port; /* Maximal IB device port index. */
1202 struct mlx5_bond_info bond; /* Bonding information. */
1203 struct mlx5_common_device *cdev; /* Backend mlx5 device. */
1204 uint32_t tdn; /* Transport Domain number. */
1205 char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
1206 char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
1207 struct mlx5_dev_cap dev_cap; /* Device capabilities. */
1208 struct mlx5_sh_config config; /* Device configuration. */
1209 int numa_node; /* Numa node of backing physical device. */
1210 /* Packet pacing related structure. */
1211 struct mlx5_dev_txpp txpp;
1212 /* Shared DV/DR flow data section. */
1213 uint32_t dv_meta_mask; /* flow META metadata supported mask. */
1214 uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
1215 uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
1216 void *fdb_domain; /* FDB Direct Rules name space handle. */
1217 void *rx_domain; /* RX Direct Rules name space handle. */
1218 void *tx_domain; /* TX Direct Rules name space handle. */
1220 rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
1221 rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
1222 /* UAR same-page access control required in 32bit implementations. */
1225 struct mlx5_hlist *flow_tbls; /* SWS flow table. */
1226 struct mlx5_hlist *groups; /* HWS flow group. */
1228 struct mlx5_flow_tunnel_hub *tunnel_hub;
1229 /* Direct Rules tables for FDB, NIC TX+RX */
1230 void *dr_drop_action; /* Pointer to DR drop action, any domain. */
1231 void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
1232 struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
1233 struct mlx5_hlist *modify_cmds;
1234 struct mlx5_hlist *tag_table;
1235 struct mlx5_list *port_id_action_list; /* Port ID action list. */
1236 struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
1237 struct mlx5_list *sample_action_list; /* List of sample actions. */
1238 struct mlx5_list *dest_array_list; /* List of destination array actions. */
1239 struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
1241 struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
1242 void *default_miss_action; /* Default miss action. */
1243 struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
1244 struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
1245 /* Shared interrupt handler section. */
1246 struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
1247 struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
1248 struct rte_intr_handle *intr_handle_nl; /* Netlink interrupt handler. */
1249 void *devx_comp; /* DEVX async comp obj. */
1250 struct mlx5_devx_obj *tis[16]; /* TIS object. */
1251 struct mlx5_devx_obj *td; /* Transport domain. */
1252 struct mlx5_lag lag; /* LAG attributes */
1253 struct mlx5_uar tx_uar; /* DevX UAR for Tx and Txpp and ASO SQs. */
1254 struct mlx5_uar rx_uar; /* DevX UAR for Rx. */
1255 struct mlx5_proc_priv *pppriv; /* Pointer to primary private process. */
1256 struct mlx5_ecpri_parser_profile ecpri_parser;
1257 /* Flex parser profiles information. */
1258 LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
1259 struct mlx5_aso_age_mng *aso_age_mng;
1260 /* Management data for aging mechanism using ASO Flow Hit. */
1261 struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
1262 /* Management structure for GENEVE TLV option. */
1263 rte_spinlock_t geneve_tlv_opt_sl; /* Lock for GENEVE TLV resource. */
1264 struct mlx5_flow_mtr_mng *mtrmng;
1265 /* Meter management structure. */
1266 struct mlx5_aso_ct_pools_mng *ct_mng;
1267 /* Management data for ASO connection tracking. */
1268 struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
1269 unsigned int flow_max_priority;
1270 enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
1271 /* Availability of mreg_c's. */
1272 struct mlx5_dev_shared_port port[]; /* per device port data array. */
1276 * Per-process private structure.
1277 * Caution, secondary process may rebuild the struct during port start.
1279 struct mlx5_proc_priv {
1280 size_t uar_table_sz;
1281 /* Size of UAR register table. */
1282 struct mlx5_uar_data uar_table[];
1283 /* Table of UAR registers for each process. */
1286 /* MTR profile list. */
1287 TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
1289 TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
1291 /* RSS description. */
1292 struct mlx5_flow_rss_desc {
1294 uint32_t queue_num; /**< Number of entries in @p queue. */
1295 uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
1296 uint64_t hash_fields; /* Verbs Hash fields. */
1297 uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
1298 uint32_t key_len; /**< RSS hash key len. */
1299 uint32_t hws_flags; /**< HW steering action. */
1300 uint32_t tunnel; /**< Queue in tunnel. */
1301 uint32_t shared_rss; /**< Shared RSS index. */
1302 struct mlx5_ind_table_obj *ind_tbl;
1303 /**< Indirection table for shared RSS hash RX queues. */
1305 uint16_t *queue; /**< Destination queues. */
1306 const uint16_t *const_q; /**< Const pointer convert. */
1310 #define MLX5_PROC_PRIV(port_id) \
1311 ((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
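/*
 * Illustrative usage (not taken from the driver): looking up the current
 * process's private data for a given ethdev; "dev" is assumed to be a
 * struct rte_eth_dev *.
 *
 *   struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(dev->data->port_id);
 *   size_t uar_tbl_sz = ppriv->uar_table_sz;
 */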
1313 /* Verbs/DevX Rx queue elements. */
1314 struct mlx5_rxq_obj {
1315 LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
1316 struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
1317 int fd; /* File descriptor for event channel */
1321 void *wq; /* Work Queue. */
1322 void *ibv_cq; /* Completion Queue. */
1325 struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
1327 struct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */
1328 struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
1334 /* Indirection table. */
1335 struct mlx5_ind_table_obj {
1336 LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
1337 uint32_t refcnt; /* Reference counter. */
1340 void *ind_table; /**< Indirection table. */
1341 struct mlx5_devx_obj *rqt; /* DevX RQT object. */
1343 uint32_t queues_n; /**< Number of queues in the list. */
1344 uint16_t *queues; /**< Queue list. */
1347 /* Hash Rx queue. */
1350 struct mlx5_list_entry entry; /* List entry. */
1351 uint32_t standalone:1; /* This object used in shared action. */
1352 struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
1355 void *qp; /* Verbs queue pair. */
1356 struct mlx5_devx_obj *tir; /* DevX TIR object. */
1358 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1359 void *action; /* DV QP action pointer. */
1361 uint32_t hws_flags; /* Hw steering flags. */
1362 uint64_t hash_fields; /* Verbs Hash fields. */
1363 uint32_t rss_key_len; /* Hash key length in bytes. */
1364 uint32_t idx; /* Hash Rx queue index. */
1365 uint8_t rss_key[]; /* Hash key. */
1368 /* Verbs/DevX Tx queue elements. */
1369 struct mlx5_txq_obj {
1370 LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
1371 struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
1375 void *cq; /* Completion Queue. */
1376 void *qp; /* Queue Pair. */
1379 struct mlx5_devx_obj *sq;
1380 /* DevX object for the Tx SQ. */
1381 struct mlx5_devx_obj *tis; /* The TIS object. */
1384 struct rte_eth_dev *dev;
1385 struct mlx5_devx_cq cq_obj;
1386 /* DevX CQ object and its resources. */
1387 struct mlx5_devx_sq sq_obj;
1388 /* DevX SQ object and its resources. */
1393 enum mlx5_rxq_modify_type {
1394 MLX5_RXQ_MOD_ERR2RST, /* modify state from error to reset. */
1395 MLX5_RXQ_MOD_RST2RDY, /* modify state from reset to ready. */
1396 MLX5_RXQ_MOD_RDY2ERR, /* modify state from ready to error. */
1397 MLX5_RXQ_MOD_RDY2RST, /* modify state from ready to reset. */
1400 enum mlx5_txq_modify_type {
1401 MLX5_TXQ_MOD_RST2RDY, /* modify state from reset to ready. */
1402 MLX5_TXQ_MOD_RDY2RST, /* modify state from ready to reset. */
1403 MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */
1406 struct mlx5_rxq_priv;
1408 /* HW objects operations structure. */
1409 struct mlx5_obj_ops {
1410 int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);
1411 int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
1412 int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
1413 int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
1414 void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
1415 int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
1416 struct mlx5_ind_table_obj *ind_tbl);
1417 int (*ind_table_modify)(struct rte_eth_dev *dev,
1418 const unsigned int log_n,
1419 const uint16_t *queues, const uint32_t queues_n,
1420 struct mlx5_ind_table_obj *ind_tbl);
1421 void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
1422 int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
1423 int tunnel __rte_unused);
1424 int (*hrxq_modify)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
1425 const uint8_t *rss_key,
1426 uint64_t hash_fields,
1427 const struct mlx5_ind_table_obj *ind_tbl);
1428 void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
1429 int (*drop_action_create)(struct rte_eth_dev *dev);
1430 void (*drop_action_destroy)(struct rte_eth_dev *dev);
1431 int (*txq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
1432 int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
1433 enum mlx5_txq_modify_type type, uint8_t dev_port);
1434 void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
1435 int (*lb_dummy_queue_create)(struct rte_eth_dev *dev);
1436 void (*lb_dummy_queue_release)(struct rte_eth_dev *dev);
1439 #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
1442 struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
1443 struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
1444 uint32_t dev_port; /* Device port number. */
1445 struct rte_pci_device *pci_dev; /* Backend PCI device. */
1446 struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
1447 BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
1448 /* Bit-field of MAC addresses owned by the PMD. */
1449 uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
1450 unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
1451 /* Device properties. */
1452 uint16_t mtu; /* Configured MTU. */
1453 unsigned int isolated:1; /* Whether isolated mode is enabled. */
1454 unsigned int representor:1; /* Device is a port representor. */
1455 unsigned int master:1; /* Device is an E-Switch master. */
1456 unsigned int txpp_en:1; /* Tx packet pacing enabled. */
1457 unsigned int sampler_en:1; /* Whether the sampler is supported. */
1458 unsigned int mtr_en:1; /* Whether the meter is supported. */
1459 unsigned int mtr_reg_share:1; /* Whether meter REG_C sharing is supported. */
1460 unsigned int lb_used:1; /* Loopback queue is referred to. */
1461 uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
1462 uint16_t domain_id; /* Switch domain identifier. */
1463 uint16_t vport_id; /* Associated VF vport index (if any). */
1464 uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
1465 uint32_t vport_meta_mask; /* Used for vport index field match mask. */
1466 uint16_t representor_id; /* UINT16_MAX if not a representor. */
1467 int32_t pf_bond; /* >=0, representor owner PF index in bonding. */
1468 unsigned int if_index; /* Associated kernel network device index. */
1470 unsigned int rxqs_n; /* RX queues array size. */
1471 unsigned int txqs_n; /* TX queues array size. */
1472 struct mlx5_external_rxq *ext_rxqs; /* External RX queues array. */
1473 struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
1474 struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
1475 struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
1476 struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
1477 unsigned int (*reta_idx)[]; /* RETA index table. */
1478 unsigned int reta_idx_n; /* RETA index size. */
1479 struct mlx5_drop drop_queue; /* Flow drop queues. */
1480 void *root_drop_action; /* Pointer to root drop action. */
1481 struct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];
1482 /* RTE Flow rules. */
1483 uint32_t ctrl_flows; /* Control flow rules. */
1484 rte_spinlock_t flow_list_lock;
1485 struct mlx5_obj_ops obj_ops; /* HW objects operations. */
1486 LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
1487 LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
1488 struct mlx5_list *hrxqs; /* Hash Rx queues. */
1489 LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
1490 LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
1491 /* Indirection tables. */
1492 LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
1493 /* Standalone indirect tables. */
1494 LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
1495 /* Lock for the indirection table lists. */
1496 rte_rwlock_t ind_tbls_lock;
1497 uint32_t refcnt; /**< Reference counter. */
1498 /**< Verbs modify header action object. */
1499 uint8_t ft_type; /**< Flow table type, Rx or Tx. */
1500 uint8_t max_lro_msg_size;
1501 uint32_t link_speed_capa; /* Link speed capabilities. */
1502 struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
1503 struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
1504 struct mlx5_port_config config; /* Port configuration. */
1505 /* Context for Verbs allocator. */
1506 int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
1507 int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
1508 struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
1509 struct mlx5_hlist *mreg_cp_tbl;
1510 /* Hash table of Rx metadata register copy table. */
1511 uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
1512 uint8_t mtr_color_reg; /* Meter color match REG_C. */
1513 struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
1514 struct mlx5_l3t_tbl *mtr_profile_tbl; /* Meter index lookup table. */
1515 struct mlx5_l3t_tbl *policy_idx_tbl; /* Policy index lookup table. */
1516 struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
1517 uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
1518 uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
1519 struct mlx5_mp_id mp_id; /* ID of a multi-process process */
1520 LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
1521 rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
1522 uint32_t rss_shared_actions; /* RSS shared actions. */
1523 struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
1524 uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
1525 uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting index. */
1526 rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
1527 struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
1528 /* Flex items have been created on the port. */
1529 uint32_t flex_item_map; /* Map of allocated flex item elements. */
1530 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1531 /* Item template list. */
1532 LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
1533 /* Action template list. */
1534 LIST_HEAD(flow_hw_at, rte_flow_actions_template) flow_hw_at;
1535 struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
1536 uint32_t nb_queue; /* HW steering queue number. */
1537 /* Job descriptor LIFO for the HW steering queue polling mechanism. */
1538 struct mlx5_hw_q *hw_q;
1539 /* HW steering rte flow table list header. */
1540 LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
1541 /* HW steering global drop action. */
1542 struct mlx5dr_action *hw_drop[MLX5_HW_ACTION_FLAG_MAX]
1543 [MLX5DR_TABLE_TYPE_MAX];
1544 /* HW steering global tag action. */
1545 struct mlx5dr_action *hw_tag[MLX5_HW_ACTION_FLAG_MAX];
1546 struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
1547 #endif
1548 };
1550 #define PORT_ID(priv) ((priv)->dev_data->port_id)
1551 #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
1553 struct rte_hairpin_peer_info {
1557 uint16_t tx_explicit;
1558 uint16_t manual_bind;
1559 };
1561 #define BUF_SIZE 1024
1562 enum dr_dump_rec_type {
1563 DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT = 4410,
1564 DR_DUMP_REC_TYPE_PMD_MODIFY_HDR = 4420,
1565 DR_DUMP_REC_TYPE_PMD_COUNTER = 4430,
1566 };
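/*
 * Illustrative sketch (assumption, not from the driver sources): these record
 * type codes tag entries emitted through save_dump_file(), declared further
 * below. A counter record could, for illustration only, be written as
 *
 *   struct rte_flow_query_count count = { .hits = hits, .bytes = bytes };
 *
 *   save_dump_file(NULL, 0, DR_DUMP_REC_TYPE_PMD_COUNTER, counter_id,
 *                  &count, file);
 *
 * The actual record layout is defined by the dump code in mlx5_flow.c;
 * "hits", "bytes", "counter_id" and "file" are hypothetical locals here.
 */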
1568 /*
1569  * Indicates whether HW objects operations can be created by DevX.
1570  *
1571  * This function is used for both:
1572  * Before creation - deciding whether to create HW objects operations by DevX.
1573  * After creation - indicator if HW objects operations were created by DevX.
1574  *
1575  * @param sh
1576  *   Pointer to shared device context.
1577  *
1578  * @return
1579  *   True if HW objects were created by DevX, False otherwise.
1580  */
1581 static inline bool
1582 mlx5_devx_obj_ops_en(struct mlx5_dev_ctx_shared *sh)
1583 {
1584 /*
1585  * When advanced DR API is available and DV flow is supported and
1586  * DevX is supported, HW objects operations are created by DevX.
1587  */
1588 return (sh->cdev->config.devx && sh->config.dv_flow_en &&
1589 sh->dev_cap.dest_tir);
1590 }
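/*
 * Illustrative usage sketch (assumption): the probe path typically uses this
 * predicate to pick between the DevX and Verbs queue object callbacks, e.g.
 *
 *   if (mlx5_devx_obj_ops_en(sh))
 *       priv->obj_ops = devx_obj_ops;
 *   else
 *       priv->obj_ops = ibv_obj_ops;
 *
 * "devx_obj_ops" and "ibv_obj_ops" are assumed names for the two
 * mlx5_obj_ops tables; the real selection logic lives in the OS probe code.
 */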
1594 int mlx5_getenv_int(const char *);
1595 int mlx5_proc_priv_init(struct rte_eth_dev *dev);
1596 void mlx5_proc_priv_uninit(struct rte_eth_dev *dev);
1597 int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
1598 struct rte_eth_udp_tunnel *udp_tunnel);
1599 uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev);
1600 int mlx5_dev_close(struct rte_eth_dev *dev);
1601 int mlx5_net_remove(struct mlx5_common_device *cdev);
1602 bool mlx5_is_hpf(struct rte_eth_dev *dev);
1603 bool mlx5_is_sf_repr(struct rte_eth_dev *dev);
1604 void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
1606 /* Macro to iterate over all valid ports for mlx5 driver. */
1607 #define MLX5_ETH_FOREACH_DEV(port_id, dev) \
1608 for (port_id = mlx5_eth_find_next(0, dev); \
1609 port_id < RTE_MAX_ETHPORTS; \
1610 port_id = mlx5_eth_find_next(port_id + 1, dev))
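/*
 * Illustrative usage sketch (assumption): iterate over every mlx5 port that
 * belongs to a given rte_device, or over all mlx5 ports when NULL is passed:
 *
 *   uint16_t port_id;
 *
 *   MLX5_ETH_FOREACH_DEV(port_id, NULL) {
 *       struct mlx5_priv *priv =
 *           rte_eth_devices[port_id].data->dev_private;
 *       ... per-port handling ...
 *   }
 */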
1611 void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
1612 struct mlx5_hca_attr *hca_attr);
1613 struct mlx5_dev_ctx_shared *
1614 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
1615 struct mlx5_kvargs_ctrl *mkvlist);
1616 void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
1617 int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
1618 void mlx5_free_table_hash_list(struct mlx5_priv *priv);
1619 int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
1620 void mlx5_set_min_inline(struct mlx5_priv *priv);
1621 void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
1622 int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
1623 struct mlx5_kvargs_ctrl *mkvlist);
1624 int mlx5_port_args_config(struct mlx5_priv *priv,
1625 struct mlx5_kvargs_ctrl *mkvlist,
1626 struct mlx5_port_config *config);
1627 void mlx5_port_args_set_used(const char *name, uint16_t port_id,
1628 struct mlx5_kvargs_ctrl *mkvlist);
1629 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
1630 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
1631 void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
1632 int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
1633 int mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh);
1634 int mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh);
1638 int mlx5_dev_configure(struct rte_eth_dev *dev);
1639 int mlx5_representor_info_get(struct rte_eth_dev *dev,
1640 struct rte_eth_representor_info *info);
1641 #define MLX5_REPRESENTOR_ID(pf, type, repr) \
1642 (((pf) << 14) + ((type) << 12) + ((repr) & 0xfff))
1643 #define MLX5_REPRESENTOR_REPR(repr_id) \
1644 ((repr_id) & 0xfff)
1645 #define MLX5_REPRESENTOR_TYPE(repr_id) \
1646 (((repr_id) >> 12) & 3)
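/*
 * Illustrative note: the representor ID packs the PF index into bits 15:14,
 * the representor type into bits 13:12 and the representor number into bits
 * 11:0. For example, pf = 1, type = 2, repr = 5 encodes as
 *
 *   MLX5_REPRESENTOR_ID(1, 2, 5) == (1 << 14) + (2 << 12) + 5 == 0x6005
 *
 * from which MLX5_REPRESENTOR_TYPE() recovers 2 and MLX5_REPRESENTOR_REPR()
 * recovers 5.
 */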
1647 uint16_t mlx5_representor_id_encode(const struct mlx5_switch_info *info,
1648 enum rte_eth_representor_type hpf_type);
1649 int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
1650 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
1651 const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
1652 int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
1653 int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
1654 struct rte_eth_hairpin_cap *cap);
1655 eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
1656 struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
1657 struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
1658 int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev);
1660 /* mlx5_ethdev_os.c */
1662 int mlx5_get_ifname(const struct rte_eth_dev *dev,
1663 char (*ifname)[MLX5_NAMESIZE]);
1664 unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
1665 int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
1666 int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
1667 int mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
1668 int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
1669 int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
1670 int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
1671 struct rte_eth_fc_conf *fc_conf);
1672 int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
1673 struct rte_eth_fc_conf *fc_conf);
1674 void mlx5_dev_interrupt_handler(void *arg);
1675 void mlx5_dev_interrupt_handler_devx(void *arg);
1676 void mlx5_dev_interrupt_handler_nl(void *arg);
1677 int mlx5_set_link_down(struct rte_eth_dev *dev);
1678 int mlx5_set_link_up(struct rte_eth_dev *dev);
1679 int mlx5_is_removed(struct rte_eth_dev *dev);
1680 int mlx5_sysfs_switch_info(unsigned int ifindex,
1681 struct mlx5_switch_info *info);
1682 void mlx5_translate_port_name(const char *port_name_in,
1683 struct mlx5_switch_info *port_info_out);
1684 void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
1685 rte_intr_callback_fn cb_fn, void *cb_arg);
1686 int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
1687 char *ifname);
1688 int mlx5_get_module_info(struct rte_eth_dev *dev,
1689 struct rte_eth_dev_module_info *modinfo);
1690 int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
1691 struct rte_dev_eeprom_info *info);
1692 int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
1693 const char *ctr_name, uint64_t *stat);
1694 int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
1695 int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
1696 void mlx5_os_stats_init(struct rte_eth_dev *dev);
1697 int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev);
1701 void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
1702 int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
1703 uint32_t index, uint32_t vmdq);
1704 int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
1705 int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
1706 struct rte_ether_addr *mc_addr_set,
1707 uint32_t nb_mc_addr);
1711 int mlx5_rss_hash_update(struct rte_eth_dev *dev,
1712 struct rte_eth_rss_conf *rss_conf);
1713 int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
1714 struct rte_eth_rss_conf *rss_conf);
1715 int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
1716 int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
1717 struct rte_eth_rss_reta_entry64 *reta_conf,
1718 uint16_t reta_size);
1719 int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
1720 struct rte_eth_rss_reta_entry64 *reta_conf,
1721 uint16_t reta_size);
1725 int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
1726 int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
1727 int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
1728 int mlx5_allmulticast_disable(struct rte_eth_dev *dev);
1732 int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
1733 int mlx5_stats_reset(struct rte_eth_dev *dev);
1734 int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1735 unsigned int n);
1736 int mlx5_xstats_reset(struct rte_eth_dev *dev);
1737 int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1738 struct rte_eth_xstat_name *xstats_names,
1739 unsigned int n);
1743 int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
1744 void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
1745 int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
1747 /* mlx5_vlan_os.c */
1749 void mlx5_vlan_vmwa_exit(void *ctx);
1750 void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
1751 struct mlx5_vf_vlan *vf_vlan);
1752 void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
1753 struct mlx5_vf_vlan *vf_vlan);
1754 void *mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex);
1756 /* mlx5_trigger.c */
1758 int mlx5_dev_start(struct rte_eth_dev *dev);
1759 int mlx5_dev_stop(struct rte_eth_dev *dev);
1760 int mlx5_traffic_enable(struct rte_eth_dev *dev);
1761 void mlx5_traffic_disable(struct rte_eth_dev *dev);
1762 int mlx5_traffic_restart(struct rte_eth_dev *dev);
1763 int mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
1764 struct rte_hairpin_peer_info *current_info,
1765 struct rte_hairpin_peer_info *peer_info,
1766 uint32_t direction);
1767 int mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
1768 struct rte_hairpin_peer_info *peer_info,
1769 uint32_t direction);
1770 int mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
1771 uint32_t direction);
1772 int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port);
1773 int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port);
1774 int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
1775 size_t len, uint32_t direction);
1779 int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
1780 bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
1781 void mlx5_flow_print(struct rte_flow *flow);
1782 int mlx5_flow_validate(struct rte_eth_dev *dev,
1783 const struct rte_flow_attr *attr,
1784 const struct rte_flow_item items[],
1785 const struct rte_flow_action actions[],
1786 struct rte_flow_error *error);
1787 struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
1788 const struct rte_flow_attr *attr,
1789 const struct rte_flow_item items[],
1790 const struct rte_flow_action actions[],
1791 struct rte_flow_error *error);
1792 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1793 struct rte_flow_error *error);
1794 void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
1795 bool active);
1796 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
1797 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1798 const struct rte_flow_action *action, void *data,
1799 struct rte_flow_error *error);
1800 int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
1801 struct rte_flow_error *error);
1802 int mlx5_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
1803 int mlx5_flow_start_default(struct rte_eth_dev *dev);
1804 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
1805 int mlx5_flow_verify(struct rte_eth_dev *dev);
1806 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
1807 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
1808 struct rte_flow_item_eth *eth_spec,
1809 struct rte_flow_item_eth *eth_mask,
1810 struct rte_flow_item_vlan *vlan_spec,
1811 struct rte_flow_item_vlan *vlan_mask);
1812 int mlx5_ctrl_flow(struct rte_eth_dev *dev,
1813 struct rte_flow_item_eth *eth_spec,
1814 struct rte_flow_item_eth *eth_mask);
1815 int mlx5_flow_lacp_miss(struct rte_eth_dev *dev);
1816 struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
1817 uint32_t mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev,
1818 uint32_t txq);
1819 void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
1820 uint64_t async_id, int status);
1821 void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
1822 void mlx5_flow_query_alarm(void *arg);
1823 uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
1824 void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
1825 int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
1826 bool clear, uint64_t *pkts, uint64_t *bytes, void **action);
1827 int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow,
1828 FILE *file, struct rte_flow_error *error);
1829 int save_dump_file(const unsigned char *data, uint32_t size,
1830 uint32_t type, uint64_t id, void *arg, FILE *file);
1831 int mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
1832 struct rte_flow_query_count *count, struct rte_flow_error *error);
1833 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1834 int mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, struct rte_flow *flow,
1835 FILE *file, struct rte_flow_error *error);
1836 #endif
1837 void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev);
1838 int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
1839 uint32_t nb_contexts, struct rte_flow_error *error);
1840 int mlx5_validate_action_ct(struct rte_eth_dev *dev,
1841 const struct rte_flow_action_conntrack *conntrack,
1842 struct rte_flow_error *error);
1847 int mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg,
1848 const void *peer);
1849 int mlx5_mp_os_secondary_handle(const struct rte_mp_msg *mp_msg,
1850 const void *peer);
1851 void mlx5_mp_os_req_start_rxtx(struct rte_eth_dev *dev);
1852 void mlx5_mp_os_req_stop_rxtx(struct rte_eth_dev *dev);
1853 int mlx5_mp_os_req_queue_control(struct rte_eth_dev *dev, uint16_t queue_id,
1854 enum mlx5_mp_req_type req_type);
1858 int mlx5_pmd_socket_init(void);
1859 void mlx5_pmd_socket_uninit(void);
1861 /* mlx5_flow_meter.c */
1863 int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
1864 struct mlx5_flow_meter_info *mlx5_flow_meter_find(struct mlx5_priv *priv,
1865 uint32_t meter_id, uint32_t *mtr_idx);
1866 struct mlx5_flow_meter_info *
1867 flow_dv_meter_find_by_idx(struct mlx5_priv *priv, uint32_t idx);
1868 int mlx5_flow_meter_attach(struct mlx5_priv *priv,
1869 struct mlx5_flow_meter_info *fm,
1870 const struct rte_flow_attr *attr,
1871 struct rte_flow_error *error);
1872 void mlx5_flow_meter_detach(struct mlx5_priv *priv,
1873 struct mlx5_flow_meter_info *fm);
1874 struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
1875 (struct rte_eth_dev *dev,
1876 uint32_t policy_id,
1877 uint32_t *policy_idx);
1878 struct mlx5_flow_meter_info *
1879 mlx5_flow_meter_hierarchy_next_meter(struct mlx5_priv *priv,
1880 struct mlx5_flow_meter_policy *policy,
1881 uint32_t *mtr_idx);
1882 struct mlx5_flow_meter_policy *
1883 mlx5_flow_meter_hierarchy_get_final_policy(struct rte_eth_dev *dev,
1884 struct mlx5_flow_meter_policy *policy);
1885 int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
1886 struct rte_mtr_error *error);
1887 void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
1891 struct rte_pci_driver;
1892 int mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh);
1893 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
1894 int mlx5_os_net_probe(struct mlx5_common_device *cdev,
1895 struct mlx5_kvargs_ctrl *mkvlist);
1896 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
1897 void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
1898 void mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
1899 int mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
1900 uint32_t index);
1901 int mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, unsigned int iface_idx,
1902 struct rte_ether_addr *mac_addr,
1903 int vf_index);
1904 int mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable);
1905 int mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable);
1906 int mlx5_os_set_nonblock_channel_fd(int fd);
1907 void mlx5_os_mac_addr_flush(struct rte_eth_dev *dev);
1908 void mlx5_os_net_cleanup(void);
1912 int mlx5_txpp_start(struct rte_eth_dev *dev);
1913 void mlx5_txpp_stop(struct rte_eth_dev *dev);
1914 int mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp);
1915 int mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
1916 struct rte_eth_xstat *stats,
1917 unsigned int n, unsigned int n_used);
1918 int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev);
1919 int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev,
1920 struct rte_eth_xstat_name *xstats_names,
1921 unsigned int n, unsigned int n_used);
1922 void mlx5_txpp_interrupt_handler(void *cb_arg);
1926 eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
1928 /* mlx5_flow_aso.c */
1930 int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
1931 enum mlx5_access_aso_opc_mod aso_opc_mod);
1932 int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
1933 int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
1934 void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
1935 enum mlx5_access_aso_opc_mod aso_opc_mod);
1936 int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
1937 struct mlx5_aso_mtr *mtr);
1938 int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
1939 struct mlx5_aso_mtr *mtr);
1940 int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
1941 struct mlx5_aso_ct_action *ct,
1942 const struct rte_flow_action_conntrack *profile);
1943 int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
1944 struct mlx5_aso_ct_action *ct);
1945 int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
1946 struct mlx5_aso_ct_action *ct,
1947 struct rte_flow_action_conntrack *profile);
1948 int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
1949 struct mlx5_aso_ct_action *ct);
1950 uint32_t
1951 mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
1952 uint32_t
1953 mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
1955 /* mlx5_flow_flex.c */
1957 struct rte_flow_item_flex_handle *
1958 flow_dv_item_create(struct rte_eth_dev *dev,
1959 const struct rte_flow_item_flex_conf *conf,
1960 struct rte_flow_error *error);
1961 int flow_dv_item_release(struct rte_eth_dev *dev,
1962 const struct rte_flow_item_flex_handle *flex_handle,
1963 struct rte_flow_error *error);
1964 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
1965 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
1966 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
1967 void *key, const struct rte_flow_item *item,
1968 bool is_inner);
1969 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
1970 struct rte_flow_item_flex_handle *handle,
1971 bool acquire);
1972 int mlx5_flex_release_index(struct rte_eth_dev *dev, int index);
1974 /* Flex parser list callbacks. */
1975 struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
1976 int mlx5_flex_parser_match_cb(void *list_ctx,
1977 struct mlx5_list_entry *iter, void *ctx);
1978 void mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry);
1979 struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
1980 struct mlx5_list_entry *entry,
1981 void *ctx);
1982 void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
1983 struct mlx5_list_entry *entry);
1984 #endif /* RTE_PMD_MLX5_H_ */