/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_H_
#define RTE_PMD_MLX5_H_

#include <sys/queue.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_rwlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_common_devx.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"

#define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)
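
/*
 * Illustrative usage sketch (not part of the driver): MLX5_SH() resolves
 * the shared device context from an rte_eth_dev owned by this PMD:
 *
 *   struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
 */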

enum mlx5_ipool_index {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
	MLX5_IPOOL_PUSH_VLAN, /* Pool for push VLAN resource. */
	MLX5_IPOOL_TAG, /* Pool for tag resource. */
	MLX5_IPOOL_PORT_ID, /* Pool for port ID resource. */
	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context. */
	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
#endif
	MLX5_IPOOL_MTR, /* Pool for meter resource. */
	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
	MLX5_IPOOL_MAX,
};

/*
 * Three memory reclaim modes are supported:
 * 0 (none) - no memory reclaim.
 * 1 (light) - PMD-level reclaim only.
 * 2 (aggressive) - both PMD-level and rdma-core-level reclaim.
 */
enum mlx5_reclaim_mem_mode {
	MLX5_RCM_NONE, /* Don't reclaim memory. */
	MLX5_RCM_LIGHT, /* Reclaim PMD-level memory only. */
	MLX5_RCM_AGGR, /* Reclaim both PMD-level and rdma-core memory. */
};
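
/*
 * Illustrative usage sketch, assuming the standard mlx5 devargs syntax
 * (the PCI address below is hypothetical): the mode is selected with the
 * reclaim_mem_mode device argument, e.g. aggressive reclaim:
 *
 *   dpdk-testpmd -a 0000:03:00.0,reclaim_mem_mode=2 -- -i
 */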

/* Hash and cache list callback context. */
struct mlx5_flow_cb_ctx {
	struct rte_eth_dev *dev;
	struct rte_flow_error *error;
	void *data;
};

/* Device attributes used in mlx5 PMD. */
struct mlx5_dev_attr {
	uint64_t device_cap_flags_ex;
	uint32_t raw_packet_caps;
	uint32_t max_rwq_indirection_table_size;
	uint32_t tso_supported_qpts;
	uint32_t sw_parsing_offloads;
	uint32_t min_single_stride_log_num_of_bytes;
	uint32_t max_single_stride_log_num_of_bytes;
	uint32_t min_single_wqe_log_num_of_strides;
	uint32_t max_single_wqe_log_num_of_strides;
	uint32_t stride_supported_qpts;
	uint32_t tunnel_offloads_caps;
};

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	uint32_t ifindex; /**< Network interface index. */
	uint32_t max_port; /**< Device maximal port index. */
	uint32_t phys_port; /**< Device physical port index. */
	int pf_bond; /**< Bonding device PF index; < 0 means no bonding. */
	struct mlx5_switch_info info; /**< Switch information. */
	void *phys_dev; /**< Associated physical device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
	struct mlx5_bond_info *bond_info;
};

/** Data associated with socket messages. */
struct mlx5_flow_dump_req {
	uint32_t port_id; /**< There are plans in DPDK to extend port_id. */
	uint64_t flow_id;
} __rte_packed;

struct mlx5_flow_dump_ack {
	int rc; /**< Return code. */
};

/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"

LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);

/* Shared data between primary and secondary processes. */
struct mlx5_shared_data {
	rte_spinlock_t lock;
	/* Global spinlock for primary and secondary processes. */
	int init_done; /* Whether primary has done initialization. */
	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
	struct mlx5_dev_list mem_event_cb_list;
	rte_rwlock_t mem_event_rwlock;
};

/* Per-process data structure, not visible to other processes. */
struct mlx5_local_data {
	int init_done; /* Whether a secondary has done initialization. */
};

extern struct mlx5_shared_data *mlx5_shared_data;

/* Dev ops structs */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_sec_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

struct mlx5_counter_ctrl {
	/* Name of the counter. */
	char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
	/* Name of the counter on the device table. */
	char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t dev:1; /**< Nonzero for dev counters. */
};

struct mlx5_xstats_ctrl {
	/* Number of device stats. */
	uint16_t stats_n;
	/* Number of device stats identified by PMD. */
	uint16_t mlx5_stats_n;
	/* Index in the device counters table. */
	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
	uint64_t base[MLX5_MAX_XSTATS];
	uint64_t xstats[MLX5_MAX_XSTATS];
	uint64_t hw_stats[MLX5_MAX_XSTATS];
	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
};

struct mlx5_stats_ctrl {
	/* Base for imissed counter. */
	uint64_t imissed_base;
	uint64_t imissed;
};

/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)

#define MLX5_LRO_SUPPORTED(dev) \
	(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)

/* Maximal size of coalesced segment for LRO is set in chunks of 256 Bytes. */
#define MLX5_LRO_SEG_CHUNK_SIZE 256u

/* Maximal size of aggregated LRO packet. */
#define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
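
/*
 * Worked example: UINT8_MAX (255) chunks of MLX5_LRO_SEG_CHUNK_SIZE
 * (256) bytes each give a maximal aggregated LRO packet of
 * 255 * 256 = 65280 bytes.
 */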

/* Maximal number of segments to split. */
#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)

/* LRO configurations structure. */
struct mlx5_lro_config {
	uint32_t supported:1; /* Whether LRO is supported. */
	uint32_t timeout; /* User configuration. */
};

/*
 * Device configuration structure.
 *
 * Merged configuration from:
 *
 *  - Device capabilities,
 *  - Features disabled by user device parameters.
 */
struct mlx5_dev_config {
	unsigned int hw_csum:1; /* Checksum offload is supported. */
	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
	unsigned int hw_padding:1; /* End alignment padding is supported. */
	unsigned int vf:1; /* This is a VF. */
	unsigned int tunnel_en:1;
	/* Whether tunnel stateless offloads are supported. */
	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
	unsigned int cqe_comp:1; /* CQE compression is enabled. */
	unsigned int cqe_comp_fmt:3; /* CQE compression format. */
	unsigned int tso:1; /* Whether TSO is supported. */
	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
	unsigned int mr_ext_memseg_en:1;
	/* Whether memseg should be extended for MR creation. */
	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
	unsigned int dv_flow_en:1; /* Enable DV flow. */
	unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
	unsigned int lacp_by_user:1;
	/* Enable user to manage LACP traffic. */
	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
	unsigned int devx:1; /* Whether devx interface is available or not. */
	unsigned int dest_tir:1; /* Whether advanced DR API is available. */
	unsigned int reclaim_mode:2; /* Memory reclaim mode. */
	unsigned int rt_timestamp:1; /* Realtime timestamp format. */
	unsigned int sys_mem_en:1; /* The default memory allocator. */
	unsigned int decap_en:1; /* Whether decap will be used or not. */
	unsigned int dv_miss_info:1; /* Restore packet after partial HW miss. */
	struct {
		unsigned int enabled:1; /* Whether MPRQ is enabled. */
		unsigned int stride_num_n; /* Number of strides. */
		unsigned int stride_size_n; /* Size of a stride. */
		unsigned int min_stride_size_n; /* Min size of a stride. */
		unsigned int max_stride_size_n; /* Max size of a stride. */
		unsigned int max_memcpy_len;
		/* Maximum packet size to memcpy Rx packets. */
		unsigned int min_rxqs_num;
		/* Rx queue count threshold to enable MPRQ. */
	} mprq; /* Configurations for Multi-Packet RQ. */
	int mps; /* Multi-packet send supported mode. */
	int dbnc; /* Skip doorbell register write barrier. */
	unsigned int flow_prio; /* Number of flow priorities. */
	enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
	/* Availability of mreg_c's. */
	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
	unsigned int ind_table_max_size; /* Maximum indirection table size. */
	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
	unsigned int log_hp_size; /* Single hairpin queue data size in total. */
	int txqs_inline; /* Queue number threshold for inlining. */
	int txq_inline_min; /* Minimal amount of data bytes to inline. */
	int txq_inline_max; /* Max packet size for inlining with SEND. */
	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
	int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
	int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
	struct mlx5_hca_attr hca_attr; /* HCA attributes. */
	struct mlx5_lro_config lro; /* LRO configuration. */
};

/* Structure for VF VLAN workaround. */
struct mlx5_vf_vlan {
	uint32_t tag:12; /* VLAN tag value. */
	uint32_t created:1; /* VLAN filter was created. */
};

/* Flow drop context necessary due to Verbs API. */
struct mlx5_drop {
	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
};

#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4
#define MLX5_CNT_CONTAINER_RESIZE 64
#define MLX5_CNT_SHARED_OFFSET 0x80000000
#define IS_SHARED_CNT(cnt) (!!((cnt) & MLX5_CNT_SHARED_OFFSET))
#define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \
			   MLX5_CNT_BATCH_OFFSET)
#define MLX5_CNT_SIZE (sizeof(struct mlx5_flow_counter))
#define MLX5_AGE_SIZE (sizeof(struct mlx5_age_param))
#define MLX5_CNT_LEN(pool) \
	(MLX5_CNT_SIZE + \
	 ((pool)->is_aged ? MLX5_AGE_SIZE : 0))
#define MLX5_POOL_GET_CNT(pool, index) \
	((struct mlx5_flow_counter *) \
	((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
#define MLX5_CNT_ARRAY_IDX(pool, cnt) \
	((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
	MLX5_CNT_LEN(pool)))

/*
 * The counter index is composed of the pool index and the counter's
 * offset within the pool array. A counter from pool 0 at offset 0 is
 * stored as index 1 (i.e. 1 is added), since index 0 currently means
 * an invalid counter.
 */
#define MLX5_MAKE_CNT_IDX(pi, offset) \
	((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
#define MLX5_CNT_TO_AGE(cnt) \
	((struct mlx5_age_param *)((cnt) + 1))
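
/*
 * Illustrative sketch (not part of the driver): for a counter without
 * the shared bit set (IS_SHARED_CNT() false), the pool index and
 * in-pool offset can be recovered from the composed index:
 *
 *   uint32_t idx = MLX5_MAKE_CNT_IDX(pi, off);
 *   uint32_t pool_idx = (idx - 1) / MLX5_COUNTERS_PER_POOL;
 *   uint32_t offset = (idx - 1) % MLX5_COUNTERS_PER_POOL;
 */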

/*
 * At most 0x800000 single counters exist, as MLX5_CNT_BATCH_OFFSET
 * defines. With 512 counters per pool, the pool index can never reach
 * UINT16_MAX (0x800000 / 512 = 16384), so UINT16_MAX is safe to use
 * as the invalid pool index marker.
 */
#define POOL_IDX_INVALID UINT16_MAX

/* Counter age state. */
enum {
	AGE_FREE, /* Initialized state. */
	AGE_CANDIDATE, /* Counter assigned to flows. */
	AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
};

enum mlx5_counter_type {
	MLX5_COUNTER_TYPE_ORIGIN,
	MLX5_COUNTER_TYPE_AGE,
	MLX5_COUNTER_TYPE_MAX,
};

/* Counter age parameter. */
struct mlx5_age_param {
	uint16_t state; /**< Age state (atomically accessed). */
	uint16_t port_id; /**< Port id of the counter. */
	uint32_t timeout:24; /**< Aging timeout in seconds. */
	uint32_t sec_since_last_hit;
	/**< Time in seconds since last hit (atomically accessed). */
	void *context; /**< Flow counter age context. */
};

struct flow_counter_stats {
	uint64_t hits;
	uint64_t bytes;
};

/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
	uint32_t id; /**< User counter ID. */
};

/* Shared counter configuration. */
struct mlx5_shared_counter_conf {
	struct rte_eth_dev *dev; /* The device shared counter belongs to. */
	uint32_t id; /* The shared counter ID. */
};

struct mlx5_flow_counter_pool;
/* Generic counters information. */
struct mlx5_flow_counter {
	union {
		/*
		 * The user-defined shared counter info is only used while
		 * the counter is active. Aging is not supported for shared
		 * counters, so an active shared counter is never chained to
		 * the aging list; its TAILQ entry memory is used only once
		 * the counter is released, at which point the shared info
		 * is no longer needed.
		 *
		 * Similarly, the dcs of a non-batch counter does not
		 * support aging: the entry memory is unused while the
		 * counter is allocated, and the bytes memory is used only
		 * while the counter is allocated. The dcs pointer can
		 * therefore be stored in two different places at different
		 * stages, which eliminates a separate counter extension
		 * structure.
		 */
		TAILQ_ENTRY(mlx5_flow_counter) next;
		/**< Pointer to the next flow counter structure. */
		struct {
			struct mlx5_flow_counter_shared shared_info;
			/**< Shared counter information. */
			void *dcs_when_active;
			/*
			 * For non-batch mode, the dcs is saved here
			 * while the counter is active.
			 */
		};
	};
	union {
		uint64_t hits; /**< Reset value of hits packets. */
		struct mlx5_flow_counter_pool *pool; /**< Counter pool. */
	};
	union {
		uint64_t bytes; /**< Reset value of bytes. */
		void *dcs_when_free;
		/*
		 * For non-batch mode, the dcs is saved here
		 * when the counter is free.
		 */
	};
	void *action; /**< Pointer to the dv action. */
};

TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);

/* Generic counter pool structure - query is in pool resolution. */
struct mlx5_flow_counter_pool {
	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
	struct mlx5_counters counters[2]; /* Free counter list. */
	struct mlx5_devx_obj *min_dcs;
	/* The devx object of the minimum counter ID. */
	uint64_t time_of_last_age_check;
	/* System time (from rte_rdtsc()) read in the last aging check. */
	uint32_t index:30; /* Pool index in container. */
	uint32_t is_aged:1; /* Pool with aging counter. */
	volatile uint32_t query_gen:1; /* Query round. */
	rte_spinlock_t sl; /* The pool lock. */
	rte_spinlock_t csl; /* The pool counter free list lock. */
	struct mlx5_counter_stats_raw *raw;
	struct mlx5_counter_stats_raw *raw_hw;
	/* The raw on HW working. */
};

/* Memory management structure for group of counter statistics raws. */
struct mlx5_counter_stats_mem_mng {
	LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
	struct mlx5_counter_stats_raw *raws;
	struct mlx5_devx_obj *dm;
	void *umem;
};

/* Raw memory structure for the counter statistics values of a pool. */
struct mlx5_counter_stats_raw {
	LIST_ENTRY(mlx5_counter_stats_raw) next;
	struct mlx5_counter_stats_mem_mng *mem_mng;
	volatile struct flow_counter_stats *data;
};

TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);

/* Counter global management structure. */
struct mlx5_flow_counter_mng {
	volatile uint16_t n_valid; /* Number of valid pools. */
	uint16_t n; /* Number of pools. */
	uint16_t last_pool_idx; /* Last used pool index. */
	int min_id; /* The minimum counter ID in the pools. */
	int max_id; /* The maximum counter ID in the pools. */
	rte_spinlock_t pool_update_sl; /* The pool update lock. */
	rte_spinlock_t csl[MLX5_COUNTER_TYPE_MAX];
	/* The counter free list lock. */
	struct mlx5_counters counters[MLX5_COUNTER_TYPE_MAX];
	/* Free counter list. */
	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
	struct mlx5_counter_stats_mem_mng *mem_mng;
	/* Hold the memory management for the next allocated pools raws. */
	struct mlx5_counters flow_counters; /* Legacy flow counter list. */
	uint8_t pending_queries;
	uint16_t pool_index;
	uint8_t query_thread_on;
	bool relaxed_ordering_read;
	bool relaxed_ordering_write;
	bool counter_fallback; /* Use counter fallback management. */
	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
};

/* ASO structures. */
#define MLX5_ASO_QUEUE_LOG_DESC 10

struct mlx5_aso_cq {
	struct mlx5_devx_cq cq_obj;
};

struct mlx5_aso_devx_mr {
	struct mlx5dv_devx_umem *umem;
	struct mlx5_devx_obj *mkey;
};

struct mlx5_aso_sq_elem {
	struct mlx5_aso_age_pool *pool;
};

struct mlx5_aso_sq {
	struct mlx5_aso_cq cq;
	struct mlx5_devx_sq sq_obj;
	volatile uint64_t *uar_addr;
	struct mlx5_aso_devx_mr mr;
	struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC];
	uint16_t next; /* Pool index of the next pool to query. */
};

struct mlx5_aso_age_action {
	LIST_ENTRY(mlx5_aso_age_action) next;
	/* Following fields relevant only when action is active. */
	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
	struct mlx5_age_param age_params;
};

#define MLX5_ASO_AGE_ACTIONS_PER_POOL 512

struct mlx5_aso_age_pool {
	struct mlx5_devx_obj *flow_hit_aso_obj;
	uint16_t index; /* Pool index in pools array. */
	uint64_t time_of_last_age_check; /* In seconds. */
	struct mlx5_aso_age_action actions[MLX5_ASO_AGE_ACTIONS_PER_POOL];
};

LIST_HEAD(aso_age_list, mlx5_aso_age_action);

struct mlx5_aso_age_mng {
	struct mlx5_aso_age_pool **pools;
	uint16_t n; /* Total number of pools. */
	uint16_t next; /* Number of pools in use, index of next free pool. */
	rte_spinlock_t resize_sl; /* Lock for resize objects. */
	rte_spinlock_t free_sl; /* Lock for free list access. */
	struct aso_age_list free; /* Free age actions list - ready to use. */
	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
};

/* Management structure for GENEVE TLV option. */
struct mlx5_geneve_tlv_option_resource {
	struct mlx5_devx_obj *obj; /* Pointer to the GENEVE TLV opt object. */
	rte_be16_t option_class; /* GENEVE TLV opt class. */
	uint8_t option_type; /* GENEVE TLV opt type. */
	uint8_t length; /* GENEVE TLV opt length. */
	uint32_t refcnt; /* GENEVE TLV object reference counter. */
};

#define MLX5_AGE_EVENT_NEW 1
#define MLX5_AGE_TRIGGER 2
#define MLX5_AGE_SET(age_info, BIT) \
	((age_info)->flags |= (1 << (BIT)))
#define MLX5_AGE_GET(age_info, BIT) \
	((age_info)->flags & (1 << (BIT)))
#define GET_PORT_AGE_INFO(priv) \
	(&((priv)->sh->port[(priv)->dev_port - 1].age_info))
/* Current time in seconds. */
#define MLX5_CURR_TIME_SEC (rte_rdtsc() / rte_get_tsc_hz())
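
/*
 * Illustrative usage sketch (not part of the driver): marking and
 * testing the "new aged flows" event on a port's aging info:
 *
 *   struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
 *
 *   MLX5_AGE_SET(info, MLX5_AGE_EVENT_NEW);
 *   if (MLX5_AGE_GET(info, MLX5_AGE_EVENT_NEW))
 *           notify_aged_flows();
 *
 * notify_aged_flows() is a hypothetical consumer.
 */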

/* Per-port aging information. */
struct mlx5_age_info {
	uint8_t flags; /* Whether there is a new event or a pending trigger. */
	struct mlx5_counters aged_counters; /* Aged counter list. */
	struct aso_age_list aged_aso; /* Aged ASO actions list. */
	rte_spinlock_t aged_sl; /* Aged flow list lock. */
};

/* Per-port data of shared IB device. */
struct mlx5_dev_shared_port {
	uint32_t ih_port_id;
	uint32_t devx_ih_port_id;
	/*
	 * Interrupt handler port_id. Used by the shared interrupt
	 * handler to find the corresponding rte_eth device by
	 * IB port index. If the value is equal to or greater than
	 * RTE_MAX_ETHPORTS, no subhandler is installed for the
	 * specified IB port index.
	 */
	struct mlx5_age_info age_info;
	/* Per-port aging information. */
};

/* Table key of the hash organization. */
union mlx5_flow_tbl_key {
	struct {
		/* Table ID should be at the lowest address. */
		uint32_t table_id; /**< ID of the table. */
		uint16_t dummy; /**< Dummy table for DV API. */
		uint8_t domain; /**< 1 - FDB, 0 - NIC TX/RX. */
		uint8_t direction; /**< 1 - egress, 0 - ingress. */
	};
	uint64_t v64; /**< Full 64-bit value of the key. */
};
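
/*
 * Illustrative sketch (not part of the driver): the anonymous struct
 * and v64 alias the same 8 bytes, so a key can be filled field by
 * field and then hashed or compared as one 64-bit value;
 * do_hash_lookup() is a hypothetical consumer:
 *
 *   union mlx5_flow_tbl_key key = { .v64 = 0 };
 *
 *   key.table_id = 5;
 *   key.domain = 0;
 *   key.direction = 1;
 *   do_hash_lookup(key.v64);
 */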

/* Table structure. */
struct mlx5_flow_tbl_resource {
	void *obj; /**< Pointer to DR table object. */
	uint32_t refcnt; /**< Reference counter. */
};

#define MLX5_MAX_TABLES UINT16_MAX
#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
/* Reserve the last two tables for metadata register copy. */
#define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
/* Tables for metering splits should be added here. */
#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3)
#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_METER
#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_FLOW_TABLE_FACTOR 10
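
/*
 * Worked example: with MLX5_MAX_TABLES = UINT16_MAX (65535), the
 * metadata register copy tables are 65534 and 65533, the meter split
 * suffix and meter tables are 65532 and 65531, and tables below
 * MLX5_MAX_TABLES_EXTERNAL (65531) remain available for external use.
 */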

/* ID generation structure. */
struct mlx5_flow_id_pool {
	uint32_t *free_arr; /**< Pointer to an array of free values. */
	uint32_t base_index;
	/**< The next index that can be used without any free elements. */
	uint32_t *curr; /**< Pointer to the index to pop. */
	uint32_t *last; /**< Pointer to the last element in the empty array. */
	uint32_t max_id; /**< Maximum ID that can be allocated from the pool. */
};

/* Tx pacing queue structure - for Clock and Rearm queues. */
struct mlx5_txpp_wq {
	/* Completion Queue related data. */
	struct mlx5_devx_cq cq_obj;
	/* Send Queue related data. */
	struct mlx5_devx_sq sq_obj;
	uint16_t sq_size; /* Number of WQEs in the queue. */
	uint16_t sq_ci; /* Next WQE to execute. */
};

/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
	uint64_t ci_ts;
	uint64_t ts;
};

/* Tx packet pacing structure. */
struct mlx5_dev_txpp {
	pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
	uint32_t refcnt; /* Pacing reference counter. */
	uint32_t freq; /* Timestamp frequency, Hz. */
	uint32_t tick; /* Completion tick duration in nanoseconds. */
	uint32_t test; /* Packet pacing test mode. */
	int32_t skew; /* Scheduling skew. */
	struct rte_intr_handle intr_handle; /* Periodic interrupt. */
	void *echan; /* Event Channel. */
	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
	struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
	void *pp; /* Packet pacing context. */
	uint16_t pp_id; /* Packet pacing context index. */
	uint16_t ts_n; /* Number of captured timestamps. */
	uint16_t ts_p; /* Pointer to statistics timestamp. */
	struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
	/* Statistics counters. */
	uint64_t err_miss_int; /* Missed service interrupt. */
	uint64_t err_rearm_queue; /* Rearm Queue errors. */
	uint64_t err_clock_queue; /* Clock Queue errors. */
	uint64_t err_ts_past; /* Timestamp in the past. */
	uint64_t err_ts_future; /* Timestamp in the distant future. */
};

/* Supported flex parser profile ID. */
enum mlx5_flex_parser_profile_id {
	MLX5_FLEX_PARSER_ECPRI_0 = 0,
	MLX5_FLEX_PARSER_MAX = 8,
};

/* Sample ID information of flex parser structure. */
struct mlx5_flex_parser_profiles {
	uint32_t num; /* Actual number of samples. */
	uint32_t ids[8]; /* Sample IDs for this profile. */
	uint8_t offset[8]; /* Bytes offset of each parser. */
	void *obj; /* Flex parser node object. */
};

/* Max member ports per bonding device. */
#define MLX5_BOND_MAX_PORTS 2

/* Bonding device information. */
struct mlx5_bond_info {
	int n_port; /* Number of bond member ports. */
	uint32_t ifindex;
	char ifname[MLX5_NAMESIZE + 1];
	struct {
		char ifname[MLX5_NAMESIZE + 1];
		uint32_t ifindex;
		struct rte_pci_addr pci_addr;
	} ports[MLX5_BOND_MAX_PORTS];
};

/*
 * Shared InfiniBand device context for master/representor ports
 * which belong to the same IB device with multiple IB ports.
 */
struct mlx5_dev_ctx_shared {
	LIST_ENTRY(mlx5_dev_ctx_shared) next;
	uint32_t refcnt;
	uint32_t devx:1; /* Opened with DV. */
	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
	uint32_t rq_ts_format:2; /* RQ timestamp formats supported. */
	uint32_t sq_ts_format:2; /* SQ timestamp formats supported. */
	uint32_t qp_ts_format:2; /* QP timestamp formats supported. */
	uint32_t max_port; /* Maximal IB device port index. */
	struct mlx5_bond_info bond; /* Bonding information. */
	void *ctx; /* Verbs/DV/DevX context. */
	void *pd; /* Protection Domain. */
	uint32_t pdn; /* Protection Domain number. */
	uint32_t tdn; /* Transport Domain number. */
	char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary. */
	struct mlx5_dev_attr device_attr; /* Device properties. */
	int numa_node; /* Numa node of backing physical device. */
	LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
	/**< Called by memory event callback. */
	struct mlx5_mr_share_cache share_cache;
	/* Packet pacing related structure. */
	struct mlx5_dev_txpp txpp;
	/* Shared DV/DR flow data section. */
	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
	uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
	uint32_t dv_regc0_mask; /* Available bits of metadata reg_c[0]. */
	void *fdb_domain; /* FDB Direct Rules name space handle. */
	void *rx_domain; /* RX Direct Rules name space handle. */
	void *tx_domain; /* TX Direct Rules name space handle. */
#ifndef RTE_ARCH_64
	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
	/* UAR same-page access control required in 32-bit implementations. */
#endif
	struct mlx5_hlist *flow_tbls;
	struct mlx5_flow_tunnel_hub *tunnel_hub;
	/* Direct Rules tables for FDB, NIC TX+RX. */
	void *dr_drop_action; /* Pointer to DR drop action, any domain. */
	void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
	struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
	struct mlx5_hlist *modify_cmds;
	struct mlx5_hlist *tag_table;
	struct mlx5_cache_list port_id_action_list; /* Port ID action cache. */
	struct mlx5_cache_list push_vlan_action_list; /* Push VLAN actions. */
	struct mlx5_cache_list sample_action_list; /* List of sample actions. */
	struct mlx5_cache_list dest_array_list;
	/* List of destination array actions. */
	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
	void *default_miss_action; /* Default miss action. */
	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
	/* Memory Pool for mlx5 flow resources. */
	struct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */
	/* Shared interrupt handler section. */
	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
	void *devx_comp; /* DEVX async comp obj. */
	struct mlx5_devx_obj *tis; /* TIS object. */
	struct mlx5_devx_obj *td; /* Transport domain. */
	void *tx_uar; /* Tx/packet pacing shared UAR. */
	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
	/* Flex parser profiles information. */
	void *devx_rx_uar; /* DevX UAR for Rx. */
	struct mlx5_aso_age_mng *aso_age_mng;
	/* Management data for aging mechanism using ASO Flow Hit. */
	struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
	/* Management structure for GENEVE TLV option. */
	rte_spinlock_t geneve_tlv_opt_sl; /* Lock for GENEVE TLV resource. */
	struct mlx5_dev_shared_port port[]; /* Per-device port data array. */
};

/*
 * Per-process private structure.
 * Caution, secondary process may rebuild the struct during port start.
 */
struct mlx5_proc_priv {
	size_t uar_table_sz;
	/* Size of UAR register table. */
	void *uar_table[];
	/* Table of UAR registers for each process. */
};

/* MTR profile list. */
TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
/* MTR list. */
TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);

/* RSS description. */
struct mlx5_flow_rss_desc {
	uint32_t queue_num; /**< Number of entries in @p queue. */
	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	uint32_t key_len; /**< RSS hash key length. */
	uint32_t tunnel; /**< Queue in tunnel. */
	uint32_t shared_rss; /**< Shared RSS index. */
	struct mlx5_ind_table_obj *ind_tbl;
	/**< Indirection table for shared RSS hash Rx queues. */
	union {
		uint16_t *queue; /**< Destination queues. */
		const uint16_t *const_q; /**< Const pointer convert. */
	};
};

#define MLX5_PROC_PRIV(port_id) \
	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
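
/*
 * Illustrative usage sketch (not part of the driver; indexing by Tx
 * queue index reflects how the PMD typically fills uar_table):
 *
 *   struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(port_id);
 *   void *uar_reg = ppriv->uar_table[txq_idx];
 */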

/* Verbs/DevX Rx queue elements. */
struct mlx5_rxq_obj {
	LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	int fd; /* File descriptor for event channel. */
	union {
		struct {
			void *wq; /* Work Queue. */
			void *ibv_cq; /* Completion Queue. */
		};
		struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
		struct {
			struct mlx5_devx_rq rq_obj; /* DevX RQ object. */
			struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
		};
	};
};

/* Indirection table. */
struct mlx5_ind_table_obj {
	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
	uint32_t refcnt; /* Reference counter. */
	union {
		void *ind_table; /**< Indirection table. */
		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
	};
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t *queues; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	struct mlx5_cache_entry entry; /* Cache entry. */
	uint32_t standalone:1; /* This object is used in a shared action. */
	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
	union {
		void *qp; /* Verbs queue pair. */
		struct mlx5_devx_obj *tir; /* DevX TIR object. */
	};
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	void *action; /* DV QP action pointer. */
#endif
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint32_t idx; /* Hash Rx queue index. */
	uint8_t rss_key[]; /* Hash key. */
};

/* Verbs/DevX Tx queue elements. */
struct mlx5_txq_obj {
	LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	union {
		struct {
			void *cq; /* Completion Queue. */
			void *qp; /* Queue Pair. */
		};
		struct {
			struct mlx5_devx_obj *sq;
			/* DevX object for Tx queue. */
			struct mlx5_devx_obj *tis; /* The TIS object. */
		};
		struct {
			struct rte_eth_dev *dev;
			struct mlx5_devx_cq cq_obj;
			/* DevX CQ object and its resources. */
			struct mlx5_devx_sq sq_obj;
			/* DevX SQ object and its resources. */
		};
	};
};

enum mlx5_rxq_modify_type {
	MLX5_RXQ_MOD_ERR2RST, /* Modify state from error to reset. */
	MLX5_RXQ_MOD_RST2RDY, /* Modify state from reset to ready. */
	MLX5_RXQ_MOD_RDY2ERR, /* Modify state from ready to error. */
	MLX5_RXQ_MOD_RDY2RST, /* Modify state from ready to reset. */
};

enum mlx5_txq_modify_type {
	MLX5_TXQ_MOD_RST2RDY, /* Modify state from reset to ready. */
	MLX5_TXQ_MOD_RDY2RST, /* Modify state from ready to reset. */
	MLX5_TXQ_MOD_ERR2RDY, /* Modify state from error to ready. */
};

/* HW objects operations structure. */
struct mlx5_obj_ops {
	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
	int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
	int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
	int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);
	void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
	int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
			     struct mlx5_ind_table_obj *ind_tbl);
	int (*ind_table_modify)(struct rte_eth_dev *dev,
				const unsigned int log_n,
				const uint16_t *queues, const uint32_t queues_n,
				struct mlx5_ind_table_obj *ind_tbl);
	void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
	int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
			int tunnel __rte_unused);
	int (*hrxq_modify)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
			   const uint8_t *rss_key,
			   uint64_t hash_fields,
			   const struct mlx5_ind_table_obj *ind_tbl);
	void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
	int (*drop_action_create)(struct rte_eth_dev *dev);
	void (*drop_action_destroy)(struct rte_eth_dev *dev);
	int (*txq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
	int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
			      enum mlx5_txq_modify_type type, uint8_t dev_port);
	void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
};

#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)

/* MR operations structure. */
struct mlx5_mr_ops {
	mlx5_reg_mr_t reg_mr;
	mlx5_dereg_mr_t dereg_mr;
};

struct mlx5_priv {
	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
	struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
	uint32_t dev_port; /* Device port number. */
	struct rte_pci_device *pci_dev; /* Backend PCI device. */
	struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
	/* Bit-field of MAC addresses owned by the PMD. */
	uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
	unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
	/* Device properties. */
	uint16_t mtu; /* Configured MTU. */
	unsigned int isolated:1; /* Whether isolated mode is enabled. */
	unsigned int representor:1; /* Device is a port representor. */
	unsigned int master:1; /* Device is an E-Switch master. */
	unsigned int txpp_en:1; /* Tx packet pacing enabled. */
	unsigned int mtr_en:1; /* Whether meter is supported. */
	unsigned int mtr_reg_share:1; /* Whether meter REG_C sharing is supported. */
	unsigned int sampler_en:1; /* Whether sampler is supported. */
	uint16_t domain_id; /* Switch domain identifier. */
	uint16_t vport_id; /* Associated VF vport index (if any). */
	uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
	uint32_t vport_meta_mask; /* Used for vport index field match mask. */
	int32_t representor_id; /* -1 if not a representor. */
	int32_t pf_bond; /* >=0, representor owner PF index in bonding. */
	unsigned int if_index; /* Associated kernel network device index. */
	unsigned int rxqs_n; /* RX queues array size. */
	unsigned int txqs_n; /* TX queues array size. */
	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
	unsigned int (*reta_idx)[]; /* RETA index table. */
	unsigned int reta_idx_n; /* RETA index size. */
	struct mlx5_drop drop_queue; /* Flow drop queues. */
	uint32_t flows; /* RTE Flow rules. */
	uint32_t ctrl_flows; /* Control flow rules. */
	rte_spinlock_t flow_list_lock;
	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
	struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
	/* Indirection tables. */
	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
	/* Pointer to next element. */
	uint32_t refcnt; /**< Reference counter. */
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t max_lro_msg_size;
	/* Tags resources cache. */
	uint32_t link_speed_capa; /* Link speed capabilities. */
	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
	struct mlx5_dev_config config; /* Device configuration. */
	/* Context for Verbs allocator. */
	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
	struct mlx5_hlist *mreg_cp_tbl;
	/* Hash table of Rx metadata register copy table. */
	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
	uint8_t mtr_color_reg; /* Meter color match REG_C. */
	struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
	struct mlx5_flow_meters flow_meters; /* MTR list. */
	uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
	uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
	struct mlx5_mp_id mp_id; /* ID for multi-process communication. */
	LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
	rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
	uint32_t rss_shared_actions; /* RSS shared actions. */
	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
};

#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])

struct rte_hairpin_peer_info {
	uint32_t qp_id;
	uint32_t vhca_id;
	uint16_t peer_q;
	uint16_t tx_explicit;
	uint16_t manual_bind;
};

/* mlx5.c */

int mlx5_getenv_int(const char *);
int mlx5_proc_priv_init(struct rte_eth_dev *dev);
void mlx5_proc_priv_uninit(struct rte_eth_dev *dev);
int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
			     struct rte_eth_udp_tunnel *udp_tunnel);
uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
int mlx5_dev_close(struct rte_eth_dev *dev);
bool mlx5_is_hpf(struct rte_eth_dev *dev);
void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);

/* Macro to iterate over all valid ports for mlx5 driver. */
#define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
	for (port_id = mlx5_eth_find_next(0, pci_dev); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
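
/*
 * Illustrative usage sketch (not part of the driver): iterating over
 * all mlx5 ports backed by a given PCI device:
 *
 *   uint16_t port_id;
 *
 *   MLX5_ETH_FOREACH_DEV(port_id, pci_dev) {
 *           struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 *           ...
 *   }
 */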
int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config);
void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
			 struct mlx5_dev_config *config);
void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
int mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
				  struct mlx5_dev_config *config);
int mlx5_dev_configure(struct rte_eth_dev *dev);
int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
			 struct rte_eth_hairpin_cap *cap);
bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);

/* mlx5_ethdev.c */

int mlx5_dev_configure(struct rte_eth_dev *dev);
int mlx5_representor_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_representor_info *info);
#define MLX5_REPRESENTOR_ID(pf, type, repr) \
	(((pf) << 14) + ((type) << 12) + ((repr) & 0xfff))
#define MLX5_REPRESENTOR_REPR(repr_id) \
	((repr_id) & 0xfff)
#define MLX5_REPRESENTOR_TYPE(repr_id) \
	(((repr_id) >> 12) & 3)
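
/*
 * Illustrative sketch (not part of the driver): a representor ID packs
 * the PF index (bits 14-15), type (bits 12-13) and representor index
 * (bits 0-11), so the components can be recovered with the macros:
 *
 *   uint16_t id = MLX5_REPRESENTOR_ID(pf, type, repr);
 *   int repr_idx = MLX5_REPRESENTOR_REPR(id);
 *   int repr_type = MLX5_REPRESENTOR_TYPE(id);
 */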

uint16_t mlx5_representor_id_encode(const struct mlx5_switch_info *info,
				    enum rte_eth_representor_type hpf_type);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver,
			size_t fw_size);
int mlx5_dev_infos_get(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *info);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
			 struct rte_eth_hairpin_cap *cap);
eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev);

/* mlx5_ethdev_os.c */

int mlx5_get_ifname(const struct rte_eth_dev *dev,
		    char (*ifname)[MLX5_NAMESIZE]);
unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
int mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
void mlx5_dev_interrupt_handler(void *arg);
void mlx5_dev_interrupt_handler_devx(void *arg);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
int mlx5_sysfs_switch_info(unsigned int ifindex,
			   struct mlx5_switch_info *info);
void mlx5_translate_port_name(const char *port_name_in,
			      struct mlx5_switch_info *port_info_out);
void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
				   rte_intr_callback_fn cb_fn, void *cb_arg);
int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
			 char *ifname);
int mlx5_get_module_info(struct rte_eth_dev *dev,
			 struct rte_eth_dev_module_info *modinfo);
int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *info);
int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
			  const char *ctr_name, uint64_t *stat);
int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
void mlx5_os_stats_init(struct rte_eth_dev *dev);

/* mlx5_mac.c */

void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		      uint32_t index, uint32_t vmdq);
int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr);

/* mlx5_rss.c */

int mlx5_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);

/* mlx5_rxmode.c */

int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
int mlx5_allmulticast_disable(struct rte_eth_dev *dev);

/* mlx5_stats.c */

int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int mlx5_stats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		    unsigned int n);
int mlx5_xstats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			  struct rte_eth_xstat_name *xstats_names,
			  unsigned int n);

/* mlx5_vlan.c */

int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);

/* mlx5_vlan_os.c */

void mlx5_vlan_vmwa_exit(void *ctx);
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vf_vlan);
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vf_vlan);
void *mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex);

/* mlx5_trigger.c */

int mlx5_dev_start(struct rte_eth_dev *dev);
int mlx5_dev_stop(struct rte_eth_dev *dev);
int mlx5_traffic_enable(struct rte_eth_dev *dev);
void mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);
int mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
				   struct rte_hairpin_peer_info *current_info,
				   struct rte_hairpin_peer_info *peer_info,
				   uint32_t direction);
int mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
				 struct rte_hairpin_peer_info *peer_info,
				 uint32_t direction);
int mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
				   uint32_t direction);
int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
				size_t len, uint32_t direction);

/* mlx5_flow.c */

int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
void mlx5_flow_print(struct rte_flow *flow);
int mlx5_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error);
struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item items[],
				  const struct rte_flow_action actions[],
				  struct rte_flow_error *error);
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		      struct rte_flow_error *error);
void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		    const struct rte_flow_action *action, void *data,
		    struct rte_flow_error *error);
int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
		      struct rte_flow_error *error);
int mlx5_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
int mlx5_flow_start_default(struct rte_eth_dev *dev);
void mlx5_flow_stop_default(struct rte_eth_dev *dev);
int mlx5_flow_verify(struct rte_eth_dev *dev);
int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
			struct rte_flow_item_eth *eth_spec,
			struct rte_flow_item_eth *eth_mask,
			struct rte_flow_item_vlan *vlan_spec,
			struct rte_flow_item_vlan *vlan_mask);
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
		   struct rte_flow_item_eth *eth_spec,
		   struct rte_flow_item_eth *eth_mask);
int mlx5_flow_lacp_miss(struct rte_eth_dev *dev);
struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
				       uint64_t async_id, int status);
void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
void mlx5_flow_query_alarm(void *arg);
uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
		       bool clear, uint64_t *pkts, uint64_t *bytes);
int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow,
		       FILE *file, struct rte_flow_error *error);
void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev);
int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
			     uint32_t nb_contexts, struct rte_flow_error *error);

/* mlx5_mp_os.c */

int mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg,
			      const void *peer);
int mlx5_mp_os_secondary_handle(const struct rte_mp_msg *mp_msg,
				const void *peer);
void mlx5_mp_os_req_start_rxtx(struct rte_eth_dev *dev);
void mlx5_mp_os_req_stop_rxtx(struct rte_eth_dev *dev);
int mlx5_mp_os_req_queue_control(struct rte_eth_dev *dev, uint16_t queue_id,
				 enum mlx5_mp_req_type req_type);

/* mlx5_socket.c */

int mlx5_pmd_socket_init(void);

/* mlx5_flow_meter.c */

int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
struct mlx5_flow_meter *mlx5_flow_meter_find(struct mlx5_priv *priv,
					     uint32_t meter_id);
struct mlx5_flow_meter *mlx5_flow_meter_attach
					(struct mlx5_priv *priv,
					 uint32_t meter_id,
					 const struct rte_flow_attr *attr,
					 struct rte_flow_error *error);
void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm);

/* mlx5_os.c */

struct rte_pci_driver;
int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
int mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
			const struct mlx5_dev_config *config,
			struct mlx5_dev_ctx_shared *sh);
int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
int mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
			   mlx5_dereg_mr_t *dereg_mr_cb);
void mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
			 uint32_t index);
int mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, unsigned int iface_idx,
			       struct rte_ether_addr *mac_addr,
			       int vf_index);
int mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable);
int mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable);
int mlx5_os_set_nonblock_channel_fd(int fd);
void mlx5_os_mac_addr_flush(struct rte_eth_dev *dev);

/* mlx5_txpp.c */

int mlx5_txpp_start(struct rte_eth_dev *dev);
void mlx5_txpp_stop(struct rte_eth_dev *dev);
int mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp);
int mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *stats,
			 unsigned int n, unsigned int n_used);
int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev);
int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n, unsigned int n_used);
void mlx5_txpp_interrupt_handler(void *cb_arg);

eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);

/* mlx5_flow_age.c */

int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh);
int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh);
int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh);
void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh);

#endif /* RTE_PMD_MLX5_H_ */