1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
6 #ifndef RTE_PMD_MLX5_H_
7 #define RTE_PMD_MLX5_H_
14 #include <netinet/in.h>
15 #include <sys/queue.h>
18 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
20 #pragma GCC diagnostic ignored "-Wpedantic"
22 #include <infiniband/verbs.h>
24 #pragma GCC diagnostic error "-Wpedantic"
28 #include <rte_ether.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_rwlock.h>
31 #include <rte_interrupts.h>
32 #include <rte_errno.h>
35 #include <mlx5_glue.h>
36 #include <mlx5_devx_cmds.h>
39 #include <mlx5_common_mp.h>
40 #include <mlx5_common_mr.h>
42 #include "mlx5_defs.h"
43 #include "mlx5_utils.h"
44 #include "mlx5_autoconf.h"
47 enum mlx5_ipool_index {
48 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
49 MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
50 MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
51 MLX5_IPOOL_TAG, /* Pool for tag resource. */
52 MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
53 MLX5_IPOOL_JUMP, /* Pool for jump resource. */
55 MLX5_IPOOL_MTR, /* Pool for meter resource. */
56 MLX5_IPOOL_MCP, /* Pool for metadata resource. */
57 MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
58 MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
59 MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
63 /** Key string for IPC. */
64 #define MLX5_MP_NAME "net_mlx5_mp"
67 LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);
69 /* Shared data between primary and secondary processes. */
70 struct mlx5_shared_data {
72 /* Global spinlock for primary and secondary processes. */
73 int init_done; /* Whether primary has done initialization. */
74 unsigned int secondary_cnt; /* Number of secondary processes initialized. */
75 struct mlx5_dev_list mem_event_cb_list;
76 rte_rwlock_t mem_event_rwlock;
79 /* Per-process data structure, not visible to other processes. */
80 struct mlx5_local_data {
81 int init_done; /* Whether a secondary has done initialization. */
84 extern struct mlx5_shared_data *mlx5_shared_data;
86 struct mlx5_counter_ctrl {
87 /* Name of the counter. */
88 char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
89 /* Name of the counter on the device table. */
90 char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
91 uint32_t ib:1; /**< Nonzero for IB counters. */
94 struct mlx5_xstats_ctrl {
95 /* Number of device stats. */
97 /* Number of device stats identified by PMD. */
98 uint16_t mlx5_stats_n;
99 /* Index in the device counters table. */
100 uint16_t dev_table_idx[MLX5_MAX_XSTATS];
101 uint64_t base[MLX5_MAX_XSTATS];
102 uint64_t xstats[MLX5_MAX_XSTATS];
103 uint64_t hw_stats[MLX5_MAX_XSTATS];
104 struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
107 struct mlx5_stats_ctrl {
108 /* Base for imissed counter. */
109 uint64_t imissed_base;
113 /* Default PMD specific parameter value. */
114 #define MLX5_ARG_UNSET (-1)
116 #define MLX5_LRO_SUPPORTED(dev) \
117 (((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)
119 /* Maximal size of an LRO coalesced segment, set in chunks of 256 bytes. */
120 #define MLX5_LRO_SEG_CHUNK_SIZE 256u
122 /* Maximal size of aggregated LRO packet. */
123 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
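/*
 * Usage sketch (illustrative only, assuming dev is a valid rte_eth_dev of a
 * configured mlx5 port): with MLX5_LRO_SEG_CHUNK_SIZE = 256 and
 * UINT8_MAX = 255, MLX5_MAX_LRO_SIZE evaluates to 255 * 256 = 65280 bytes.
 *
 *	if (MLX5_LRO_SUPPORTED(dev))
 *		DRV_LOG(DEBUG, "LRO supported, max aggregated size %u",
 *			MLX5_MAX_LRO_SIZE);
 */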
125 /* LRO configurations structure. */
126 struct mlx5_lro_config {
127 uint32_t supported:1; /* Whether LRO is supported. */
128 uint32_t timeout; /* User configuration. */
132 * Device configuration structure.
134 * Merged configuration from:
136 * - Device capabilities,
137 * - User device parameters, which may disable supported features.
139 struct mlx5_dev_config {
140 unsigned int hw_csum:1; /* Checksum offload is supported. */
141 unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
142 unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
143 unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
144 unsigned int hw_padding:1; /* End alignment padding is supported. */
145 unsigned int vf:1; /* This is a VF. */
146 unsigned int tunnel_en:1;
147 /* Whether tunnel stateless offloads are supported. */
148 unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
149 unsigned int cqe_comp:1; /* CQE compression is enabled. */
150 unsigned int cqe_pad:1; /* CQE padding is enabled. */
151 unsigned int tso:1; /* Whether TSO is supported. */
152 unsigned int rx_vec_en:1; /* Rx vector is enabled. */
153 unsigned int mr_ext_memseg_en:1;
154 /* Whether memseg should be extended for MR creation. */
155 unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
156 unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
157 unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
158 unsigned int dv_flow_en:1; /* Enable DV flow. */
159 unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
160 unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
161 unsigned int devx:1; /* Whether devx interface is available or not. */
162 unsigned int dest_tir:1; /* Whether advanced DR API is available. */
164 unsigned int enabled:1; /* Whether MPRQ is enabled. */
165 unsigned int stride_num_n; /* Number of strides. */
166 unsigned int stride_size_n; /* Size of a stride. */
167 unsigned int min_stride_size_n; /* Min size of a stride. */
168 unsigned int max_stride_size_n; /* Max size of a stride. */
169 unsigned int max_memcpy_len;
170 /* Maximum packet size up to which Rx packets are copied with memcpy. */
171 unsigned int min_rxqs_num;
172 /* Rx queue count threshold to enable MPRQ. */
173 } mprq; /* Configurations for Multi-Packet RQ. */
174 int mps; /* Multi-packet send supported mode. */
175 int dbnc; /* Skip doorbell register write barrier. */
176 unsigned int flow_prio; /* Number of flow priorities. */
177 enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
178 /* Availability of mreg_c's. */
179 unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
180 unsigned int ind_table_max_size; /* Maximum indirection table size. */
181 unsigned int max_dump_files_num; /* Maximum dump files per queue. */
182 unsigned int log_hp_size; /* Single hairpin queue data size in total. */
183 int txqs_inline; /* Queue number threshold for inlining. */
184 int txq_inline_min; /* Minimal amount of data bytes to inline. */
185 int txq_inline_max; /* Max packet size for inlining with SEND. */
186 int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
187 struct mlx5_hca_attr hca_attr; /* HCA attributes. */
188 struct mlx5_lro_config lro; /* LRO configuration. */
193 * Type of object being allocated.
195 enum mlx5_verbs_alloc_type {
196 MLX5_VERBS_ALLOC_TYPE_NONE,
197 MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
198 MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
201 /* Structure for VF VLAN workaround. */
202 struct mlx5_vf_vlan {
208 * Verbs allocator needs a context to know in the callback which kind of
209 * resources it is allocating.
211 struct mlx5_verbs_alloc_ctx {
212 enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
213 const void *obj; /* Pointer to the DPDK object. */
216 /* Flow drop context necessary due to Verbs API. */
218 struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
219 struct mlx5_rxq_obj *rxq; /* Rx queue object. */
222 #define MLX5_COUNTERS_PER_POOL 512
223 #define MLX5_MAX_PENDING_QUERIES 4
224 #define MLX5_CNT_CONTAINER_RESIZE 64
225 #define MLX5_CNT_AGE_OFFSET 0x80000000
226 #define CNT_SIZE (sizeof(struct mlx5_flow_counter))
227 #define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
228 #define AGE_SIZE (sizeof(struct mlx5_age_param))
229 #define MLX5_AGING_TIME_DELAY 7
230 #define CNT_POOL_TYPE_EXT (1 << 0)
231 #define CNT_POOL_TYPE_AGE (1 << 1)
232 #define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
233 #define IS_AGE_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_AGE)
234 #define MLX_CNT_IS_AGE(counter) ((counter) & MLX5_CNT_AGE_OFFSET ? 1 : 0)
235 #define MLX5_CNT_LEN(pool) \
237 (IS_AGE_POOL(pool) ? AGE_SIZE : 0) + \
238 (IS_EXT_POOL(pool) ? CNTEXT_SIZE : 0))
239 #define MLX5_POOL_GET_CNT(pool, index) \
240 ((struct mlx5_flow_counter *) \
241 ((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool))))
242 #define MLX5_CNT_ARRAY_IDX(pool, cnt) \
243 ((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \
246 * The pool index and the offset of the counter within the pool array make
247 * up the counter index. One is added so that a counter from pool 0 at
248 * offset 0 does not yield index 0, since 0 means an invalid counter index.
251 #define MLX5_MAKE_CNT_IDX(pi, offset) \
252 ((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
253 #define MLX5_CNT_TO_CNT_EXT(pool, cnt) \
254 ((struct mlx5_flow_counter_ext *)\
255 ((uint8_t *)((cnt) + 1) + \
256 (IS_AGE_POOL(pool) ? AGE_SIZE : 0)))
257 #define MLX5_GET_POOL_CNT_EXT(pool, offset) \
258 MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
259 #define MLX5_CNT_TO_AGE(cnt) \
260 ((struct mlx5_age_param *)((cnt) + 1))
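/*
 * Layout sketch (illustrative, derived from the macros above): counters are
 * stored right after the pool header as
 * [mlx5_flow_counter][mlx5_age_param, if age pool][mlx5_flow_counter_ext, if ext pool],
 * so for a hypothetical pool index pi and counter offset within that pool:
 *
 *	struct mlx5_flow_counter *cnt = MLX5_POOL_GET_CNT(pool, offset);
 *	uint32_t idx = MLX5_MAKE_CNT_IDX(pi, offset); // pi * 512 + offset + 1
 *	struct mlx5_age_param *age = IS_AGE_POOL(pool) ?
 *				     MLX5_CNT_TO_AGE(cnt) : NULL;
 *	struct mlx5_flow_counter_ext *ext = IS_EXT_POOL(pool) ?
 *				     MLX5_CNT_TO_CNT_EXT(pool, cnt) : NULL;
 */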
262 struct mlx5_flow_counter_pool;
266 AGE_FREE, /* Initialized state. */
267 AGE_CANDIDATE, /* Counter assigned to flows. */
268 AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
271 #define MLX5_CNT_CONTAINER(sh, batch, age) (&(sh)->cmng.ccont \
272 [(batch) * 2 + (age)])
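/*
 * Index mapping sketch (assuming the container types below keep their default
 * enum values starting at 0): MLX5_CNT_CONTAINER() selects
 * ccont[batch * 2 + age], i.e. (batch, age) = (0, 0) -> SINGLE,
 * (0, 1) -> SINGLE_FOR_AGE, (1, 0) -> BATCH and (1, 1) -> BATCH_FOR_AGE.
 */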
275 MLX5_CCONT_TYPE_SINGLE,
276 MLX5_CCONT_TYPE_SINGLE_FOR_AGE,
277 MLX5_CCONT_TYPE_BATCH,
278 MLX5_CCONT_TYPE_BATCH_FOR_AGE,
282 /* Counter age parameter. */
283 struct mlx5_age_param {
284 rte_atomic16_t state; /**< Age state. */
285 uint16_t port_id; /**< Port id of the counter. */
286 uint32_t timeout:15; /**< Aging timeout, in 0.1 sec units. */
287 uint32_t expire:16; /**< Expiration time (0.1 sec units) in the future. */
288 void *context; /**< Flow counter age context. */
291 struct flow_counter_stats {
296 /* Generic counters information. */
297 struct mlx5_flow_counter {
298 TAILQ_ENTRY(mlx5_flow_counter) next;
299 /**< Pointer to the next flow counter structure. */
301 uint64_t hits; /**< Reset value of hits (packets). */
302 int64_t query_gen; /**< Generation of the last release. */
304 uint64_t bytes; /**< Reset value of bytes. */
305 void *action; /**< Pointer to the dv action. */
308 /* Extended counter information for non-batch counters. */
309 struct mlx5_flow_counter_ext {
310 uint32_t shared:1; /**< Share counter ID with other flow rules. */
312 /**< Whether the counter was allocated by batch command. */
313 uint32_t ref_cnt:30; /**< Reference counter. */
314 uint32_t id; /**< User counter ID. */
315 union { /**< Holds the counters for the rule. */
316 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
317 struct ibv_counter_set *cs;
318 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
319 struct ibv_counters *cs;
321 struct mlx5_devx_obj *dcs; /**< Counter Devx object. */
325 TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
327 /* Generic counter pool structure; queries are done at pool granularity. */
328 struct mlx5_flow_counter_pool {
329 TAILQ_ENTRY(mlx5_flow_counter_pool) next;
330 struct mlx5_counters counters; /* Free counter list. */
332 struct mlx5_devx_obj *min_dcs;
333 rte_atomic64_t a64_dcs;
335 /* The devx object of the minimum counter ID. */
336 rte_atomic64_t start_query_gen; /* Query start round. */
337 rte_atomic64_t end_query_gen; /* Query end round. */
338 uint32_t index; /* Pool index in container. */
339 uint8_t type; /* Memory type behind the counter array. */
340 rte_spinlock_t sl; /* The pool lock. */
341 struct mlx5_counter_stats_raw *raw;
342 struct mlx5_counter_stats_raw *raw_hw; /* The raw currently in use by the HW query. */
345 struct mlx5_counter_stats_raw;
347 /* Memory management structure for group of counter statistics raws. */
348 struct mlx5_counter_stats_mem_mng {
349 LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
350 struct mlx5_counter_stats_raw *raws;
351 struct mlx5_devx_obj *dm;
352 struct mlx5dv_devx_umem *umem;
355 /* Raw memory structure for the counter statistics values of a pool. */
356 struct mlx5_counter_stats_raw {
357 LIST_ENTRY(mlx5_counter_stats_raw) next;
359 struct mlx5_counter_stats_mem_mng *mem_mng;
360 volatile struct flow_counter_stats *data;
363 TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
365 /* Container structure for counter pools. */
366 struct mlx5_pools_container {
367 rte_atomic16_t n_valid; /* Number of valid pools. */
368 uint16_t n; /* Number of pools. */
369 rte_spinlock_t resize_sl; /* The resize lock. */
370 struct mlx5_counter_pools pool_list; /* Counter pool list. */
371 struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
372 struct mlx5_counter_stats_mem_mng *mem_mng;
373 /* Memory management for the raws of the next allocated pools. */
376 /* Counter global management structure. */
377 struct mlx5_flow_counter_mng {
378 struct mlx5_pools_container ccont[MLX5_CCONT_TYPE_MAX];
379 struct mlx5_counters flow_counters; /* Legacy flow counter list. */
380 uint8_t pending_queries;
384 uint8_t query_thread_on;
385 LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
386 LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
389 #define MLX5_AGE_EVENT_NEW 1
390 #define MLX5_AGE_TRIGGER 2
391 #define MLX5_AGE_SET(age_info, BIT) \
392 ((age_info)->flags |= (1 << (BIT)))
393 #define MLX5_AGE_GET(age_info, BIT) \
394 ((age_info)->flags & (1 << (BIT)))
395 #define GET_PORT_AGE_INFO(priv) \
396 (&((priv)->sh->port[(priv)->ibv_port - 1].age_info))
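/*
 * Usage sketch for the aging helpers (illustrative, assuming a valid priv
 * with an initialized shared context):
 *
 *	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
 *
 *	MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
 *	if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
 *		; // an aged-flow query has been requested
 */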
398 /* Per-port aging information. */
399 struct mlx5_age_info {
400 uint8_t flags; /* Indicates a new event or that aging needs to be triggered. */
401 struct mlx5_counters aged_counters; /* Aged flow counter list. */
402 rte_spinlock_t aged_sl; /* Aged flow counter list lock. */
405 /* Per port data of shared IB device. */
406 struct mlx5_ibv_shared_port {
408 uint32_t devx_ih_port_id;
410 * Interrupt handler port_id. Used by shared interrupt
411 * handler to find the corresponding rte_eth device
412 * by IB port index. If the value is equal to or greater
413 * than RTE_MAX_ETHPORTS, it means there is no subhandler
414 * installed for the specified IB port index.
416 struct mlx5_age_info age_info;
417 /* Per-port aging information. */
420 /* Table key of the hash organization. */
421 union mlx5_flow_tbl_key {
423 /* Table ID should be at the lowest address. */
424 uint32_t table_id; /**< ID of the table. */
425 uint16_t reserved; /**< must be zero for comparison. */
426 uint8_t domain; /**< 1 - FDB, 0 - NIC TX/RX. */
427 uint8_t direction; /**< 1 - egress, 0 - ingress. */
429 uint64_t v64; /**< Full 64-bit value of the key. */
432 /* Table structure. */
433 struct mlx5_flow_tbl_resource {
434 void *obj; /**< Pointer to DR table object. */
435 rte_atomic32_t refcnt; /**< Reference counter. */
438 #define MLX5_MAX_TABLES UINT16_MAX
439 #define MLX5_FLOW_TABLE_LEVEL_METER (UINT16_MAX - 3)
440 #define MLX5_FLOW_TABLE_LEVEL_SUFFIX (UINT16_MAX - 2)
441 #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
442 /* Reserve the last two tables for metadata register copy. */
443 #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
444 #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
445 /* Tables for metering splits should be added here. */
446 #define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
447 #define MLX5_MAX_TABLES_FDB UINT16_MAX
449 #define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
450 #define MLX5_DBR_SIZE 8
451 #define MLX5_DBR_PER_PAGE (MLX5_DBR_PAGE_SIZE / MLX5_DBR_SIZE)
452 #define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / 64)
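/*
 * Worked numbers for the door-bell constants above: one 4096-byte page holds
 * 4096 / 8 = 512 door-bell records (MLX5_DBR_PER_PAGE), tracked by a bitmap
 * of 512 / 64 = 8 64-bit words (MLX5_DBR_BITMAP_SIZE).
 */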
454 struct mlx5_devx_dbr_page {
455 /* Door-bell records, must be first member in structure. */
456 uint8_t dbrs[MLX5_DBR_PAGE_SIZE];
457 LIST_ENTRY(mlx5_devx_dbr_page) next; /* Pointer to the next element. */
458 struct mlx5dv_devx_umem *umem;
459 uint32_t dbr_count; /* Number of door-bell records in use. */
460 /* Each set bit marks the matching door-bell record as in use. */
461 uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
464 /* ID generation structure. */
465 struct mlx5_flow_id_pool {
466 uint32_t *free_arr; /**< Pointer to an array of free values. */
468 /**< The next index that can be used without any free elements. */
469 uint32_t *curr; /**< Pointer to the index to pop. */
470 uint32_t *last; /**< Pointer to the last element in the empty array. */
471 uint32_t max_id; /**< Maximum ID that can be allocated from the pool. */
475 * Shared InfiniBand device context for Master/Representors
476 * which belong to the same IB device with multiple IB ports.
478 struct mlx5_ibv_shared {
479 LIST_ENTRY(mlx5_ibv_shared) next;
481 uint32_t devx:1; /* Opened with DV. */
482 uint32_t max_port; /* Maximal IB device port index. */
483 struct ibv_context *ctx; /* Verbs/DV context. */
484 struct ibv_pd *pd; /* Protection Domain. */
485 uint32_t pdn; /* Protection Domain number. */
486 uint32_t tdn; /* Transport Domain number. */
487 char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
488 char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary processes. */
489 struct ibv_device_attr_ex device_attr; /* Device properties. */
490 LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
491 /**< Called by memory event callback. */
492 struct mlx5_mr_share_cache share_cache;
493 /* Shared DV/DR flow data section. */
494 pthread_mutex_t dv_mutex; /* DV context mutex. */
495 uint32_t dv_meta_mask; /* Flow META metadata supported mask. */
496 uint32_t dv_mark_mask; /* Flow MARK metadata supported mask. */
497 uint32_t dv_regc0_mask; /* Available bits of metadata reg_c[0]. */
498 uint32_t dv_refcnt; /* DV/DR data reference counter. */
499 void *fdb_domain; /* FDB Direct Rules name space handle. */
500 void *rx_domain; /* RX Direct Rules name space handle. */
501 void *tx_domain; /* TX Direct Rules name space handle. */
502 struct mlx5_hlist *flow_tbls;
503 /* Direct Rules tables for FDB, NIC TX+RX */
504 void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
505 void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
506 uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */
507 LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
508 struct mlx5_hlist *tag_table;
509 uint32_t port_id_action_list; /* List of port ID actions. */
510 uint32_t push_vlan_action_list; /* List of push VLAN actions. */
511 struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
512 struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
513 /* Memory Pool for mlx5 flow resources. */
514 /* Shared interrupt handler section. */
515 pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
516 uint32_t intr_cnt; /* Interrupt handler reference counter. */
517 struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
518 uint32_t devx_intr_cnt; /* Devx interrupt handler reference counter. */
519 struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
520 struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
521 struct mlx5_devx_obj *tis; /* TIS object. */
522 struct mlx5_devx_obj *td; /* Transport domain. */
523 struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
524 struct mlx5_ibv_shared_port port[]; /* per device port data array. */
527 /* Per-process private structure. */
528 struct mlx5_proc_priv {
530 /* Size of UAR register table. */
532 /* Table of UAR registers for each process. */
535 /* MTR profile list. */
536 TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
538 TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
540 #define MLX5_PROC_PRIV(port_id) \
541 ((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
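/*
 * Usage sketch (illustrative, assuming port_id refers to a started mlx5 port
 * in the current process):
 *
 *	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(port_id);
 *
 *	if (ppriv != NULL)
 *		; // per-process UAR table is available
 */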
544 struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
545 struct mlx5_ibv_shared *sh; /* Shared IB device context. */
546 uint32_t ibv_port; /* IB device port number. */
547 struct rte_pci_device *pci_dev; /* Backend PCI device. */
548 struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
549 BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
550 /* Bit-field of MAC addresses owned by the PMD. */
551 uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
552 unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
553 /* Device properties. */
554 uint16_t mtu; /* Configured MTU. */
555 unsigned int isolated:1; /* Whether isolated mode is enabled. */
556 unsigned int representor:1; /* Device is a port representor. */
557 unsigned int master:1; /* Device is an E-Switch master. */
558 unsigned int dr_shared:1; /* DV/DR data is shared. */
559 unsigned int counter_fallback:1; /* Use counter fallback management. */
560 unsigned int mtr_en:1; /* Whether metering is supported. */
561 unsigned int mtr_reg_share:1; /* Whether meter REG_C sharing is supported. */
562 uint16_t domain_id; /* Switch domain identifier. */
563 uint16_t vport_id; /* Associated VF vport index (if any). */
564 uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
565 uint32_t vport_meta_mask; /* Used for vport index field match mask. */
566 int32_t representor_id; /* Port representor identifier. */
567 int32_t pf_bond; /* >=0 means PF index in bonding configuration. */
568 unsigned int if_index; /* Associated kernel network device index. */
570 unsigned int rxqs_n; /* RX queues array size. */
571 unsigned int txqs_n; /* TX queues array size. */
572 struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
573 struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
574 struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
575 struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
576 unsigned int (*reta_idx)[]; /* RETA index table. */
577 unsigned int reta_idx_n; /* RETA index size. */
578 struct mlx5_drop drop_queue; /* Flow drop queues. */
579 uint32_t flows; /* RTE Flow rules. */
580 uint32_t ctrl_flows; /* Control flow rules. */
581 void *inter_flows; /* Intermediate resources for flow creation. */
582 void *rss_desc; /* Intermediate rss description resources. */
583 int flow_idx; /* Intermediate device flow index. */
584 int flow_nested_idx; /* Intermediate device flow index, nested. */
585 LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
586 LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
587 uint32_t hrxqs; /* Verbs Hash Rx queues. */
588 LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
589 LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
590 /* Indirection tables. */
591 LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
592 /* Pointer to next element. */
593 rte_atomic32_t refcnt; /**< Reference counter. */
594 struct ibv_flow_action *verbs_action;
595 /**< Verbs modify header action object. */
596 uint8_t ft_type; /**< Flow table type, Rx or Tx. */
597 uint8_t max_lro_msg_size;
598 /* Tags resources cache. */
599 uint32_t link_speed_capa; /* Link speed capabilities. */
600 struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
601 struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
602 struct mlx5_dev_config config; /* Device configuration. */
603 struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
604 /* Context for Verbs allocator. */
605 int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
606 int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
607 LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */
608 struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
609 struct mlx5_flow_id_pool *qrss_id_pool;
610 struct mlx5_hlist *mreg_cp_tbl;
611 /* Hash table of Rx metadata register copy table. */
612 uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
613 uint8_t mtr_color_reg; /* Meter color match REG_C. */
614 struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
615 struct mlx5_flow_meters flow_meters; /* MTR list. */
617 rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
618 rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
619 /* UAR same-page access control required in 32bit implementations. */
621 uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
622 uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
623 struct mlx5_mp_id mp_id; /* ID used for multi-process communication. */
624 LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
627 #define PORT_ID(priv) ((priv)->dev_data->port_id)
628 #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
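/*
 * Usage sketch (illustrative, assuming priv points to a valid mlx5_priv):
 *
 *	uint16_t port_id = PORT_ID(priv);
 *	struct rte_eth_dev *dev = ETH_DEV(priv);
 */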
632 int mlx5_getenv_int(const char *);
633 int mlx5_proc_priv_init(struct rte_eth_dev *dev);
634 int64_t mlx5_get_dbr(struct rte_eth_dev *dev,
635 struct mlx5_devx_dbr_page **dbr_page);
636 int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id,
638 int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
639 struct rte_eth_udp_tunnel *udp_tunnel);
640 uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
642 /* Macro to iterate over all valid ports for mlx5 driver. */
643 #define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
644 for (port_id = mlx5_eth_find_next(0, pci_dev); \
645 port_id < RTE_MAX_ETHPORTS; \
646 port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
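/*
 * Usage sketch (illustrative, assuming pci_dev points to the PCI device of
 * interest):
 *
 *	uint16_t port_id;
 *
 *	MLX5_ETH_FOREACH_DEV(port_id, pci_dev)
 *		DRV_LOG(DEBUG, "mlx5 port %u", port_id);
 */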
650 int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
651 int mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]);
652 unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
653 int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
654 int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
655 int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
657 int mlx5_dev_configure(struct rte_eth_dev *dev);
658 int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
659 int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
660 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
661 const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
662 int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
663 int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
664 int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
665 int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
666 struct rte_eth_fc_conf *fc_conf);
667 int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
668 struct rte_eth_fc_conf *fc_conf);
669 void mlx5_dev_link_status_handler(void *arg);
670 void mlx5_dev_interrupt_handler(void *arg);
671 void mlx5_dev_interrupt_handler_devx(void *arg);
672 void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
673 void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
674 void mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev);
675 void mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev);
676 int mlx5_set_link_down(struct rte_eth_dev *dev);
677 int mlx5_set_link_up(struct rte_eth_dev *dev);
678 int mlx5_is_removed(struct rte_eth_dev *dev);
679 eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
680 eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
681 struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
682 struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
683 int mlx5_sysfs_switch_info(unsigned int ifindex,
684 struct mlx5_switch_info *info);
685 void mlx5_sysfs_check_switch_info(bool device_dir,
686 struct mlx5_switch_info *switch_info);
687 void mlx5_translate_port_name(const char *port_name_in,
688 struct mlx5_switch_info *port_info_out);
689 void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
690 rte_intr_callback_fn cb_fn, void *cb_arg);
691 int mlx5_get_module_info(struct rte_eth_dev *dev,
692 struct rte_eth_dev_module_info *modinfo);
693 int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
694 struct rte_dev_eeprom_info *info);
695 int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
696 struct rte_eth_hairpin_cap *cap);
697 int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev);
701 int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
702 void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
703 int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
704 uint32_t index, uint32_t vmdq);
705 struct mlx5_nl_vlan_vmwa_context *mlx5_vlan_vmwa_init
706 (struct rte_eth_dev *dev, uint32_t ifindex);
707 int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
708 int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
709 struct rte_ether_addr *mc_addr_set,
710 uint32_t nb_mc_addr);
714 int mlx5_rss_hash_update(struct rte_eth_dev *dev,
715 struct rte_eth_rss_conf *rss_conf);
716 int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
717 struct rte_eth_rss_conf *rss_conf);
718 int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
719 int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
720 struct rte_eth_rss_reta_entry64 *reta_conf,
722 int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
723 struct rte_eth_rss_reta_entry64 *reta_conf,
728 int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
729 int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
730 int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
731 int mlx5_allmulticast_disable(struct rte_eth_dev *dev);
735 void mlx5_stats_init(struct rte_eth_dev *dev);
736 int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
737 int mlx5_stats_reset(struct rte_eth_dev *dev);
738 int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
740 int mlx5_xstats_reset(struct rte_eth_dev *dev);
741 int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
742 struct rte_eth_xstat_name *xstats_names,
747 int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
748 void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
749 int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
750 void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *ctx);
751 void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
752 struct mlx5_vf_vlan *vf_vlan);
753 void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
754 struct mlx5_vf_vlan *vf_vlan);
758 int mlx5_dev_start(struct rte_eth_dev *dev);
759 void mlx5_dev_stop(struct rte_eth_dev *dev);
760 int mlx5_traffic_enable(struct rte_eth_dev *dev);
761 void mlx5_traffic_disable(struct rte_eth_dev *dev);
762 int mlx5_traffic_restart(struct rte_eth_dev *dev);
766 int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
767 bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
768 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
769 void mlx5_flow_print(struct rte_flow *flow);
770 int mlx5_flow_validate(struct rte_eth_dev *dev,
771 const struct rte_flow_attr *attr,
772 const struct rte_flow_item items[],
773 const struct rte_flow_action actions[],
774 struct rte_flow_error *error);
775 struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
776 const struct rte_flow_attr *attr,
777 const struct rte_flow_item items[],
778 const struct rte_flow_action actions[],
779 struct rte_flow_error *error);
780 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
781 struct rte_flow_error *error);
782 void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
783 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
784 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
785 const struct rte_flow_action *action, void *data,
786 struct rte_flow_error *error);
787 int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
788 struct rte_flow_error *error);
789 int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
790 enum rte_filter_type filter_type,
791 enum rte_filter_op filter_op,
793 int mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list);
794 void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list);
795 int mlx5_flow_start_default(struct rte_eth_dev *dev);
796 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
797 void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
798 void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
799 int mlx5_flow_verify(struct rte_eth_dev *dev);
800 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
801 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
802 struct rte_flow_item_eth *eth_spec,
803 struct rte_flow_item_eth *eth_mask,
804 struct rte_flow_item_vlan *vlan_spec,
805 struct rte_flow_item_vlan *vlan_mask);
806 int mlx5_ctrl_flow(struct rte_eth_dev *dev,
807 struct rte_flow_item_eth *eth_spec,
808 struct rte_flow_item_eth *eth_mask);
809 struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
810 int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
811 void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
812 void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
813 uint64_t async_id, int status);
814 void mlx5_set_query_alarm(struct mlx5_ibv_shared *sh);
815 void mlx5_flow_query_alarm(void *arg);
816 uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
817 void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
818 int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
819 bool clear, uint64_t *pkts, uint64_t *bytes);
820 int mlx5_flow_dev_dump(struct rte_eth_dev *dev, FILE *file,
821 struct rte_flow_error *error);
822 void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev);
823 int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
824 uint32_t nb_contexts, struct rte_flow_error *error);
827 int mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer);
828 int mlx5_mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer);
829 void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev);
830 void mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev);
834 int mlx5_pmd_socket_init(void);
836 /* mlx5_flow_meter.c */
838 int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
839 struct mlx5_flow_meter *mlx5_flow_meter_find(struct mlx5_priv *priv,
841 struct mlx5_flow_meter *mlx5_flow_meter_attach
842 (struct mlx5_priv *priv,
844 const struct rte_flow_attr *attr,
845 struct rte_flow_error *error);
846 void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm);
848 #endif /* RTE_PMD_MLX5_H_ */