#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
-
enum mlx5_ipool_index {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
MLX5_IPOOL_MAX,
};
+/*
+ * There are three reclaim memory modes supported:
+ * 0(none) means no memory reclaim.
+ * 1(light) means only PMD level reclaim.
+ * 2(aggressive) means both PMD and rdma-core level reclaim.
+ * NOTE: the selected mode is stored in a 2-bit "reclaim_mode" bit-field,
+ * so any new mode added here must still fit in 2 bits.
+ */
+enum mlx5_reclaim_mem_mode {
+ MLX5_RCM_NONE, /* Don't reclaim memory. */
+ MLX5_RCM_LIGHT, /* Reclaim PMD level. */
+ MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
+};
+
+/*
+ * Device attributes used in mlx5 PMD.
+ * OS-independent replacement for struct ibv_device_attr_ex in the shared
+ * device context: fields mirror the same-named Verbs/DV attributes and
+ * are filled via mlx5_os_get_dev_attr().
+ */
+struct mlx5_dev_attr {
+ uint64_t device_cap_flags_ex; /* Extended device capability flags. */
+ int max_qp_wr; /* Max number of outstanding WRs per QP. */
+ int max_sge; /* Max scatter/gather entries per WR. */
+ int max_cq; /* Max number of supported CQs. */
+ int max_qp; /* Max number of supported QPs. */
+ uint32_t raw_packet_caps; /* Raw packet QP capability flags. */
+ uint32_t max_rwq_indirection_table_size; /* Max RSS indirection table size. */
+ uint32_t max_tso; /* Max TSO payload size. */
+ uint32_t tso_supported_qpts; /* QP types supporting TSO. */
+ uint64_t flags; /* DV flags — assumed mlx5dv_context.flags, confirm in filler. */
+ uint64_t comp_mask; /* DV comp_mask — assumed mlx5dv_context.comp_mask. */
+ uint32_t sw_parsing_offloads; /* SW parsing offload capabilities. */
+ uint32_t min_single_stride_log_num_of_bytes; /* Min stride size, log2 bytes. */
+ uint32_t max_single_stride_log_num_of_bytes; /* Max stride size, log2 bytes. */
+ uint32_t min_single_wqe_log_num_of_strides; /* Min strides per WQE, log2. */
+ uint32_t max_single_wqe_log_num_of_strides; /* Max strides per WQE, log2. */
+ uint32_t stride_supported_qpts; /* QP types supporting striding RQ. */
+ uint32_t tunnel_offloads_caps; /* Tunnel offload capability flags. */
+ char fw_ver[64]; /* Firmware version string. */
+};
+
+/**
+ * Data associated with devices to spawn.
+ * Passed as "spawn" to mlx5_alloc_shared_ibctx() and mlx5_os_open_device()
+ * during device probing.
+ */
+struct mlx5_dev_spawn_data {
+ uint32_t ifindex; /**< Network interface index. */
+ uint32_t max_port; /**< IB device maximal port index. */
+ uint32_t ibv_port; /**< IB device physical port index. */
+ int pf_bond; /**< bonding device PF index. < 0 - no bonding */
+ struct mlx5_switch_info info; /**< Switch information. */
+ struct ibv_device *ibv_dev; /**< Associated IB device. */
+ struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
+ struct rte_pci_device *pci_dev; /**< Backend PCI device. */
+};
+
/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"
-LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);
+LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
/* Shared data between primary and secondary processes. */
struct mlx5_shared_data {
};
extern struct mlx5_shared_data *mlx5_shared_data;
+extern struct rte_pci_driver mlx5_driver;
+
+/* Dev ops structs */
+extern const struct eth_dev_ops mlx5_dev_sec_ops;
+extern const struct eth_dev_ops mlx5_dev_ops;
struct mlx5_counter_ctrl {
/* Name of the counter. */
unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
unsigned int devx:1; /* Whether devx interface is available or not. */
unsigned int dest_tir:1; /* Whether advanced DR API is available. */
+ unsigned int reclaim_mode:2; /* Memory reclaim mode. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int stride_num_n; /* Number of strides. */
#define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
#define AGE_SIZE (sizeof(struct mlx5_age_param))
#define MLX5_AGING_TIME_DELAY 7
-
#define CNT_POOL_TYPE_EXT (1 << 0)
#define CNT_POOL_TYPE_AGE (1 << 1)
#define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
#define IS_AGE_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_AGE)
#define MLX_CNT_IS_AGE(counter) ((counter) & MLX5_CNT_AGE_OFFSET ? 1 : 0)
-
#define MLX5_CNT_LEN(pool) \
(CNT_SIZE + \
(IS_AGE_POOL(pool) ? AGE_SIZE : 0) + \
AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */
};
+/*
+ * Get a counter container by (batch, age): containers are laid out at
+ * index = batch * 2 + age, matching the MLX5_CCONT_TYPE_* values below.
+ */
+#define MLX5_CNT_CONTAINER(sh, batch, age) (&(sh)->cmng.ccont \
+ [(batch) * 2 + (age)])
+
+/* Counter container types: index into mlx5_flow_counter_mng.ccont[]. */
+enum {
+ MLX5_CCONT_TYPE_SINGLE, /* batch = 0, age = 0. */
+ MLX5_CCONT_TYPE_SINGLE_FOR_AGE, /* batch = 0, age = 1. */
+ MLX5_CCONT_TYPE_BATCH, /* batch = 1, age = 0. */
+ MLX5_CCONT_TYPE_BATCH_FOR_AGE, /* batch = 1, age = 1. */
+ MLX5_CCONT_TYPE_MAX, /* Number of container types. */
+};
+
/* Counter age parameter. */
struct mlx5_age_param {
rte_atomic16_t state; /**< Age state. */
};
};
-
TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
/* Generic counter pool structure - query is in pool resolution. */
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
struct mlx5_counter_stats_raw *raws;
struct mlx5_devx_obj *dm;
- struct mlx5dv_devx_umem *umem;
+ void *umem;
};
/* Raw memory structure for the counter statistics values of a pool. */
struct mlx5_pools_container {
rte_atomic16_t n_valid; /* Number of valid pools. */
uint16_t n; /* Number of pools. */
+ rte_spinlock_t resize_sl; /* The resize lock. */
struct mlx5_counter_pools pool_list; /* Counter pool list. */
struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
- struct mlx5_counter_stats_mem_mng *init_mem_mng;
+ struct mlx5_counter_stats_mem_mng *mem_mng;
/* Hold the memory management for the next allocated pools raws. */
};
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- uint8_t mhi[2][2]; /* master \ host and age \ no age container index. */
- struct mlx5_pools_container ccont[2 * 2][2];
- /* master \ host and age \ no age pools container. */
+ struct mlx5_pools_container ccont[MLX5_CCONT_TYPE_MAX];
struct mlx5_counters flow_counters; /* Legacy flow counter list. */
uint8_t pending_queries;
uint8_t batch;
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
};
+
#define MLX5_AGE_EVENT_NEW 1
#define MLX5_AGE_TRIGGER 2
#define MLX5_AGE_SET(age_info, BIT) \
struct mlx5_counters aged_counters; /* Aged flow counter list. */
rte_spinlock_t aged_sl; /* Aged flow counter list lock. */
};
+
/* Per port data of shared IB device. */
struct mlx5_ibv_shared_port {
uint32_t ih_port_id;
/* Door-bell records, must be first member in structure. */
uint8_t dbrs[MLX5_DBR_PAGE_SIZE];
LIST_ENTRY(mlx5_devx_dbr_page) next; /* Pointer to the next element. */
- struct mlx5dv_devx_umem *umem;
+ void *umem;
uint32_t dbr_count; /* Number of door-bell records in use. */
/* 1 bit marks matching door-bell is in use. */
uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
* Shared Infiniband device context for Master/Representors
* which belong to same IB device with multiple IB ports.
**/
-struct mlx5_ibv_shared {
- LIST_ENTRY(mlx5_ibv_shared) next;
+struct mlx5_dev_ctx_shared {
+ LIST_ENTRY(mlx5_dev_ctx_shared) next;
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
uint32_t max_port; /* Maximal IB device port index. */
- struct ibv_context *ctx; /* Verbs/DV context. */
- struct ibv_pd *pd; /* Protection Domain. */
+ void *ctx; /* Verbs/DV/DevX context. */
+ void *pd; /* Protection Domain. */
uint32_t pdn; /* Protection Domain number. */
uint32_t tdn; /* Transport Domain number. */
char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
- struct ibv_device_attr_ex device_attr; /* Device properties. */
- LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
+ struct mlx5_dev_attr device_attr; /* Device properties. */
+ LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache share_cache;
/* Shared DV/DR flow data section. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
/* Memory Pool for mlx5 flow resources. */
/* Shared interrupt handler section. */
- pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
- uint32_t intr_cnt; /* Interrupt handler reference counter. */
struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
- uint32_t devx_intr_cnt; /* Devx interrupt handler reference counter. */
struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
struct mlx5_devx_obj *tis; /* TIS object. */
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
- struct mlx5_ibv_shared *sh; /* Shared IB device context. */
+ struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
uint32_t ibv_port; /* IB device port number. */
struct rte_pci_device *pci_dev; /* Backend PCI device. */
struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
+void mlx5_dev_close(struct rte_eth_dev *dev);
/* Macro to iterate over all valid ports for mlx5 driver. */
#define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
for (port_id = mlx5_eth_find_next(0, pci_dev); \
port_id < RTE_MAX_ETHPORTS; \
port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
+int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
+struct mlx5_dev_ctx_shared *
+mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config);
+void mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh);
+void mlx5_free_table_hash_list(struct mlx5_priv *priv);
+int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
+void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
+ struct mlx5_dev_config *config);
+void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
+int mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
+ struct mlx5_dev_config *config);
+int mlx5_init_once(void);
/* mlx5_ethdev.c */
void mlx5_dev_interrupt_handler_devx(void *arg);
void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
-void mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev);
-void mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
-void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
uint64_t async_id, int status);
-void mlx5_set_query_alarm(struct mlx5_ibv_shared *sh);
+void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
void mlx5_flow_query_alarm(void *arg);
uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
struct rte_flow_error *error);
void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm);
+/* mlx5_os.c */
+struct rte_pci_driver;
+const char *mlx5_os_get_ctx_device_name(void *ctx);
+const char *mlx5_os_get_ctx_device_path(void *ctx);
+uint32_t mlx5_os_get_umem_id(void *umem);
+int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
+void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
+int mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config,
+ struct mlx5_dev_ctx_shared *sh);
+int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
+int mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev);
+void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
+void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
#endif /* RTE_PMD_MLX5_H_ */