rte_atomic32_t refcnt; /**< Reference counter. */
};
-#define MLX5_MAX_TABLES 1024
-#define MLX5_MAX_TABLES_FDB 32
-#define MLX5_GROUP_FACTOR 1
+#define MLX5_MAX_TABLES UINT16_MAX
+#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
#define MLX5_DBR_SIZE 8
char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ibv_device_attr_ex device_attr; /* Device properties. */
- struct rte_pci_device *pci_dev; /* Backend PCI device. */
LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
/**< Called by memory event callback. */
struct {
/* RX Direct Rules tables. */
void *tx_domain; /* TX Direct Rules name space handle. */
struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
+ /* TX Direct Rules tables. */
void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
+ void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
LIST_HEAD(jump, mlx5_flow_dv_jump_tbl_resource) jump_tbl;
LIST_HEAD(port_id_action_list, mlx5_flow_dv_port_id_action_resource)
port_id_action_list; /* List of port ID actions. */
+ LIST_HEAD(push_vlan_action_list, mlx5_flow_dv_push_vlan_action_resource)
+ push_vlan_action_list; /* List of push VLAN actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
/* Shared interrupt handler section. */
pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_ibv_shared *sh; /* Shared IB device context. */
uint32_t ibv_port; /* IB device port number. */
+ struct rte_pci_device *pci_dev; /* Backend PCI device. */
struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
/* Bit-field of MAC addresses owned by the PMD. */
unsigned int counter_fallback:1; /* Use counter fallback management. */
uint16_t domain_id; /* Switch domain identifier. */
uint16_t vport_id; /* Associated VF vport index (if any). */
+ uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
+ uint32_t vport_meta_mask; /* Used for vport index field match mask. */
int32_t representor_id; /* Port representor identifier. */
unsigned int if_index; /* Associated kernel network device index. */
/* RX/TX queues. */
struct mlx5_devx_dbr_page **dbr_page);
int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id,
uint64_t offset);
+int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
/* mlx5_ethdev.c */
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
unsigned int flags);
int mlx5_dev_configure(struct rte_eth_dev *dev);
-void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
+int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
struct rte_eth_fc_conf *fc_conf);
int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
- struct rte_pci_addr *pci_addr);
+int mlx5_dev_to_pci_addr(const char *dev_path,
+ struct rte_pci_addr *pci_addr);
void mlx5_dev_link_status_handler(void *arg);
void mlx5_dev_interrupt_handler(void *arg);
void mlx5_dev_interrupt_handler_devx(void *arg);
struct mlx5_switch_info *port_info_out);
void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
rte_intr_callback_fn cb_fn, void *cb_arg);
+int mlx5_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
/* mlx5_mac.c */
/* mlx5_rxmode.c */
-void mlx5_promiscuous_enable(struct rte_eth_dev *dev);
-void mlx5_promiscuous_disable(struct rte_eth_dev *dev);
-void mlx5_allmulticast_enable(struct rte_eth_dev *dev);
-void mlx5_allmulticast_disable(struct rte_eth_dev *dev);
+int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
+int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
+int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
+int mlx5_allmulticast_disable(struct rte_eth_dev *dev);
/* mlx5_stats.c */
void mlx5_stats_init(struct rte_eth_dev *dev);
int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
-void mlx5_stats_reset(struct rte_eth_dev *dev);
+int mlx5_stats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
unsigned int n);
-void mlx5_xstats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names,
unsigned int n);
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
struct rte_flow_item_eth *eth_spec,
struct rte_flow_item_eth *eth_mask);
+struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,