PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
};
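/*
 * Sketch: how the two new ConnectX-6 IDs would be matched at probe time
 * through an rte_pci_id table. The table name is illustrative and
 * PCI_VENDOR_ID_MELLANOX (0x15b3) is assumed to be defined by the
 * driver; only the device IDs above come from this change.
 */
static const struct rte_pci_id mlx5_pci_ids_sketch[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			 PCI_DEVICE_ID_MELLANOX_CONNECTX6) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			 PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) },
	{ .vendor_id = 0 } /* Sentinel entry terminating the table. */
};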
struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
};
+struct mlx5_stats_ctrl {
+ /* Base for imissed counter. */
+ uint64_t imissed_base;
+};
+
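/*
 * Sketch: report imissed relative to the last reset, using the base
 * snapshot above instead of clearing the hardware counter. The helper
 * mlx5_read_imissed_counter() is hypothetical, standing in for however
 * the driver reads the raw device counter.
 */
static uint64_t
mlx5_imissed_sketch(struct priv *priv)
{
	uint64_t raw = mlx5_read_imissed_counter(priv); /* Raw HW value. */

	return raw - priv->stats_ctrl.imissed_base; /* Delta since reset. */
}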
/* Flow list. */
TAILQ_HEAD(mlx5_flows, rte_flow);
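/*
 * Sketch: TAILQ_HEAD() above only declares the struct mlx5_flows head
 * type; traversal uses the standard <sys/queue.h> macros. The 'next'
 * entry field inside struct rte_flow is an assumption for illustration.
 */
static unsigned int
mlx5_flows_count_sketch(struct mlx5_flows *list)
{
	struct rte_flow *flow;
	unsigned int n = 0;

	TAILQ_FOREACH(flow, list, next) /* Entry field name assumed. */
		++n;
	return n;
}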
unsigned int tunnel_en:1;
/* Whether tunnel stateless offloads are supported. */
unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
- unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
+ unsigned int cqe_pad:1; /* CQE padding is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
int txq_inline; /* Maximum packet size for inlining. */
int txqs_inline; /* Queue number threshold for inlining. */
+ int txqs_vec; /* Queue number threshold for vectorized Tx. */
int inline_max_packet_sz; /* Max packet size for inlining. */
};
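/*
 * Sketch: how the new txqs_vec threshold would gate the vectorized Tx
 * path. Semantics assumed for illustration: vector Tx applies only
 * while the configured Tx queue count does not exceed the threshold
 * and tx_vec_en is left set.
 */
static int
mlx5_tx_vec_eligible_sketch(const struct mlx5_dev_config *config,
			    unsigned int txqs_n)
{
	if (!config->tx_vec_en)
		return 0; /* Vector Tx disabled by devargs. */
	if (config->txqs_vec >= 0 && txqs_n > (unsigned int)config->txqs_vec)
		return 0; /* Too many queues for the vector path. */
	return 1;
}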
struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
};
-struct mnl_socket;
+struct mlx5_flow_tcf_context;
struct priv {
LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
/* Verbs Indirection tables. */
LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
+ LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
+ LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
+ struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
int primary_socket; /* Unix socket for primary process. */
void *uar_base; /* Reserved address space for UAR mapping. */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
/* UAR same-page access control required in 32-bit implementations. */
#endif
- struct mnl_socket *mnl_socket; /* Libmnl socket. */
+ struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
};
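/*
 * Sketch: a plausible shape for the new TC flower context replacing the
 * bare libmnl handle. Only the embedded mnl socket is implied by this
 * change; the other fields are assumptions for illustration.
 */
struct mlx5_flow_tcf_context {
	struct mnl_socket *nl; /* Underlying libmnl netlink socket. */
	uint32_t seq;          /* Rolling sequence number for requests. */
	uint32_t buf_size;     /* Size of the message buffer below. */
	uint8_t *buf;          /* Scratch buffer for netlink messages. */
};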
#define PORT_ID(priv) ((priv)->dev_data->port_id)
/* mlx5_stats.c */
-void mlx5_xstats_init(struct rte_eth_dev *dev);
+void mlx5_stats_init(struct rte_eth_dev *dev);
int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx5_stats_reset(struct rte_eth_dev *dev);
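/*
 * Sketch: with the base field, mlx5_stats_reset() can simply re-snapshot
 * the raw counter instead of clearing hardware state. Reuses the
 * hypothetical mlx5_read_imissed_counter() helper from the sketch above.
 */
static void
mlx5_imissed_reset_sketch(struct priv *priv)
{
	priv->stats_ctrl.imissed_base = mlx5_read_imissed_counter(priv);
}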
int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,