MLX5_IPOOL_TAG, /* Pool for tag resource. */
MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+ MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
+ MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
#endif
MLX5_IPOOL_MTR, /* Pool for meter resource. */
MLX5_IPOOL_MCP, /* Pool for metadata resource. */
#define CNT_SIZE (sizeof(struct mlx5_flow_counter))
#define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext))
#define AGE_SIZE (sizeof(struct mlx5_age_param))
-#define MLX5_AGING_TIME_DELAY 7
#define CNT_POOL_TYPE_EXT (1 << 0)
#define CNT_POOL_TYPE_AGE (1 << 1)
#define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT)
*/
#define POOL_IDX_INVALID UINT16_MAX
-struct mlx5_flow_counter_pool;
-
-/*age status*/
+/* Age status. */
enum {
AGE_FREE, /* Initialized state. */
AGE_CANDIDATE, /* Counter assigned to flows. */
/* Counter age parameter. */
struct mlx5_age_param {
- rte_atomic16_t state; /**< Age state. */
+ uint16_t state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
- uint32_t timeout:15; /**< Age timeout in unit of 0.1sec. */
- uint32_t expire:16; /**< Expire time(0.1sec) in the future. */
+ uint32_t timeout:24; /**< Aging timeout in seconds. */
+ uint32_t sec_since_last_hit;
+ /**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
uint64_t bytes;
};
-struct mlx5_flow_counter_pool;
/* Generic counters information. */
struct mlx5_flow_counter {
TAILQ_ENTRY(mlx5_flow_counter) next;
rte_atomic64_t a64_dcs;
};
/* The devx object of the minimum counter ID. */
+ uint64_t time_of_last_age_check;
+ /* System time (from rte_rdtsc()) read in the last aging check. */
uint32_t index:28; /* Pool index in container. */
uint32_t type:2; /* Memory type behind the counter array. */
uint32_t skip_cnt:1; /* Pool contains skipped counter. */
struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
};
-struct mlx5_counter_stats_raw;
-
/* Memory management structure for group of counter statistics raws. */
struct mlx5_counter_stats_mem_mng {
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
((age_info)->flags & (1 << (BIT)))
#define GET_PORT_AGE_INFO(priv) \
(&((priv)->sh->port[(priv)->dev_port - 1].age_info))
+/* Current time in seconds. */
+#define MLX5_CURR_TIME_SEC (rte_rdtsc() / rte_get_tsc_hz())
/* Aging information for per port. */
struct mlx5_age_info {
- uint8_t flags; /*Indicate if is new event or need be trigered*/
+ uint8_t flags; /* Indicate if is new event or need to be triggered. */
struct mlx5_counters aged_counters; /* Aged flow counter list. */
rte_spinlock_t aged_sl; /* Aged flow counter list lock. */
};
};
#define MLX5_MAX_TABLES UINT16_MAX
-#define MLX5_FLOW_TABLE_LEVEL_METER (UINT16_MAX - 3)
-#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (UINT16_MAX - 2)
#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
/* Reserve the last two tables for metadata register copy. */
#define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
/* Tables for metering splits should be added here. */
#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
+#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
+#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3)
#define MLX5_MAX_TABLES_FDB UINT16_MAX
+#define MLX5_FLOW_TABLE_FACTOR 10
/* ID generation structure. */
struct mlx5_flow_id_pool {
uint32_t tick; /* Completion tick duration in nanoseconds. */
uint32_t test; /* Packet pacing test mode. */
int32_t skew; /* Scheduling skew. */
- uint32_t eqn; /* Event Queue number. */
struct rte_intr_handle intr_handle; /* Periodic interrupt. */
void *echan; /* Event Channel. */
struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
LIST_ENTRY(mlx5_dev_ctx_shared) next;
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
+ uint32_t eqn; /* Event Queue number. */
uint32_t max_port; /* Maximal IB device port index. */
void *ctx; /* Verbs/DV/DevX context. */
void *pd; /* Protection Domain. */
/* Direct Rules tables for FDB, NIC TX+RX */
void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
- uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */
+ struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
struct mlx5_hlist *modify_cmds;
struct mlx5_hlist *tag_table;
uint32_t port_id_action_list; /* List of port ID actions. */
uint32_t push_vlan_action_list; /* List of push VLAN actions. */
+ uint32_t sample_action_list; /* List of sample actions. */
+ uint32_t dest_array_list; /* List of destination array actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
struct mlx5_flow_default_miss_resource default_miss;
/* Default miss action resource structure. */
#define MLX5_PROC_PRIV(port_id) \
((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
-enum mlx5_rxq_obj_type {
- MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */
- MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */
- MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN,
- /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */
-};
-
/* Verbs/DevX Rx queue elements. */
struct mlx5_rxq_obj {
LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
- enum mlx5_rxq_obj_type type;
int fd; /* File descriptor for event channel */
RTE_STD_C11
union {
uint8_t rss_key[]; /* Hash key. */
};
+/* Verbs/DevX Tx queue elements. */
+struct mlx5_txq_obj {
+ LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
+ RTE_STD_C11
+ union {
+ struct {
+ /* Variant used with opaque Verbs objects. */
+ void *cq; /* Completion Queue. */
+ void *qp; /* Queue Pair. */
+ };
+ struct {
+ /* Variant used with DevX-created SQ/TIS objects. */
+ struct mlx5_devx_obj *sq;
+ /* DevX object for the send queue (SQ). */
+ struct mlx5_devx_obj *tis; /* The TIS object. */
+ };
+ struct {
+ /* Fully DevX-managed variant holding the raw queue memory. */
+ struct rte_eth_dev *dev; /* Back reference to the port device. */
+ struct mlx5_devx_obj *cq_devx; /* DevX CQ object. */
+ void *cq_umem; /* CQ user memory registration (umem) handle. */
+ void *cq_buf; /* CQ ring buffer. */
+ int64_t cq_dbrec_offset; /* CQ doorbell record offset in its page. */
+ struct mlx5_devx_dbr_page *cq_dbrec_page; /* CQ doorbell record page. */
+ struct mlx5_devx_obj *sq_devx; /* DevX SQ object. */
+ void *sq_umem; /* SQ user memory registration (umem) handle. */
+ void *sq_buf; /* SQ ring buffer. */
+ int64_t sq_dbrec_offset; /* SQ doorbell record offset in its page. */
+ struct mlx5_devx_dbr_page *sq_dbrec_page; /* SQ doorbell record page. */
+ };
+ };
+};
+
+/* Rx queue HW state transitions.
+ * NOTE(review): presumably passed as the 'type' argument of
+ * mlx5_obj_ops.rxq_obj_modify() (declared as uint8_t there) — confirm.
+ */
+enum mlx5_rxq_modify_type {
+ MLX5_RXQ_MOD_ERR2RST, /* Modify state from error to reset. */
+ MLX5_RXQ_MOD_RST2RDY, /* Modify state from reset to ready. */
+ MLX5_RXQ_MOD_RDY2ERR, /* Modify state from ready to error. */
+ MLX5_RXQ_MOD_RDY2RST, /* Modify state from ready to reset. */
+};
+
+/* Tx queue HW state transitions, passed as the 'type' argument of
+ * mlx5_obj_ops.txq_obj_modify().
+ */
+enum mlx5_txq_modify_type {
+ MLX5_TXQ_MOD_RDY2RDY, /* Modify state from ready to ready. */
+ MLX5_TXQ_MOD_RST2RDY, /* Modify state from reset to ready. */
+ MLX5_TXQ_MOD_RDY2RST, /* Modify state from ready to reset. */
+ MLX5_TXQ_MOD_ERR2RDY, /* Modify state from error to ready. */
+};
+
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
- int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, bool is_start);
+ int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);
void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
struct mlx5_ind_table_obj *ind_tbl);
int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
int tunnel __rte_unused);
void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
- struct mlx5_hrxq *(*hrxq_drop_new)(struct rte_eth_dev *dev);
- void (*hrxq_drop_release)(struct rte_eth_dev *dev);
+ int (*drop_action_create)(struct rte_eth_dev *dev);
+ void (*drop_action_destroy)(struct rte_eth_dev *dev);
+ int (*txq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
+ int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
+ enum mlx5_txq_modify_type type, uint8_t dev_port);
+ void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
};
struct mlx5_priv {
unsigned int counter_fallback:1; /* Use counter fallback management. */
unsigned int mtr_en:1; /* Whether support meter. */
unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+ unsigned int sampler_en:1; /* Whether support sampler. */
uint16_t domain_id; /* Switch domain identifier. */
uint16_t vport_id; /* Associated VF vport index (if any). */
uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
int32_t representor_id; /* Port representor identifier. */
int32_t pf_bond; /* >=0 means PF index in bonding configuration. */
unsigned int if_index; /* Associated kernel network device index. */
+ uint32_t bond_ifindex; /**< Bond interface index. */
+ char bond_name[IF_NAMESIZE]; /**< Bond interface name. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
-void mlx5_dev_close(struct rte_eth_dev *dev);
+int mlx5_dev_close(struct rte_eth_dev *dev);
/* Macro to iterate over all valid ports for mlx5 driver. */
#define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
struct mlx5_switch_info *port_info_out);
void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
rte_intr_callback_fn cb_fn, void *cb_arg);
+int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
+ char *ifname);
int mlx5_get_module_info(struct rte_eth_dev *dev,
struct rte_eth_dev_module_info *modinfo);
int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
/* mlx5_trigger.c */
int mlx5_dev_start(struct rte_eth_dev *dev);
-void mlx5_dev_stop(struct rte_eth_dev *dev);
+int mlx5_dev_stop(struct rte_eth_dev *dev);
int mlx5_traffic_enable(struct rte_eth_dev *dev);
void mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);