MLX5_FLOW_TYPE_MAXI,
};
+/* The mode of delay drop for Rx queues. */
+enum mlx5_delay_drop_mode {
+ MLX5_DELAY_DROP_NONE = 0, /* All disabled. */
+ MLX5_DELAY_DROP_STANDARD = RTE_BIT32(0), /* Enabled for standard queues. */
+ MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Enabled for hairpin queues. */
+};
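/*
 * Illustrative sketch, not part of the patch: the delay drop mode is a
 * bitmask, so standard and hairpin queues can be enabled independently or
 * together. The helper below only demonstrates the intended bit usage;
 * its name is hypothetical.
 */
static inline int
delay_drop_hairpin_enabled(uint32_t delay_drop_mode)
{
	/* Both flags may be set at once, e.g. STANDARD | HAIRPIN. */
	return (delay_drop_mode & MLX5_DELAY_DROP_HAIRPIN) != 0;
}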
+
/* Hlist and list callback context. */
struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
 unsigned int dv_miss_info:1; /* Restore packet after partial HW miss. */
unsigned int allow_duplicate_pattern:1;
/* Allow/Prevent the duplicate rules pattern. */
+ unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
+ unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
- unsigned int stride_num_n; /* Number of strides. */
- unsigned int stride_size_n; /* Size of a stride. */
- unsigned int min_stride_size_n; /* Min size of a stride. */
- unsigned int max_stride_size_n; /* Max size of a stride. */
+ unsigned int log_stride_num; /* Log number of strides. */
+ unsigned int log_stride_size; /* Log size of a stride. */
+ unsigned int log_min_stride_size; /* Log min size of a stride. */
+ unsigned int log_max_stride_size; /* Log max size of a stride. */
unsigned int max_memcpy_len;
/* Maximum packet size to memcpy Rx packets. */
unsigned int min_rxqs_num;
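
/*
 * Illustrative sketch, not part of the patch: the MPRQ fields above now hold
 * log2 values, so the actual stride count and size are obtained by shifting.
 * The helper name is hypothetical; it only shows the conversion.
 */
static inline uint32_t
mprq_stride_size_bytes(unsigned int log_stride_size)
{
	/* e.g. log_stride_size == 11 -> 2048-byte strides. */
	return 1u << log_stride_size;
}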
struct mlx5_counter_stats_mem_mng {
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
struct mlx5_counter_stats_raw *raws;
- struct mlx5_devx_obj *dm;
- void *umem;
+ struct mlx5_pmd_wrapped_mr wm;
};
/* Raw memory structure for the counter statistics values of a pool. */
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering_read;
- bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
rte_spinlock_t sqsl;
struct mlx5_aso_cq cq;
struct mlx5_devx_sq sq_obj;
- volatile uint64_t *uar_addr;
struct mlx5_pmd_mr mr;
uint16_t pi;
uint32_t head;
#define MLX5_MTR_TABLE_ID_DROP 2
/* Priority of the meter policy matcher. */
#define MLX5_MTR_POLICY_MATCHER_PRIO 0
+/* Both green and yellow colors valid for now. */
+#define MLX5_MTR_POLICY_MODE_ALL 0
/* Default policy. */
#define MLX5_MTR_POLICY_MODE_DEF 1
/* Only green color valid. */
uint32_t base_index;
/**< The next index that can be used without any free elements. */
uint32_t *curr; /**< Pointer to the index to pop. */
- uint32_t *last; /**< Pointer to the last element in the empty arrray. */
+ uint32_t *last; /**< Pointer to the last element in the empty array. */
uint32_t max_id; /**< Maximum id can be allocated from the pool. */
};
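/*
 * Illustrative sketch, not part of the patch: one plausible consumption
 * pattern for the free-index fields above. Freed indices are popped through
 * "curr" until the free array is exhausted, after which "base_index" hands
 * out fresh indices up to "max_id". The struct and function below are
 * hypothetical simplifications, not the driver's actual pool code, and
 * omit locking and array growth.
 */
struct id_pool_sketch {
	uint32_t base_index; /* Next never-used index. */
	uint32_t *curr;      /* Next free index to pop. */
	uint32_t *last;      /* End of the free-index array. */
	uint32_t max_id;     /* Upper bound for allocation. */
};

static inline int
id_pool_alloc_sketch(struct id_pool_sketch *pool, uint32_t *id)
{
	if (pool->curr != pool->last)
		*id = *pool->curr++;      /* Reuse a previously freed index. */
	else if (pool->base_index < pool->max_id)
		*id = pool->base_index++; /* Hand out a brand-new index. */
	else
		return -1;                /* Pool exhausted. */
	return 0;
}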
void *pp; /* Packet pacing context. */
uint16_t pp_id; /* Packet pacing context index. */
uint16_t ts_n; /* Number of captured timestamps. */
- uint16_t ts_p; /* Pointer to statisticks timestamp. */
+ uint16_t ts_p; /* Pointer to statistics timestamp. */
struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
};
-/* Pattern field dscriptor - how to translate flex pattern into samples. */
+/* Pattern field descriptor - how to translate flex pattern into samples. */
__extension__
struct mlx5_flex_pattern_field {
uint16_t width:6;
/* Shared DV/DR flow data section. */
uint32_t dv_meta_mask; /* flow META metadata supported mask. */
uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
- uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
+ uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
void *fdb_domain; /* FDB Direct Rules name space handle. */
void *rx_domain; /* RX Direct Rules name space handle. */
void *tx_domain; /* TX Direct Rules name space handle. */
#ifndef RTE_ARCH_64
- rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
+ rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
/* UAR same-page access control required in 32bit implementations. */
#endif
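/*
 * Illustrative sketch, not part of the patch: on 32-bit builds a 64-bit
 * doorbell write to the UAR cannot be issued as a single atomic store, so
 * writes sharing a UAR page are serialized with the spinlocks above. The
 * helper name is hypothetical and only shows the locking pattern.
 */
static inline void
uar_write64_locked_sketch(uint64_t val, volatile void *addr,
			  rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
	/* Two 32-bit stores; the lock keeps them from interleaving. */
	*(volatile uint32_t *)addr = (uint32_t)val;
	rte_io_wmb();
	*(volatile uint32_t *)((volatile uint8_t *)addr + 4) =
		(uint32_t)(val >> 32);
	rte_spinlock_unlock(lock);
}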
struct mlx5_devx_obj *tis[16]; /* TIS object. */
struct mlx5_devx_obj *td; /* Transport domain. */
 struct mlx5_lag lag; /* LAG attributes. */
- void *tx_uar; /* Tx/packet pacing shared UAR. */
+ struct mlx5_uar tx_uar; /* DevX UAR for Tx, Txpp, and ASO SQs. */
+ struct mlx5_uar rx_uar; /* DevX UAR for Rx. */
+ struct mlx5_proc_priv *pppriv; /* Pointer to the primary process private data. */
struct mlx5_ecpri_parser_profile ecpri_parser;
/* Flex parser profiles information. */
- void *devx_rx_uar; /* DevX UAR for Rx. */
LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
struct mlx5_aso_age_mng *aso_age_mng;
/* Management data for aging mechanism using ASO Flow Hit. */
struct mlx5_proc_priv {
size_t uar_table_sz;
/* Size of UAR register table. */
- void *uar_table[];
+ struct mlx5_uar_data uar_table[];
/* Table of UAR registers for each process. */
};
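/*
 * Illustrative sketch, not part of the patch: uar_table[] is a flexible
 * array sized per process, so the allocation must account for the element
 * count explicitly. The function name and the "txqs_n" parameter are
 * hypothetical; only the sizing arithmetic is the point.
 */
static inline struct mlx5_proc_priv *
proc_priv_alloc_sketch(uint16_t txqs_n)
{
	size_t ppriv_size = sizeof(struct mlx5_proc_priv) +
			    txqs_n * sizeof(struct mlx5_uar_data);
	struct mlx5_proc_priv *ppriv = calloc(1, ppriv_size);

	if (ppriv != NULL)
		ppriv->uar_table_sz = txqs_n;
	return ppriv;
}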
int mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
struct mlx5_dev_config *config,
struct rte_device *dpdk_dev);
-int mlx5_dev_configure(struct rte_eth_dev *dev);
int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
void mlx5_os_stats_init(struct rte_eth_dev *dev);
+int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev);
/* mlx5_mac.c */
void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
/* mlx5_os.c */
+
struct rte_pci_driver;
int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
struct mlx5_dev_attr *dev_attr);