diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 271b648d47..30f0af3920 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -40,7 +40,7 @@
 #include "mlx5_glue.h"
 
 /* Support tunnel matching. */
-#define MLX5_FLOW_TUNNEL 8
+#define MLX5_FLOW_TUNNEL 9
 
 struct mlx5_rxq_stats {
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -144,7 +144,7 @@ struct mlx5_rxq_data {
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
 	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
-	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
+	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
 	void *cq_uar; /* CQ user access region. */
 	uint32_t cqn; /* CQ number. */
@@ -166,6 +166,7 @@ enum mlx5_rxq_obj_type {
 enum mlx5_rxq_type {
 	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
 	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
+	MLX5_RXQ_TYPE_UNDEFINED,
 };
 
 /* Verbs/DevX Rx queue elements. */
@@ -286,6 +287,8 @@ struct mlx5_txq_data {
 	/* When set TX offload for tunneled packets are supported. */
 	uint16_t swp_en:1; /* Whether SW parser is enabled. */
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
+	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
+	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
 	uint16_t inlen_mode; /* Minimal data length to inline. */
@@ -406,6 +409,7 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				const uint16_t *queues, uint32_t queues_n);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
+enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
 void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
@@ -436,6 +440,7 @@ int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_txq_verify(struct rte_eth_dev *dev);
 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 
 /* mlx5_rxtx.c */
@@ -447,9 +452,6 @@ extern uint8_t mlx5_swp_types_table[];
 void mlx5_set_ptype_table(void);
 void mlx5_set_cksum_table(void);
 void mlx5_set_swp_types_table(void);
-__rte_noinline int mlx5_tx_error_cqe_handle
-			(struct mlx5_txq_data *restrict txq,
-			 volatile struct mlx5_err_cqe *err_cqe);
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
 __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
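
A note on the mbuf_initializer change above: widening the field from uint64_t to rte_xmm_t lets the vectorized Rx path keep a full 16-byte template, the mbuf rearm_data quadword plus the default ol_flags that follow it in struct rte_mbuf, and apply it with a single vector store per packet. Below is a minimal sketch of building such a template, assuming DPDK's rte_xmm_t union from rte_vect.h; the helper name and the zero default for the flags half are illustrative, not the driver's actual code.

	#include <rte_vect.h>
	#include <rte_mbuf.h>

	/* Build a 16-byte rearm template from a fully initialized mbuf.
	 * u64[0] mirrors the rearm_data quadword (data_off, refcnt,
	 * nb_segs, port); u64[1] stands in for the default ol_flags
	 * stored alongside it (assumed zero in this sketch). */
	static void
	rxq_build_mbuf_template(struct rte_mbuf *mb, rte_xmm_t *tmpl)
	{
		tmpl->u64[0] = *(uint64_t *)&mb->rearm_data;
		tmpl->u64[1] = 0;
	}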
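
The new mlx5_rxq_get_type() accessor pairs with the MLX5_RXQ_TYPE_UNDEFINED enumerator added above: a caller gets a definite answer even for an index that has no configured queue, instead of having to dereference a possibly missing control structure. A hedged usage sketch, assuming mlx5_rxtx.h is included; the validation helper and its policy are hypothetical:

	#include <errno.h>
	#include <rte_ethdev.h>

	/* Hypothetical helper: reject unconfigured queues and skip
	 * host-memory checks for hairpin queues. */
	static int
	flow_validate_rx_queue(struct rte_eth_dev *dev, uint16_t idx)
	{
		switch (mlx5_rxq_get_type(dev, idx)) {
		case MLX5_RXQ_TYPE_UNDEFINED:
			return -EINVAL; /* no such queue configured */
		case MLX5_RXQ_TYPE_HAIRPIN:
			return 0; /* no mbuf/mempool resources to check */
		case MLX5_RXQ_TYPE_STANDARD:
		default:
			/* ordinary per-queue validation would go here */
			return 0;
		}
	}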
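
The two Tx bit-fields added above describe the doorbell register mapping: db_nc records that the UAR page is mapped non-cached, and db_heu enables a heuristic for the trailing write barrier. The usual trade-off is that a write-combining (cached) mapping buffers the doorbell write and needs a barrier to push it to the NIC promptly, while a non-cached mapping does not. The sketch below shows how such bits could gate the barrier; rte_wmb() is a real DPDK barrier, but the helper, its burst_done parameter, and the exact skip policy are illustrative assumptions, not the mlx5 Tx path itself.

	#include <rte_atomic.h>

	/* Illustrative barrier selection after a doorbell write. */
	static inline void
	txq_doorbell_sync(struct mlx5_txq_data *txq, int burst_done)
	{
		if (txq->db_nc)
			return; /* non-cached: write is not buffered */
		if (txq->db_heu && !burst_done)
			return; /* heuristic: defer the flush mid-burst */
		rte_wmb(); /* flush the write-combining buffer */
	}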