X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.h;h=1e9345af61dfcc9c8d2866509f2e39908e63373f;hb=d0b3ef1a6e3804c76f0d35b16946fc25d54443ab;hp=3a4bd98a273bbd762660084ee07dc1021a816bc1;hpb=41c2bb635724fdf118c28878ff0a4e97c2b79e63;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 3a4bd98a27..1e9345af61 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -126,6 +126,7 @@ struct mlx5_rxq_data {
 	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
 	unsigned int lro:1; /* Enable LRO. */
 	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
+	unsigned int mcqe_format:3; /* CQE compression miniCQE format. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
@@ -135,6 +136,7 @@ struct mlx5_rxq_data {
 	uint32_t rq_pi;
 	uint32_t cq_ci;
 	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
+	uint32_t byte_mask;
 	union {
 		struct rxq_zip zip; /* Compressed context. */
 		uint16_t decompressed;
@@ -309,9 +311,6 @@ struct mlx5_txq_ctrl {
 
 extern uint8_t rss_hash_default_key[];
 
-int mlx5_check_mprq_support(struct rte_eth_dev *dev);
-int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
-int mlx5_mprq_enabled(struct rte_eth_dev *dev);
 unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
 int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
 int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
@@ -350,6 +349,12 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_obj *ind_tbl,
 			       bool standalone);
+int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+			     struct mlx5_ind_table_obj *ind_tbl);
+int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+			      struct mlx5_ind_table_obj *ind_tbl,
+			      uint16_t *queues, const uint32_t queues_n,
+			      bool standalone);
 struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
 		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
 int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
@@ -912,4 +917,74 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 	return MLX5_RXQ_CODE_EXIT;
 }
 
+/**
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   1 if supported, negative errno value if not.
+ */
+static __rte_always_inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (priv->config.mprq.enabled &&
+	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+		return 1;
+	return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ *   Pointer to receive queue structure.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+static __rte_always_inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+	return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+static __rte_always_inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
+	uint16_t n = 0;
+	uint16_t n_ibv = 0;
+
+	if (mlx5_check_mprq_support(dev) < 0)
+		return 0;
+	/* All the configured queues should be enabled. */
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+			(rxq, struct mlx5_rxq_ctrl, rxq);
+
+		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+			continue;
+		n_ibv++;
+		if (mlx5_rxq_mprq_enabled(rxq))
+			++n;
+	}
+	/* Multi-Packet RQ can't be partially configured. */
+	MLX5_ASSERT(n == 0 || n == n_ibv);
+	return n == n_ibv;
+}
 #endif /* RTE_PMD_MLX5_RXTX_H_ */
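
Usage sketch (hypothetical, not part of the patch): the inlined mlx5_mprq_enabled() already folds in the mlx5_check_mprq_support() result, so a setup path needs only one gating call before touching MPRQ resources. The function example_mprq_setup() below is illustrative only and assumes this header is included; mlx5_mprq_alloc_mp() is declared in the hunk above.

	static int
	example_mprq_setup(struct rte_eth_dev *dev)
	{
		/* One call gates both conditions: mlx5_mprq_enabled()
		 * returns 0 when mlx5_check_mprq_support() fails or when
		 * the Rx queues are not uniformly configured for MPRQ.
		 */
		if (!mlx5_mprq_enabled(dev))
			return 0;
		/* Allocate the Multi-Packet RQ mempool (declared above). */
		return mlx5_mprq_alloc_mp(dev);
	}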