X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.h;h=4f0fda0dec0bb6e9eae95d7b6776164b31f579df;hb=784b83495bc5c40c5aa91d1e4d3966ec22e59e38;hp=3a4bd98a273bbd762660084ee07dc1021a816bc1;hpb=41c2bb635724fdf118c28878ff0a4e97c2b79e63;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 3a4bd98a27..4f0fda0dec 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -126,6 +126,7 @@ struct mlx5_rxq_data {
 	unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */
 	unsigned int lro:1; /* Enable LRO. */
 	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
+	unsigned int mcqe_format:3; /* CQE compression format. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
@@ -135,6 +136,7 @@ struct mlx5_rxq_data {
 	uint32_t rq_pi;
 	uint32_t cq_ci;
 	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
+	uint32_t byte_mask;
 	union {
 		struct rxq_zip zip; /* Compressed context. */
 		uint16_t decompressed;
@@ -166,6 +168,7 @@ struct mlx5_rxq_data {
 	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t flow_meta_port_mask;
 	uint32_t rxseg_n; /* Number of split segment descriptions. */
 	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
 	/* Buffer split segment descriptions - sizes, offsets, pools. */
@@ -191,14 +194,6 @@ struct mlx5_rxq_ctrl {
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
 	uint16_t dump_file_n; /* Number of dump files. */
-	struct mlx5_devx_dbr_page *rq_dbrec_page;
-	uint64_t rq_dbr_offset;
-	/* Storing RQ door-bell information, needed when freeing door-bell. */
-	struct mlx5_devx_dbr_page *cq_dbrec_page;
-	uint64_t cq_dbr_offset;
-	/* Storing CQ door-bell information, needed when freeing door-bell. */
-	void *wq_umem; /* WQ buffer registration info. */
-	void *cq_umem; /* CQ buffer registration info. */
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 	uint32_t hairpin_status; /* Hairpin binding status. */
 };
@@ -215,6 +210,7 @@ struct mlx5_txq_local {
 	uint16_t wqe_free; /* available wqe remain. */
 	uint16_t mbuf_off; /* data offset in current mbuf. */
 	uint16_t mbuf_nseg; /* number of remaining mbuf. */
+	uint16_t mbuf_free; /* number of inline mbufs to free. */
 };
 
 /* TX queue descriptor. */
@@ -248,6 +244,7 @@ struct mlx5_txq_data {
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
 	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
 	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
 	uint16_t inlen_mode; /* Minimal data length to inline. */
@@ -309,9 +306,6 @@ struct mlx5_txq_ctrl {
 
 extern uint8_t rss_hash_default_key[];
 
-int mlx5_check_mprq_support(struct rte_eth_dev *dev);
-int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
-int mlx5_mprq_enabled(struct rte_eth_dev *dev);
 unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
 int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
 int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
@@ -350,6 +344,12 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_obj *ind_tbl,
 			       bool standalone);
+int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+			     struct mlx5_ind_table_obj *ind_tbl);
+int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+			      struct mlx5_ind_table_obj *ind_tbl,
+			      uint16_t *queues, const uint32_t queues_n,
+			      bool standalone);
 struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
 		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
 int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
@@ -912,4 +912,74 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 	return MLX5_RXQ_CODE_EXIT;
 }
 
+/**
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   1 if supported, negative errno value if not.
+ */
+static __rte_always_inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (priv->config.mprq.enabled &&
+	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+		return 1;
+	return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ *   Pointer to receive queue structure.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+static __rte_always_inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+	return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+static __rte_always_inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
+	uint16_t n = 0;
+	uint16_t n_ibv = 0;
+
+	if (mlx5_check_mprq_support(dev) < 0)
+		return 0;
+	/* All the configured queues should be enabled. */
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+			(rxq, struct mlx5_rxq_ctrl, rxq);
+
+		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+			continue;
+		n_ibv++;
+		if (mlx5_rxq_mprq_enabled(rxq))
+			++n;
+	}
+	/* Multi-Packet RQ can't be partially configured. */
+	MLX5_ASSERT(n == 0 || n == n_ibv);
+	return n == n_ibv;
+}
 #endif /* RTE_PMD_MLX5_RXTX_H_ */