Implement LRO support using a single RQ object per DPDK RxQ.
When LRO is enabled, Rx queue objects are created through DevX (RQ)
instead of Verbs (WQ), and the TIR of a hash Rx queue created for an
IPv4/IPv6 + TCP flow is configured with the LRO timeout, maximal
message size and enable mask.
Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
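---
A minimal sketch (not part of the patch) of the application-side
configuration that activates this path, assuming mlx5_lro_on() keys
off the DEV_RX_OFFLOAD_TCP_LRO Rx offload; port_enable_lro() and its
parameters are hypothetical names:

	#include <rte_ethdev.h>

	static int
	port_enable_lro(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf port_conf = {
			.rxmode = {
				.offloads = DEV_RX_OFFLOAD_TCP_LRO,
			},
		};

		/* With TCP LRO requested, mlx5 creates Rx queues as DevX
		 * RQs and programs LRO attributes on the TIR. */
		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq,
					     &port_conf);
	}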
(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
MLX5_FLOW_LAYER_OUTER_L4)
+/* LRO support masks, i.e. the flow contains IPv4/IPv6 and TCP. */
+#define MLX5_FLOW_LAYER_IPV4_LRO \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_IPV6_LRO \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+
/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
uint32_t attr;
};
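+/* These values are passed verbatim as the TIR lro_enable_mask:
+ * bit 0 enables IPv4 LRO, bit 1 enables IPv6 LRO.
+ */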
+#define MLX5_FLOW_IPV4_LRO (1 << 0)
+#define MLX5_FLOW_IPV6_LRO (1 << 1)
+
/**
* Initialize flow attributes structure according to flow items' types.
*
dv->hash_fields,
(*flow->queue),
flow->rss.queue_num);
- if (!hrxq)
+ if (!hrxq) {
+ int lro = 0;
+
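+ /* LRO can be enabled in the TIR only for flows that
+ * match both an L3 (IPv4/IPv6) and a TCP layer. */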
+ if (mlx5_lro_on(dev)) {
+ if ((dev_flow->layers &
+ MLX5_FLOW_LAYER_IPV4_LRO)
+ == MLX5_FLOW_LAYER_IPV4_LRO)
+ lro = MLX5_FLOW_IPV4_LRO;
+ else if ((dev_flow->layers &
+ MLX5_FLOW_LAYER_IPV6_LRO)
+ == MLX5_FLOW_LAYER_IPV6_LRO)
+ lro = MLX5_FLOW_IPV6_LRO;
+ }
hrxq = mlx5_hrxq_new
(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields, (*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_FLOW_LAYER_TUNNEL), lro);
+ }
+
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
(*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_FLOW_LAYER_TUNNEL),
+ 0);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
* Number of queues.
* @param tunnel
* Tunnel type.
+ * @param lro
+ * LRO configuration bitmask: MLX5_FLOW_IPV4_LRO or MLX5_FLOW_IPV6_LRO
+ * when the flow rule contains IPv4/IPv6 and TCP, 0 otherwise.
*
* @return
* The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- int tunnel __rte_unused)
+ int tunnel __rte_unused, int lro)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
if (dev->data->dev_conf.lpbk_mode)
tir_attr.self_lb_block =
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (lro) {
+ tir_attr.lro_timeout_period_usecs =
+ priv->config.lro.timeout;
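+ /* Maximal aggregated message size, in 256-byte granularity
+ * (0xff is the 8-bit field maximum). */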
+ tir_attr.lro_max_msg_sz = 0xff;
+ tir_attr.lro_enable_mask = lro;
+ }
tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
if (!tir) {
DRV_LOG(ERR, "port %u cannot create DevX TIR",
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- int tunnel __rte_unused);
+ int tunnel __rte_unused, int lro);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
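+ /* LRO is implemented on DevX RQs, so enabling it forces all
+ * Rx queue objects to be created through DevX. */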
+ unsigned int lro_on = mlx5_lro_on(dev);
+ enum mlx5_rxq_obj_type obj_type = lro_on ? MLX5_RXQ_OBJ_TYPE_DEVX_RQ :
+ MLX5_RXQ_OBJ_TYPE_IBV;
/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
if (mlx5_mprq_alloc_mp(dev)) {
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
- rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i,
- MLX5_RXQ_OBJ_TYPE_DEVX_RQ);
+ rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
if (!rxq_ctrl->obj)
goto error;
- rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
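+ /* Record the hardware queue number, exposed as wq_num by
+ * Verbs WQs and as id by DevX RQs. */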
+ if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
+ rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
+ else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
+ rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
}
return 0;
error: