Currently, the mlx5_flow_handle struct is not fully aligned and has some
bits wasted. The members can be optimized and reorganized to save memory.
1. As metadata and the meter share the same flow match id, the flow
id is now limited to 24 bits because the 8 MSBs are used for the meter
color. Align the flow id with the other bit-field members into 32 bits
to save mlx5 flow handle memory.
2. The vf_vlan member in struct mlx5_flow_handle_dv was already moved to
struct mlx5_flow_handle. Remove the legacy vf_vlan member from struct
mlx5_flow_handle_dv.
3. Reorganize the vf_vlan member in mlx5_flow_handle next to the
SILIST_ENTRY member to make it align to 8 bytes.
4. Reorganize the modify header member in mlx5_flow_handle_dv next to the
ILIST_ENTRY member to make it align to 8 bytes.
5. Introduce the __rte_packed attribute to make the struct tightly packed.
In total this saves 20 bytes of memory for the mlx5_flow_handle struct.
For the resource objects that have been converted to indexed, align
their member names with the rix_ prefix.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
- if (dev_handle->qrss_id)
- flow_qrss_free_id(dev, dev_handle->qrss_id);
+ if (dev_handle->split_flow_id)
+ flow_qrss_free_id(dev, dev_handle->split_flow_id);
* reallocation becomes possible (for example, for
* other flows in other threads).
*/
* reallocation becomes possible (for example, for
* other flows in other threads).
*/
- dev_flow->handle->qrss_id = qrss_id;
+ dev_flow->handle->split_flow_id = qrss_id;
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
if (ret < 0)
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
if (ret < 0)
ret = -rte_errno;
goto exit;
}
ret = -rte_errno;
goto exit;
}
- dev_flow->handle->mtr_flow_id = mtr_tag_id;
+ dev_flow->handle->split_flow_id = mtr_tag_id;
/* Setting the sfx group atrr. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
/* Setting the sfx group atrr. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
struct mlx5_flow_handle_dv {
/* Flow DV api: */
struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
struct mlx5_flow_handle_dv {
/* Flow DV api: */
struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
- uint32_t encap_decap;
- /**< Index to encap/decap resource in cache. */
struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
/**< Pointer to modify header resource in cache. */
struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
/**< Pointer to modify header resource in cache. */
- struct mlx5_vf_vlan vf_vlan;
- /**< Structure for VF VLAN workaround. */
- uint32_t push_vlan_res;
+ uint32_t rix_encap_decap;
+ /**< Index to encap/decap resource in cache. */
+ uint32_t rix_push_vlan;
/**< Index to push VLAN action resource in cache. */
/**< Index to push VLAN action resource in cache. */
/**< Index to the tag action. */
/**< Index to the tag action. */
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
SILIST_ENTRY(uint32_t)next;
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
SILIST_ENTRY(uint32_t)next;
+ struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
/**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *ib_flow; /**< Verbs flow pointer. */
/**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *ib_flow; /**< Verbs flow pointer. */
- struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
- union {
- uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
- uint32_t mtr_flow_id; /**< Unique meter match flow id. */
- };
+ uint32_t split_flow_id:28; /**< Sub flow unique match flow id. */
uint32_t mark:1; /**< Metadate rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
union {
uint32_t mark:1; /**< Metadate rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
union {
- uint32_t hrxq; /**< Hash Rx queue object index. */
- uint32_t jump; /**< Index to the jump action resource. */
- uint32_t port_id_action;
+ uint32_t rix_hrxq; /**< Hash Rx queue object index. */
+ uint32_t rix_jump; /**< Index to the jump action resource. */
+ uint32_t rix_port_id_action;
/**< Index to port ID action resource. */
/**< Index to port ID action resource. */
/**< Generic value indicates the fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
#endif
/**< Generic value indicates the fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
#endif
/*
* Size for Verbs device flow handle structure only. Do not use the DV only
/*
* Size for Verbs device flow handle structure only. Do not use the DV only
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.encap_decap = idx;
+ dev_flow->handle->dvh.rix_encap_decap = idx;
dev_flow->dv.encap_decap = cache_resource;
return 0;
}
}
/* Register new encap/decap resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
dev_flow->dv.encap_decap = cache_resource;
return 0;
}
}
/* Register new encap/decap resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &dev_flow->handle->dvh.encap_decap);
+ &dev_flow->handle->dvh.rix_encap_decap);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
- dev_flow->handle->dvh.encap_decap, cache_resource, next);
+ dev_flow->handle->dvh.rix_encap_decap, cache_resource,
+ next);
dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->handle->jump = tbl_data->idx;
+ dev_flow->handle->rix_jump = tbl_data->idx;
dev_flow->dv.jump = &tbl_data->jump;
return 0;
}
dev_flow->dv.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->port_id_action = idx;
+ dev_flow->handle->rix_port_id_action = idx;
dev_flow->dv.port_id_action = cache_resource;
return 0;
}
}
/* Register new port id action resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
dev_flow->dv.port_id_action = cache_resource;
return 0;
}
}
/* Register new port id action resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
- &dev_flow->handle->port_id_action);
+ &dev_flow->handle->rix_port_id_action);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
- dev_flow->handle->port_id_action, cache_resource, next);
+ dev_flow->handle->rix_port_id_action, cache_resource,
+ next);
dev_flow->dv.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
dev_flow->dv.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.push_vlan_res = idx;
+ dev_flow->handle->dvh.rix_push_vlan = idx;
dev_flow->dv.push_vlan_res = cache_resource;
return 0;
}
}
/* Register new push_vlan action resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
dev_flow->dv.push_vlan_res = cache_resource;
return 0;
}
}
/* Register new push_vlan action resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- &dev_flow->handle->dvh.push_vlan_res);
+ &dev_flow->handle->dvh.rix_push_vlan);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
&sh->push_vlan_action_list,
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
&sh->push_vlan_action_list,
- dev_flow->handle->dvh.push_vlan_res,
+ dev_flow->handle->dvh.rix_push_vlan,
cache_resource, next);
dev_flow->dv.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
cache_resource, next);
dev_flow->dv.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.tag_resource = cache_resource->idx;
+ dev_flow->handle->dvh.rix_tag = cache_resource->idx;
dev_flow->dv.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
dev_flow->dv.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
}
/* Register new resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
}
/* Register new resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
- &dev_flow->handle->dvh.tag_resource);
+ &dev_flow->handle->dvh.rix_tag);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- MLX5_ASSERT(!handle->port_id_action);
+ MLX5_ASSERT(!handle->rix_port_id_action);
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
* right now. So the pointer to the tag resource must be
* zero before the register process.
*/
* right now. So the pointer to the tag resource must be
* zero before the register process.
*/
- MLX5_ASSERT(!handle->dvh.tag_resource);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
if (flow_dv_tag_resource_register(dev, tag_be,
dev_flow, error))
return -rte_errno;
if (flow_dv_tag_resource_register(dev, tag_be,
dev_flow, error))
return -rte_errno;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- MLX5_ASSERT(!handle->dvh.tag_resource);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
if (flow_dv_tag_resource_register(dev, tag_be,
dev_flow, error))
return -rte_errno;
if (flow_dv_tag_resource_register(dev, tag_be,
dev_flow, error))
return -rte_errno;
* the special index to hrxq to mark the queue
* has been allocated.
*/
* the special index to hrxq to mark the queue
* has been allocated.
*/
+ dh->rix_hrxq = UINT32_MAX;
dv->actions[n++] = drop_hrxq->action;
}
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
dv->actions[n++] = drop_hrxq->action;
}
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
"cannot get hash queue");
goto error;
}
"cannot get hash queue");
goto error;
}
+ dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
}
dh->ib_flow =
dv->actions[n++] = hrxq->action;
}
dh->ib_flow =
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
/* hrxq is union, don't clear it if the flag is not set. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
/* hrxq is union, don't clear it if the flag is not set. */
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
- mlx5_hrxq_release(dev, dh->hrxq);
- dh->hrxq = 0;
+ mlx5_hrxq_release(dev, dh->rix_hrxq);
+ dh->rix_hrxq = 0;
}
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
}
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.encap_decap;
+ uint32_t idx = handle->dvh.rix_encap_decap;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
struct mlx5_flow_tbl_data_entry *tbl_data;
tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
struct mlx5_flow_tbl_data_entry *tbl_data;
tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
if (!tbl_data)
return 0;
cache_resource = &tbl_data->jump;
if (!tbl_data)
return 0;
cache_resource = &tbl_data->jump;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->port_id_action;
+ uint32_t idx = handle->rix_port_id_action;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.push_vlan_res;
+ uint32_t idx = handle->dvh.rix_push_vlan;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_handle *handle)
{
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_handle *handle)
{
return;
if (handle->fate_action == MLX5_FLOW_FATE_DROP)
mlx5_hrxq_drop_release(dev);
else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
return;
if (handle->fate_action == MLX5_FLOW_FATE_DROP)
mlx5_hrxq_drop_release(dev);
else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
- mlx5_hrxq_release(dev, handle->hrxq);
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
flow_dv_jump_tbl_resource_release(dev, handle);
else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
flow_dv_port_id_action_resource_release(dev, handle);
else
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
flow_dv_jump_tbl_resource_release(dev, handle);
else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
flow_dv_port_id_action_resource_release(dev, handle);
else
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
- if (dev_handle->dvh.encap_decap)
+ if (dev_handle->dvh.rix_encap_decap)
flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
- if (dev_handle->dvh.push_vlan_res)
+ if (dev_handle->dvh.rix_push_vlan)
flow_dv_push_vlan_action_resource_release(dev,
dev_handle);
flow_dv_push_vlan_action_resource_release(dev,
dev_handle);
- if (dev_handle->dvh.tag_resource)
+ if (dev_handle->dvh.rix_tag)
- dev_handle->dvh.tag_resource);
+ dev_handle->dvh.rix_tag);
flow_dv_fate_resource_release(dev, dev_handle);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
tmp_idx);
flow_dv_fate_resource_release(dev, dev_handle);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
tmp_idx);
handle->ib_flow = NULL;
}
/* hrxq is union, don't touch it only the flag is set. */
handle->ib_flow = NULL;
}
/* hrxq is union, don't touch it only the flag is set. */
+ if (handle->rix_hrxq) {
if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
} else if (handle->fate_action ==
MLX5_FLOW_FATE_QUEUE) {
} else if (handle->fate_action ==
MLX5_FLOW_FATE_QUEUE) {
- mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = 0;
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
+ handle->rix_hrxq = 0;
}
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
}
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
"cannot get hash queue");
goto error;
}
"cannot get hash queue");
goto error;
}
- handle->hrxq = hrxq_idx;
+ handle->rix_hrxq = hrxq_idx;
}
MLX5_ASSERT(hrxq);
handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
}
MLX5_ASSERT(hrxq);
handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
dev_handles, handle, next) {
/* hrxq is union, don't touch it only the flag is set. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
dev_handles, handle, next) {
/* hrxq is union, don't touch it only the flag is set. */
+ if (handle->rix_hrxq) {
if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
} else if (handle->fate_action ==
MLX5_FLOW_FATE_QUEUE) {
} else if (handle->fate_action ==
MLX5_FLOW_FATE_QUEUE) {
- mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = 0;
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
+ handle->rix_hrxq = 0;
}
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
}
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)