This commit converts hrxq objects to indexed memory pool allocation.
Using a uint32_t index instead of a pointer saves 4 bytes of memory
in the flow handle. With millions of flows, this saves several
megabytes of memory.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
.free = rte_free,
.type = "mlx5_jump_ipool",
},
+ {
+ .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 0,
+ .release_mem_en = 1,
+ .malloc = rte_malloc_socket,
+ .free = rte_free,
+ .type = "mlx5_hrxq_ipool",
+ },
};
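Not part of the patch: assuming each of the first grow_trunk trunks
grows by a factor of 1 << grow_shift (an assumption about the ipool
growth policy, not confirmed by this diff), the hrxq pool configured
above would use trunks of 64, 256, 1024 and then 4096 entries. A
sketch of that assumed arithmetic:

#include <stdint.h>

/* Assumed rule: entries = trunk_size << (grow_shift * min(idx, grow_trunk)). */
static uint32_t
trunk_entries(uint32_t base, uint32_t grow_trunk, uint32_t grow_shift,
	      uint32_t trunk_idx)
{
	uint32_t steps = trunk_idx < grow_trunk ? trunk_idx : grow_trunk;

	return base << (grow_shift * steps);
}
/* trunk_entries(64, 3, 2, i) for i = 0..4 gives 64, 256, 1024, 4096, 4096. */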
close(priv->nl_socket_rdma);
if (priv->vmwa_context)
mlx5_vlan_vmwa_exit(priv->vmwa_context);
- if (priv->sh) {
- /*
- * Free the shared context in last turn, because the cleanup
- * routines above may use some shared fields, like
- * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
- * ifindex if Netlink fails.
- */
- mlx5_free_shared_ibctx(priv->sh);
- priv->sh = NULL;
- }
ret = mlx5_hrxq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
+ if (priv->sh) {
+ /*
+ * Free the shared context in last turn, because the cleanup
+ * routines above may use some shared fields, like
+ * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
+ * ifindex if Netlink fails.
+ */
+ mlx5_free_shared_ibctx(priv->sh);
+ priv->sh = NULL;
+ }
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
MLX5_IPOOL_TAG, /* Pool for tag resource. */
MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+ MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
MLX5_IPOOL_MAX,
};
int flow_nested_idx; /* Intermediate device flow index, nested. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
- LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
+ uint32_t hrxqs; /* Verbs Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
/* Indirection tables. */
uint64_t act_flags;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
void *ib_flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ uint32_t hrxq; /**< Hash Rx queue object index. */
struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
union {
uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
- dh->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dh->hrxq) {
+ struct mlx5_hrxq *drop_hrxq;
+ drop_hrxq = mlx5_hrxq_drop_new(dev);
+ if (!drop_hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dh->hrxq->action;
+ dv->actions[n++] = drop_hrxq->action;
}
} else if (dh->act_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
MLX5_ASSERT(flow->rss.queue);
- hrxq = mlx5_hrxq_get(dev, flow->rss.key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num);
- if (!hrxq) {
- hrxq = mlx5_hrxq_new
- (dev, flow->rss.key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
+ flow->rss.queue_num);
+ if (!hrxq_idx) {
+ hrxq_idx = mlx5_hrxq_new
+ (dev, flow->rss.key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
+ flow->rss.queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
}
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- dh->hrxq = hrxq;
- dv->actions[n++] = dh->hrxq->action;
+ dh->hrxq = hrxq_idx;
+ dv->actions[n++] = hrxq->action;
}
dh->ib_flow =
mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, dh->hrxq);
- dh->hrxq = NULL;
+ dh->hrxq = 0;
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, dh->hrxq);
- dh->hrxq = NULL;
+ dh->hrxq = 0;
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = NULL;
+ handle->hrxq = 0;
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *handle;
struct mlx5_flow *dev_flow;
+ struct mlx5_hrxq *hrxq;
int err;
int idx;
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
handle = dev_flow->handle;
if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
- handle->hrxq = mlx5_hrxq_drop_new(dev);
- if (!handle->hrxq) {
+ hrxq = mlx5_hrxq_drop_new(dev);
+ if (!hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
goto error;
}
} else {
- struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
MLX5_ASSERT(flow->rss.queue);
- hrxq = mlx5_hrxq_get(dev, flow->rss.key,
+ hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->rss.key,
+ if (!hrxq_idx)
+ hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
!!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- handle->hrxq = hrxq;
+ handle->hrxq = hrxq_idx;
}
- handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+ MLX5_ASSERT(hrxq);
+ handle->ib_flow = mlx5_glue->create_flow(hrxq->qp,
&dev_flow->verbs.attr);
if (!handle->ib_flow) {
rte_flow_error_set(error, errno,
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, handle->hrxq);
- handle->hrxq = NULL;
+ handle->hrxq = 0;
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
* Tunnel type.
*
* @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ * The index of the initialised Verbs/DevX object, 0 otherwise and rte_errno is set.
*/
-struct mlx5_hrxq *
+uint32_t
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx = 0;
struct ibv_qp *qp = NULL;
struct mlx5_ind_table_obj *ind_tbl;
int err;
}
if (!ind_tbl) {
rte_errno = ENOMEM;
- return NULL;
+ return 0;
}
if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
goto error;
}
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+ hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
- LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- return hrxq;
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+ hrxq, next);
+ return hrxq_idx;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl);
else if (tir)
claim_zero(mlx5_devx_cmd_destroy(tir));
rte_errno = err; /* Restore rte_errno. */
- return NULL;
+ return 0;
}
/**
* Number of queues.
*
* @return
- * An hash Rx queue on success.
+ * A hash Rx queue index on success.
*/
-struct mlx5_hrxq *
+uint32_t
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t idx;
queues_n = hash_fields ? queues_n : 1;
- LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+ hrxq, next) {
struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- return hrxq;
+ return idx;
}
- return NULL;
+ return 0;
}
/**
* @param dev
* Pointer to Ethernet device.
* @param hrxq
- * Pointer to Hash Rx queue to release.
+ * Index of the Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ if (!hrxq)
+ return 0;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
mlx5_ind_table_obj_release(dev, hrxq->ind_table);
- LIST_REMOVE(hrxq, next);
- rte_free(hrxq);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+ hrxq_idx, hrxq, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return 0;
}
claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t idx;
int ret = 0;
- LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+ hrxq, next) {
DRV_LOG(DEBUG,
"port %u hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
/* Hash Rx queue. */
struct mlx5_hrxq {
- LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+ ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
RTE_STD_C11
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- int tunnel __rte_unused);
-struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n);
-int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel __rte_unused);
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx);
int mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);