#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
+#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint16_t i;
+ uint32_t i;
uint16_t n = 0;
uint16_t n_ibv = 0;
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!rte_is_power_of_2(desc)) {
- desc = 1 << log2above(desc);
+ if (!rte_is_power_of_2(*desc)) {
+ *desc = 1 << log2above(*desc);
DRV_LOG(WARNING,
"port %u increased number of descriptors in Rx queue %u"
" to the next power of two (%d)",
- dev->data->port_id, idx, desc);
+ dev->data->port_id, idx, *desc);
}
DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
- dev->data->port_id, idx, desc);
+ dev->data->port_id, idx, *desc);
if (idx >= priv->rxqs_n) {
DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
dev->data->port_id, idx, priv->rxqs_n);
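/*
 * Illustration (editor's note, not part of the patch): taking desc by
 * pointer lets the rounding above propagate back to the caller. E.g. a
 * requested desc = 1000 is not a power of two, so *desc becomes
 * 1 << log2above(1000) = 1024, and both setup paths below then create
 * the queue with the adjusted value.
 */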
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int res;
- res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int res;
- res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
if (hairpin_conf->peer_count != 1 ||
rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
if (rxq_ctrl->rxq.wqes) {
- rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+ mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
rxq_ctrl->rxq.wqes = NULL;
}
if (rxq_ctrl->wq_umem) {
claim_zero(mlx5_glue->destroy_comp_channel
(rxq_obj->channel));
LIST_REMOVE(rxq_obj, next);
- rte_free(rxq_obj);
+ mlx5_free(rxq_obj);
return 0;
}
return 1;
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
- intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ intr_handle->intr_vec = mlx5_malloc(0,
+ n * sizeof(intr_handle->intr_vec[0]),
+ 0, SOCKET_ID_ANY);
if (intr_handle->intr_vec == NULL) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
free:
rte_intr_free_epoll_fd(intr_handle);
if (intr_handle->intr_vec)
- free(intr_handle->intr_vec);
+ mlx5_free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
}
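/*
 * Editor's sketch (not part of the patch) of the allocator conversion
 * applied throughout this file, assuming the wrappers from
 * <mlx5_malloc.h>:
 *
 *   void *buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,  <- rte heap, zeroed
 *                           size, align, socket);          <- align 0 = default
 *   ...
 *   mlx5_free(buf);  <- NULL-safe, frees from whichever heap allocated it
 *
 * A flags value of 0, as in the intr_vec allocation above, lets the
 * wrapper fall back to its configured default heap.
 */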
/* Calculate and allocate WQ memory space. */
wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
wq_size = wqe_n * wqe_size;
- buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
- rxq_ctrl->socket);
+ buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
+ MLX5_WQE_BUF_ALIGNMENT, rxq_ctrl->socket);
if (!buf)
return NULL;
rxq_data->wqes = buf;
rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
buf, wq_size, 0);
if (!rxq_ctrl->wq_umem) {
- rte_free(buf);
+ mlx5_free(buf);
return NULL;
}
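/*
 * Worked example with illustrative numbers: log_wqe_size = 6 gives a
 * 1 << 6 = 64 byte WQE; a ring of wqe_n = 512 entries then needs
 * wq_size = 512 * 64 = 32768 bytes, allocated zeroed on the queue's NUMA
 * socket and registered as a DevX umem just above.
 */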
mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_create_rq_attr attr = { 0 };
struct mlx5_rxq_obj *tmpl = NULL;
- int ret = 0;
uint32_t max_wq_data;
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(!rxq_ctrl->obj);
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
if (!tmpl) {
DRV_LOG(ERR,
"port %u Rx queue %u cannot allocate verbs resources",
dev->data->port_id, rxq_data->idx);
rte_errno = ENOMEM;
- goto error;
+ return NULL;
}
tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
tmpl->rxq_ctrl = rxq_ctrl;
DRV_LOG(ERR, "total data size %u power of 2 is "
"too large for hairpin",
priv->config.log_hp_size);
+ mlx5_free(tmpl);
rte_errno = ERANGE;
return NULL;
}
DRV_LOG(ERR,
"port %u Rx hairpin queue %u can't create rq object",
dev->data->port_id, idx);
+ mlx5_free(tmpl);
rte_errno = errno;
- goto error;
+ return NULL;
}
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)tmpl);
LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
-error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->rq)
- mlx5_devx_cmd_destroy(tmpl->rq);
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
}
/**
return mlx5_rxq_obj_hairpin_new(dev, idx);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
if (!tmpl) {
DRV_LOG(ERR,
"port %u Rx queue %u cannot allocate verbs resources",
goto error;
}
DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+ dev->data->port_id, priv->sh->device_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
- dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+ dev->data->port_id, priv->sh->device_attr.max_sge);
/* Allocate door-bell for types created with DevX. */
if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
struct mlx5_devx_dbr_page *dbr_page;
int64_t dbr_offset;
- dbr_offset = mlx5_get_dbr(dev, &dbr_page);
+ dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
+ &dbr_page);
if (dbr_offset < 0)
goto error;
rxq_ctrl->dbr_offset = dbr_offset;
- rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+ rxq_ctrl->dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
rxq_ctrl->dbr_umem_id_valid = 1;
rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
(uintptr_t)rxq_ctrl->dbr_offset);
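/*
 * Editor's note on the door-bell plumbing above: mlx5_get_dbr() now takes
 * the DevX context and the per-device door-bell page list explicitly and
 * returns a byte offset inside a shared umem page, so the RQ door-bell
 * address is simply base + offset:
 *
 *   rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + dbr_offset);
 *
 * The umem id is likewise read through the OS-abstracted accessor instead
 * of dereferencing dbr_page->umem directly.
 */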
if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel
(tmpl->channel));
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = ret; /* Restore rte_errno. */
}
if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
unsigned int mprq_stride_size;
unsigned int mprq_stride_cap;
struct mlx5_dev_config *config = &priv->config;
- unsigned int strd_headroom_en;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
rte_errno = ENOSPC;
return NULL;
}
- tmpl = rte_calloc_socket("RXQ", 1,
- sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *),
- 0, socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+ desc_n * sizeof(struct rte_mbuf *), 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
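/*
 * Layout note: the control structure and the elts[] array of mbuf
 * pointers come from a single allocation, sizeof(*tmpl) bytes of struct
 * followed by desc_n pointers; the array is recovered later with
 *
 *   tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
 *
 * so one mlx5_free(tmpl) releases both.
 */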
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- /*
- * LRO packet may consume all the stride memory, hence we cannot
- * guaranty head-room near the packet memory in the stride.
- * In this case scatter is, for sure, enabled and an empty mbuf may be
- * added in the start for the head-room.
- */
- if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
- non_scatter_min_mbuf_size > mb_len) {
- strd_headroom_en = 0;
- mprq_stride_size = RTE_MIN(max_rx_pkt_len,
- 1u << config->mprq.max_stride_size_n);
- } else {
- strd_headroom_en = 1;
- mprq_stride_size = non_scatter_min_mbuf_size;
- }
mprq_stride_nums = config->mprq.stride_num_n ?
config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- mprq_stride_size = (mprq_stride_size <=
- (1U << config->mprq.max_stride_size_n)) ?
- log2above(mprq_stride_size) : MLX5_MPRQ_STRIDE_SIZE_N;
+ mprq_stride_size = non_scatter_min_mbuf_size <=
+ (1U << config->mprq.max_stride_size_n) ?
+ log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
mprq_stride_cap = (config->mprq.stride_num_n ?
(1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
(config->mprq.stride_size_n ?
* Otherwise, enable Rx scatter if necessary.
*/
if (mprq_en && desc > (1U << mprq_stride_nums) &&
- (non_scatter_min_mbuf_size -
- (lro_on_queue ? RTE_PKTMBUF_HEADROOM : 0) <=
+ (non_scatter_min_mbuf_size <=
(1U << config->mprq.max_stride_size_n) ||
(config->mprq.stride_size_n &&
non_scatter_min_mbuf_size <= mprq_stride_cap))) {
tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
config->mprq.stride_size_n : mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
- tmpl->rxq.strd_headroom_en = strd_headroom_en;
+ tmpl->rxq.strd_scatter_en =
+ !!(offloads & DEV_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pkt_len,
tmpl->rxq.sges_n = sges_n;
max_lro_size = max_rx_pkt_len;
}
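/*
 * Illustrative MPRQ arithmetic (example values, not driver defaults):
 * stride_num_n = 6 and stride_size_n = 11 give one MPRQ WQE of
 * (1 << 6) = 64 strides of (1 << 11) = 2048 bytes, i.e.
 * mprq_stride_cap = 131072 bytes. Per the condition above, MPRQ engages
 * only when desc exceeds the per-WQE stride count and a non-scattered
 * packet either fits the largest supported stride or, with an explicit
 * stride_size_n, fits the per-WQE capacity.
 */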
- if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+ if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
- "port %u MPRQ is requested but cannot be enabled"
- " (requested: packet size = %u, desc = %u,"
- " stride_sz = %u, stride_num = %u,"
- " supported: min_stride_sz = %u, max_stride_sz = %u).",
- dev->data->port_id, non_scatter_min_mbuf_size, desc,
+ "port %u MPRQ is requested but cannot be enabled\n"
+ " (requested: pkt_sz = %u, desc_num = %u,"
+ " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
+ " supported: min_rxqs_num = %u,"
+ " min_stride_sz = %u, max_stride_sz = %u).",
+ dev->data->port_id, non_scatter_min_mbuf_size,
+ desc, priv->rxqs_n,
config->mprq.stride_size_n ?
(1U << config->mprq.stride_size_n) :
(1U << mprq_stride_size),
config->mprq.stride_num_n ?
(1U << config->mprq.stride_num_n) :
(1U << mprq_stride_nums),
+ config->mprq.min_rxqs_num,
(1U << config->mprq.min_stride_size_n),
(1U << config->mprq.max_stride_size_n));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
#ifndef RTE_ARCH_64
- tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+ tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
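/*
 * Editor's note: on 32-bit targets a 64-bit UAR door-bell write is split
 * into two stores, so CQ door-bell writers serialize on this lock; since
 * the CQ UAR now belongs to the shared device context (sh), the lock
 * protecting it moves there as well.
 */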
tmpl->rxq.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
- rte_free(tmpl);
+ mlx5_free(tmpl);
return NULL;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
- tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ SOCKET_ID_ANY);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
if (rxq_ctrl->dbr_umem_id_valid)
- claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+ claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+ rxq_ctrl->dbr_umem_id,
rxq_ctrl->dbr_offset));
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
- rte_free(rxq_ctrl);
+ mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
return 0;
}
struct mlx5_ind_table_obj *ind_tbl;
unsigned int i = 0, j = 0, k = 0;
- ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0);
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
log2above(queues_n) :
log2above(priv->config.ind_table_max_size));
- rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
- rqt_n * sizeof(uint32_t), 0);
+ rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
+ rqt_n * sizeof(uint32_t), 0,
+ SOCKET_ID_ANY);
if (!rqt_attr) {
DRV_LOG(ERR, "port %u cannot allocate RQT resources",
dev->data->port_id);
rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
rqt_attr);
- rte_free(rqt_attr);
+ mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "port %u cannot create DevX RQT",
dev->data->port_id);
error:
for (j = 0; j < i; j++)
mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
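/*
 * RQT sizing note, judging from the ternary above: the table length is a
 * power of two, queues_n itself when it is already one, otherwise the
 * configured ind_table_max_size; the tail entries are then filled by
 * wrapping over the real queues (rq_list[k] = rq_list[j]), so RSS still
 * spreads across all of them when queues_n is not a power of two.
 */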
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
return 0;
}
return 1;
* Tunnel type.
*
* @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised index on success, 0 otherwise and rte_errno is set.
*/
-struct mlx5_hrxq *
+uint32_t
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx = 0;
struct ibv_qp *qp = NULL;
struct mlx5_ind_table_obj *ind_tbl;
int err;
}
if (!ind_tbl) {
rte_errno = ENOMEM;
- return NULL;
+ return 0;
}
if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
goto error;
}
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+ hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
- LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- return hrxq;
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+ hrxq, next);
+ return hrxq_idx;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl);
else if (tir)
claim_zero(mlx5_devx_cmd_destroy(tir));
rte_errno = err; /* Restore rte_errno. */
- return NULL;
+ return 0;
}
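/*
 * Editor's sketch of the handle-based hrxq API introduced here: callers
 * keep a uint32_t index instead of a pointer and dereference it through
 * the shared ipool (trailing parameters elided as "..." to match the
 * signatures above):
 *
 *   uint32_t idx;
 *   struct mlx5_hrxq *hrxq;
 *
 *   idx = mlx5_hrxq_get(dev, rss_key, rss_key_len, hash_fields, ...);
 *   if (!idx)
 *           idx = mlx5_hrxq_new(dev, rss_key, rss_key_len, hash_fields, ...);
 *   hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], idx);
 *   ...
 *   mlx5_hrxq_release(dev, idx);
 */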
/**
* Number of queues.
*
* @return
- * An hash Rx queue on success.
+ * A hash Rx queue index on success, 0 otherwise.
*/
-struct mlx5_hrxq *
+uint32_t
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t idx;
queues_n = hash_fields ? queues_n : 1;
- LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+ hrxq, next) {
struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- return hrxq;
+ return idx;
}
- return NULL;
+ return 0;
}
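/*
 * Note on the return value: the code relies on the hrxq ipool never
 * handing out index 0, so 0 serves both as the "not found" result here
 * and as the error value of mlx5_hrxq_new() above.
 */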
/**
* @param dev
* Pointer to Ethernet device.
- * @param hrxq
- *   Pointer to Hash Rx queue to release.
+ * @param hrxq_idx
+ *   Index of the Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ if (!hrxq)
+ return 0;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
mlx5_ind_table_obj_release(dev, hrxq->ind_table);
- LIST_REMOVE(hrxq, next);
- rte_free(hrxq);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+ hrxq_idx, hrxq, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return 0;
}
claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
+ uint32_t idx;
int ret = 0;
- LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+ hrxq, next) {
DRV_LOG(DEBUG,
"port %u hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
rte_errno = errno;
goto error;
}
- rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+ rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
if (!rxq) {
DEBUG("port %u cannot allocate drop Rx queue memory",
dev->data->port_id);
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
if (rxq->cq)
claim_zero(mlx5_glue->destroy_cq(rxq->cq));
- rte_free(rxq);
+ mlx5_free(rxq);
priv->drop_queue.rxq = NULL;
}
rte_errno = errno;
goto error;
}
- ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
+ SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
goto error;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
mlx5_rxq_obj_drop_release(dev);
- rte_free(ind_tbl);
+ mlx5_free(ind_tbl);
priv->drop_queue.hrxq->ind_table = NULL;
}
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
"port %u cannot allocate memory for drop queue",
mlx5_ind_table_obj_drop_release(dev);
if (hrxq) {
priv->drop_queue.hrxq = NULL;
- rte_free(hrxq);
+ mlx5_free(hrxq);
}
return NULL;
}
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_ind_table_obj_drop_release(dev);
- rte_free(hrxq);
+ mlx5_free(hrxq);
priv->drop_queue.hrxq = NULL;
}
}
+
+/**
+ * Set the Rx queue timestamp conversion parameters.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ */
+void
+mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_rxq_data *data;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ data = (*priv->rxqs)[i];
+ data->sh = sh;
+ data->rt_timestamp = priv->config.rt_timestamp;
+ }
+}
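/*
 * Editor's note: this helper is presumably invoked at device start, once
 * the shared context and the rt_timestamp configuration are final, so the
 * datapath can convert CQE timestamps per queue without touching
 * priv->config on the hot path.
 */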