#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_devx.h"
+#include "rte_pmd_mlx5.h"
/* Default RSS hash key also used for ConnectX-3. */
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
RTE_ETH_RX_OFFLOAD_TIMESTAMP |
RTE_ETH_RX_OFFLOAD_RSS_HASH);
- if (!config->mprq.enabled)
+ if (!priv->config.mprq.enabled)
offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
- if (config->hw_fcs_strip)
+ if (priv->sh->config.hw_fcs_strip)
offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
- if (config->hw_csum)
+ if (priv->sh->dev_cap.hw_csum)
offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
- if (config->hw_vlan_strip)
+ if (priv->sh->dev_cap.hw_vlan_strip)
offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
- if (MLX5_LRO_SUPPORTED(dev))
+ if (priv->sh->dev_cap.lro_supported)
offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
return offloads;
}
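The capability mask assembled above reaches applications through the standard ethdev query. A minimal application-side sketch, assuming a configured port (pick_rx_offloads and port_id are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Enable KEEP_CRC only when the PMD advertises it. */
static uint64_t
pick_rx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	uint64_t offloads = 0;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	return offloads;
}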
MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
}
if (conf->share_group > 0) {
- if (!priv->config.hca_attr.mem_rq_rmp) {
+ if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
dev->data->port_id, idx);
rte_errno = EINVAL;
return -rte_errno;
}
- rxq->priv = priv;
- rxq->idx = idx;
- (*priv->rxq_privs)[idx] = rxq;
- if (rxq_ctrl != NULL) {
- /* Join owner list. */
- LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
- rxq->ctrl = rxq_ctrl;
- } else {
- rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+ if (rxq_ctrl == NULL) {
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
n_seg);
if (rxq_ctrl == NULL) {
DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
dev->data->port_id, idx);
mlx5_free(rxq);
- (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
}
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ /* Join owner list. */
+ LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+ rxq->ctrl = rxq_ctrl;
mlx5_rxq_ref(dev, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
struct mlx5_rxq_data *rxq;
- if (rxq_ctrl == NULL ||
- rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
continue;
rxq = &rxq_ctrl->rxq;
n_ibv++;
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq_ctrl == NULL ||
- rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
continue;
rxq_ctrl->rxq.mprq_mp = mp;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+ if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
MLX5_MAX_TCP_HDR_OFFSET)
max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
uint32_t *actual_log_stride_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
- uint32_t log_min_stride_num = config->mprq.log_min_stride_num;
- uint32_t log_max_stride_num = config->mprq.log_max_stride_num;
+ struct mlx5_port_config *config = &priv->config;
+ struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
+ uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
+ uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
uint32_t log_def_stride_num =
RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
log_min_stride_num),
log_max_stride_num);
- uint32_t log_min_stride_size = config->mprq.log_min_stride_size;
- uint32_t log_max_stride_size = config->mprq.log_max_stride_size;
+ uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
+ uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
uint32_t log_def_stride_size =
RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
log_min_stride_size),
}
log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
/* Check if WQE buffer size is supported by hardware. */
- if (log_stride_wqe_size < config->mprq.log_min_stride_wqe_size) {
+ if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
*actual_log_stride_num = log_def_stride_num;
*actual_log_stride_size = log_def_stride_size;
DRV_LOG(WARNING,
RTE_BIT32(log_def_stride_size));
log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
}
- MLX5_ASSERT(log_stride_wqe_size >= config->mprq.log_min_stride_wqe_size);
+ MLX5_ASSERT(log_stride_wqe_size >=
+ dev_cap->mprq.log_min_stride_wqe_size);
if (desc <= RTE_BIT32(*actual_log_stride_num))
goto unsupport;
if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
RTE_BIT32(config->mprq.log_stride_size),
RTE_BIT32(config->mprq.log_stride_num),
config->mprq.min_rxqs_num,
- RTE_BIT32(config->mprq.log_min_stride_wqe_size),
- RTE_BIT32(config->mprq.log_min_stride_size),
- RTE_BIT32(config->mprq.log_max_stride_size),
+ RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
+ RTE_BIT32(dev_cap->mprq.log_min_stride_size),
+ RTE_BIT32(dev_cap->mprq.log_max_stride_size),
rx_seg_en ? "" : "not ");
return -1;
}
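For intuition on the WQE-size check above: assuming the defaults from mlx5_defs.h (MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM = 6, MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE = 11), log_stride_wqe_size = 6 + 11 = 17, i.e. a 128 KiB stride buffer per MPRQ WQE. If the user-requested combination falls below the device's mprq.log_min_stride_wqe_size, the code falls back to these defaults with a warning, and the assertion then requires that the default combination itself satisfies the device minimum.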
*
* @param dev
* Pointer to Ethernet device.
- * @param rxq
- * RX queue private data.
+ * @param idx
+ * RX queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
- uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
- uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
return NULL;
}
LIST_INIT(&tmpl->owners);
- if (conf->share_group > 0) {
- tmpl->rxq.shared = 1;
- tmpl->share_group = conf->share_group;
- tmpl->share_qid = conf->share_qid;
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
- }
- rxq->ctrl = tmpl;
- LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
/*
* Save the original segment configuration in the shared queue
rte_errno = ENOSPC;
goto error;
}
- tmpl->type = MLX5_RXQ_TYPE_STANDARD;
+ tmpl->is_hairpin = false;
if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
&priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
tmpl->rxq.crc_present = 0;
tmpl->rxq.lro = lro_on_queue;
if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
- if (config->hw_fcs_strip) {
+ if (priv->sh->config.hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
* configured to scatter the FCS.
tmpl->rxq.mprq_bufs =
(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
tmpl->rxq.idx = idx;
+ if (conf->share_group > 0) {
+ tmpl->rxq.shared = 1;
+ tmpl->share_group = conf->share_group;
+ tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ }
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
LIST_INIT(&tmpl->owners);
rxq->ctrl = tmpl;
LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
- tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
+ tmpl->is_hairpin = true;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (!rxq_ctrl->is_hairpin) {
if (!rxq_ctrl->started)
rxq_free_elts(rxq_ctrl);
dev->data->rx_queue_state[idx] =
} else { /* Refcnt zero, closing device. */
LIST_REMOVE(rxq, owner_entry);
if (LIST_EMPTY(&rxq_ctrl->owners)) {
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (!rxq_ctrl->is_hairpin)
mlx5_mr_btree_free
(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
if (rxq_ctrl->rxq.shared)
}
/**
- * Get a Rx queue type.
+ * Check whether RxQ type is Hairpin.
*
* @param dev
* Pointer to Ethernet device.
* Rx queue index.
*
* @return
- * The Rx queue type.
+ * True if Rx queue type is Hairpin, otherwise False.
*/
-enum mlx5_rxq_type
-mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
+bool
+mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
- if (idx < priv->rxqs_n && rxq_ctrl != NULL)
- return rxq_ctrl->type;
- return MLX5_RXQ_TYPE_UNDEFINED;
+ return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
}
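Call sites migrate mechanically from the removed type enum to the new predicate; a sketch of the before/after shape (handle_hairpin is a hypothetical caller):

	/* Before: */
	if (mlx5_rxq_get_type(dev, idx) == MLX5_RXQ_TYPE_HAIRPIN)
		handle_hairpin(dev, idx);

	/* After: */
	if (mlx5_rxq_is_hairpin(dev, idx))
		handle_hairpin(dev, idx);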
/*
const struct rte_eth_hairpin_conf *
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && rxq != NULL) {
- if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq->hairpin_conf;
- }
- return NULL;
+ return mlx5_rxq_is_hairpin(dev, idx) ? &rxq->hairpin_conf : NULL;
}
/**
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
- * @param standalone
- * Indirection table for Standalone queue.
* @param deref_rxqs
* If true, then dereference RX queues related to indirection table.
* Otherwise, no additional action will be taken.
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone,
bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- if (!ret && !standalone)
+ if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
if (ret)
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
if (ref_qs)
for (i = 0; i != queues_n; ++i) {
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n, bool standalone, bool ref_qs)
{
struct mlx5_ind_table_obj *ind_tbl;
int ret;
+ /*
+ * Allocate maximum queues for shared action as the queue number
+ * may be modified later.
+ */
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+ (standalone ? priv->rxqs_n : queues_n) *
+ sizeof(uint16_t), 0, SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
mlx5_free(ind_tbl);
return NULL;
}
- if (!standalone) {
- rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ if (!standalone)
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- rte_rwlock_write_unlock(&priv->ind_tbls_lock);
- }
+ else
+ LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+
return ind_tbl;
}
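Because standalone tables are now linked into priv->standalone_ind_tbls, they can be enumerated under the same rwlock as the shared ones; a hedged sketch (queues_total is illustrative only):

	struct mlx5_ind_table_obj *it;
	unsigned int queues_total = 0;

	rte_rwlock_read_lock(&priv->ind_tbls_lock);
	LIST_FOREACH(it, &priv->standalone_ind_tbls, next)
		queues_total += it->queues_n;
	rte_rwlock_read_unlock(&priv->ind_tbls_lock);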
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
MLX5_ASSERT(standalone);
RTE_SET_USED(standalone);
struct mlx5_priv *priv = dev->data->dev_private;
const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
log2above(ind_tbl->queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
unsigned int i;
int ret;
return (hrxq->rss_key_len != rss_desc->key_len ||
memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hws_flags != rss_desc->hws_flags ||
hrxq->hash_fields != rss_desc->hash_fields ||
hrxq->ind_table->queues_n != rss_desc->queue_num ||
memcmp(hrxq->ind_table->queues, rss_desc->queue,
}
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
- true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
}
rte_errno = err;
return -rte_errno;
struct mlx5_priv *priv = dev->data->dev_private;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
+ if (hrxq->hws_flags)
+ mlx5dr_action_destroy(hrxq->action);
+ else
+ mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ hrxq->hws_flags ?
+ (!!dev->data->dev_started) : true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
int ret;
queues_n = rss_desc->hash_fields ? queues_n : 1;
- if (!ind_tbl)
+ if (!ind_tbl && !rss_desc->hws_flags)
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- standalone,
+ standalone ||
+ rss_desc->hws_flags,
!!dev->data->dev_started);
if (!ind_tbl)
return NULL;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = rss_desc->hash_fields;
+ hrxq->hws_flags = rss_desc->hws_flags;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
if (ret < 0)
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
* RSS configuration for the Rx hash queue.
*
* @return
- * An hash Rx queue index on success.
+ * A hash Rx queue on success.
*/
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
+ struct mlx5_hrxq *hrxq = NULL;
struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
} else {
entry = mlx5_list_register(priv->hrxqs, &ctx);
if (!entry)
- return 0;
+ return NULL;
hrxq = container_of(entry, typeof(*hrxq), entry);
}
- if (hrxq)
- return hrxq->idx;
- return 0;
+ return hrxq;
}
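With the pointer-returning signature, callers no longer round-trip through the ipool index; a sketch of an updated call site (error handling abbreviated; rss_desc and error are assumed to be populated by the caller):

	struct mlx5_hrxq *hrxq = mlx5_hrxq_get(dev, rss_desc);

	if (hrxq == NULL)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot get hash queue");
	/* ... use hrxq->action / hrxq->idx directly ... */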
/**
* @param dev
* Pointer to Ethernet device.
- * @param hrxq_idx
+ * @param hrxq
- * Index to Hash Rx queue to release.
+ * Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (!hrxq)
return 0;
if (!hrxq->standalone)
return 0;
}
+/**
+ * Release the hash Rx queue by index.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ return mlx5_hrxq_obj_release(dev, hrxq);
+}
+
/**
* Create a drop Rx Hash queue.
*
if (data == NULL)
continue;
data->sh = sh;
- data->rt_timestamp = priv->config.rt_timestamp;
+ data->rt_timestamp = sh->dev_cap.rt_timestamp;
+ }
+}
+
+/**
+ * Validate given external RxQ rte_flow index, and get pointer to the
+ * corresponding external RxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ * Queue index in rte_flow.
+ *
+ * @return
+ * Pointer to the corresponding external RxQ on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_external_rxq *
+mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (dpdk_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
+ DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
+ dpdk_idx, MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) {
+ DRV_LOG(ERR, "Port %u "
+ "external RxQ isn't supported on local PD and CTX.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+ if (!mlx5_devx_obj_ops_en(priv->sh)) {
+ DRV_LOG(ERR,
+ "Port %u external RxQ isn't supported by Verbs API.",
+ port_id);
+ rte_errno = ENOTSUP;
+ return NULL;
}
+ /*
+ * When the user configures remote PD and CTX and the device creates
+ * RxQs by DevX, the external RxQs array is allocated.
+ */
+ MLX5_ASSERT(priv->ext_rxqs != NULL);
+ return &priv->ext_rxqs[dpdk_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+}
+
+int
+rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
+ uint32_t hw_idx)
+{
+ struct mlx5_external_rxq *ext_rxq;
+ uint32_t unmapped = 0;
+
+ ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_rxq == NULL)
+ return -rte_errno;
+ if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (ext_rxq->hw_id != hw_idx) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u "
+ "is already mapped to a different HW index "
+ "(requested %u, existing %u).",
+ port_id, dpdk_idx, hw_idx, ext_rxq->hw_id);
+ rte_errno = EEXIST;
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %u external RxQ index %u "
+ "is already mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ } else {
+ ext_rxq->hw_id = hw_idx;
+ DRV_LOG(DEBUG, "Port %u external RxQ index %u "
+ "is successfully mapped to the requested HW index (%u)",
+ port_id, dpdk_idx, hw_idx);
+ }
+ return 0;
+}
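From the application side, the new API maps an externally created RQ (hw_idx obtained out of band, e.g. via DevX) to a flow-level queue index; a hedged sketch (port_id and hw_idx are placeholders):

	#include <rte_pmd_mlx5.h>

	uint16_t flow_qidx = MLX5_EXTERNAL_RX_QUEUE_ID_MIN;

	if (rte_pmd_mlx5_external_rx_queue_id_map(port_id, flow_qidx,
						  hw_idx) < 0)
		rte_exit(EXIT_FAILURE, "map failed: %s\n",
			 rte_strerror(rte_errno));
	/* flow_qidx can now appear as the index of an
	 * RTE_FLOW_ACTION_TYPE_QUEUE or RSS action on this port. */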
+
+int
+rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
+{
+ struct mlx5_external_rxq *ext_rxq;
+ uint32_t mapped = 1;
+
+ ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
+ if (ext_rxq == NULL)
+ return -rte_errno;
+ if (__atomic_load_n(&ext_rxq->refcnt, __ATOMIC_RELAXED) > 1) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u still referenced.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
+ port_id, dpdk_idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u external RxQ index %u is successfully unmapped.",
+ port_id, dpdk_idx);
+ return 0;
+}
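Teardown is symmetric: flows referencing the index must be destroyed first so the reference count returns to 1 (the mapped state), then the index can be unmapped; a sketch using the same placeholders as above:

	struct rte_flow_error flow_err;

	rte_flow_flush(port_id, &flow_err);	/* drop flow references */
	if (rte_pmd_mlx5_external_rx_queue_id_unmap(port_id, flow_qidx) < 0)
		printf("unmap failed: %s\n", rte_strerror(rte_errno));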