#include "mlx5.h"
#include "mlx5_common_os.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_tx.h"
+#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
- uint8_t dev_port)
+int
+mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+ uint8_t dev_port)
{
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
int ret;
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array.
+ * @param rxq_data
+ * RX queue data.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
+ struct mlx5_rxq_data *rxq_data)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_common_device *cdev = priv->sh->cdev;
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_create_rq_attr rq_attr = { 0 };
rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
+ rq_attr.ts_format =
+ mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
/* Fill WQ attributes for this RQ. */
if (mlx5_rxq_mprq_enabled(rxq_data)) {
rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
MLX5_WQ_END_PAD_MODE_ALIGN :
MLX5_WQ_END_PAD_MODE_NONE;
- rq_attr.wq_attr.pd = priv->sh->pdn;
+ rq_attr.wq_attr.pd = cdev->pdn;
+ rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
- return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
- wqe_size, log_desc_n, &rq_attr,
- rxq_ctrl->socket);
+ return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
+ log_desc_n, &rq_attr, rxq_ctrl->socket);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array.
+ * @param rxq_data
+ * RX queue data.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
+ struct mlx5_rxq_data *rxq_data)
{
struct mlx5_devx_cq *cq_obj = 0;
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
log_cqe_n = log2above(cqe_n);
/* Create CQ using DevX API. */
- ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
- &cq_attr, sh->numa_node);
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
+ log_cqe_n, &cq_attr, sh->numa_node);
if (ret)
return ret;
cq_obj = &rxq_ctrl->obj->cq_obj;
attr.wq_attr.log_hairpin_num_packets =
attr.wq_attr.log_hairpin_data_sz -
MLX5_HAIRPIN_QUEUE_STRIDE;
- tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+ attr.counter_set_id = priv->counter_set_id;
+ tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
rxq_ctrl->socket);
if (!tmpl->rq) {
DRV_LOG(ERR,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
tmpl->devx_channel = mlx5_os_devx_create_event_channel
- (priv->sh->ctx,
- devx_ev_flag);
+ (priv->sh->cdev->ctx,
+ devx_ev_flag);
if (!tmpl->devx_channel) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create event channel %d.",
tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
}
/* Create CQ using DevX API. */
- ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
+ ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
if (ret) {
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
/* Create RQ using DevX API. */
- ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
+ ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
if (ret) {
DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
dev->data->port_id, idx);
* Pointer to Ethernet device.
* @param log_n
* Log of number of queues in the array.
+ * @param queues
+ * List of RX queue indices or NULL, in which case
+ * the attribute will be filled by drop queue ID.
+ * @param queues_n
+ * Size of @p queues array or 0 if it is NULL.
* @param ind_tbl
* DevX indirection table object.
*
}
rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
rqt_attr->rqt_actual_size = rqt_n;
+ if (queues == NULL) {
+ for (i = 0; i < rqt_n; i++)
+ rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
+ return rqt_attr;
+ }
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
struct mlx5_rxq_ctrl *rxq_ctrl =
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+ const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
+ NULL;
MLX5_ASSERT(ind_tbl);
- rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
- ind_tbl->queues,
- ind_tbl->queues_n);
+ rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
+ ind_tbl->queues_n);
if (!rqt_attr)
return -rte_errno;
- ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
+ ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
* @param[in] hash_fields
* Verbs protocol hash field to make the RSS on.
* @param[in] ind_tbl
- * Indirection table for TIR.
+ * Indirection table for TIR. If table queues array is NULL,
+ * a TIR for drop queue is assumed.
* @param[in] tunnel
* Tunnel type.
* @param[out] tir_attr
int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
+ enum mlx5_rxq_type rxq_obj_type;
bool lro = true;
uint32_t i;
- /* Enable TIR LRO only if all the queues were configured for. */
- for (i = 0; i < ind_tbl->queues_n; ++i) {
- if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
- lro = false;
- break;
+ /* NULL queues designate drop queue. */
+ if (ind_tbl->queues != NULL) {
+ struct mlx5_rxq_data *rxq_data =
+ (*priv->rxqs)[ind_tbl->queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ rxq_obj_type = rxq_ctrl->type;
+
+ /* Enable TIR LRO only if all the queues were configured for. */
+ for (i = 0; i < ind_tbl->queues_n; ++i) {
+ if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
+ lro = false;
+ break;
+ }
}
+ } else {
+ rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
}
memset(tir_attr, 0, sizeof(*tir_attr));
tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
hrxq->ind_table, tunnel, &tir_attr);
- hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+ hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
if (!hrxq->tir) {
DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
dev->data->port_id);
}
/**
- * Create a DevX drop action for Rx Hash queue.
+ * Create a DevX drop Rx queue.
*
* @param dev
* Pointer to Ethernet device.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
- (void)dev;
- DRV_LOG(ERR, "DevX drop action is not supported yet.");
- rte_errno = ENOTSUP;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int socket_id = dev->device->numa_node;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_obj *rxq = NULL;
+ int ret;
+
+ /*
+ * Initialize dummy control structures.
+ * They are required to hold pointers for cleanup
+ * and are only accessible via drop queue DevX objects.
+ */
+ rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
+ 0, socket_id);
+ if (rxq_ctrl == NULL) {
+ DRV_LOG(ERR, "Port %u could not allocate drop queue control",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
+ if (rxq == NULL) {
+ DRV_LOG(ERR, "Port %u could not allocate drop queue object",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Cross-link the dummy structures before creating DevX objects. */
+ rxq->rxq_ctrl = rxq_ctrl;
+ rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
+ rxq_ctrl->priv = priv;
+ rxq_ctrl->obj = rxq;
+ rxq_data = &rxq_ctrl->rxq;
+ /* Create CQ using DevX API. */
+ ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
+ dev->data->port_id);
+ goto error;
+ }
+ /* Create RQ using DevX API. */
+ ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
+ dev->data->port_id);
+ /* NOTE(review): this clobbers the rte_errno set by RQ creation with ENOMEM — confirm intentional. */
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
+ if (ret != 0)
+ goto error;
+ /* Initialize drop queue. */
+ priv->drop_queue.rxq = rxq;
+ return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ /* Destroy partially created DevX objects, then the dummy structures. */
+ if (rxq != NULL) {
+ if (rxq->rq_obj.rq != NULL)
+ mlx5_devx_rq_destroy(&rxq->rq_obj);
+ if (rxq->cq_obj.cq != NULL)
+ mlx5_devx_cq_destroy(&rxq->cq_obj);
+ if (rxq->devx_channel)
+ mlx5_os_devx_destroy_event_channel
+ (rxq->devx_channel);
+ mlx5_free(rxq);
+ }
+ if (rxq_ctrl != NULL)
+ mlx5_free(rxq_ctrl);
+ rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
+/**
+ * Release drop Rx queue resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;
+
+ /* Destroy the DevX objects first, then free the dummy structures. */
+ mlx5_rxq_devx_obj_release(rxq);
+ mlx5_free(rxq);
+ mlx5_free(rxq_ctrl);
+ /* Clear the stale pointer; the drop queue no longer exists. */
+ priv->drop_queue.rxq = NULL;
+}
+
/**
* Release a drop hash Rx queue.
*
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
- (void)dev;
- DRV_LOG(ERR, "DevX drop action is not supported yet.");
- rte_errno = ENOTSUP;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+ /*
+ * Tear down in reverse creation order: TIR, indirection table,
+ * then the drop Rx queue. NULL checks allow partially created state
+ * (this is also the error-cleanup path of drop action creation).
+ */
+ if (hrxq->tir != NULL)
+ mlx5_devx_tir_destroy(hrxq);
+ if (hrxq->ind_table->ind_table != NULL)
+ mlx5_devx_ind_table_destroy(hrxq->ind_table);
+ /* NOTE(review): assumes priv->drop_queue.rxq is non-NULL whenever called — confirm all callers. */
+ if (priv->drop_queue.rxq->rq != NULL)
+ mlx5_rxq_devx_obj_drop_release(dev);
+}
+
+/**
+ * Create a DevX drop action for Rx Hash queue.
+ *
+ * Builds the full drop pipeline: drop Rx queue, an indirection table
+ * pointing at it, and a hash Rx queue (TIR) on top.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+ int ret;
+
+ ret = mlx5_rxq_devx_obj_drop_create(dev);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop RX queue");
+ return ret;
+ }
+ /* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
+ ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
+ goto error;
+ }
+ ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Cannot create drop hash RX queue");
+ goto error;
+ }
+ return 0;
+error:
+ /* Destroy handles partially created objects via its NULL checks. */
+ mlx5_devx_drop_action_destroy(dev);
+ return ret;
+}
+
+/**
+ * Select TXQ TIS number.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue_idx
+ * Queue index in DPDK Tx queue array.
+ *
+ * @return
+ * TIS number to attach the queue to. The return type is unsigned,
+ * so no negative errno value is ever returned; the selected TIS is
+ * asserted to exist.
+ */
+static uint32_t
+mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int tis_idx;
+
+ /*
+ * With TIS-based LAG affinity, spread Tx queues over the bonded
+ * ports by taking (affinity base + queue index) modulo port count.
+ */
+ if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
MLX5_LAG_MODE_TIS) {
+ tis_idx = (priv->lag_affinity_idx + queue_idx) %
+ priv->sh->bond.n_port;
+ DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
+ dev->data->port_id, queue_idx, tis_idx + 1,
+ priv->sh->lag.tx_remap_affinity[tis_idx]);
+ } else {
+ tis_idx = 0;
+ }
+ MLX5_ASSERT(priv->sh->tis[tis_idx]);
+ return priv->sh->tis[tis_idx]->id;
}
/**
attr.wq_attr.log_hairpin_num_packets =
attr.wq_attr.log_hairpin_data_sz -
MLX5_HAIRPIN_QUEUE_STRIDE;
- attr.tis_num = priv->sh->tis->id;
- tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
+
+ attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
+ tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
if (!tmpl->sq) {
DRV_LOG(ERR,
"Port %u tx hairpin queue %u can't create SQ object.",
uint16_t log_desc_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_common_device *cdev = priv->sh->cdev;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
.allow_swp = !!priv->config.swp,
.cqn = txq_obj->cq_obj.cq->id,
.tis_lst_sz = 1,
- .tis_num = priv->sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = priv->sh->pdn,
+ .pd = cdev->pdn,
.uar_page =
mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
},
+ .ts_format =
+ mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
+ .tis_num = mlx5_get_txq_tis_num(dev, idx),
};
/* Create Send Queue object with DevX. */
- return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
- &sq_attr, priv->sh->numa_node);
+ return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
+ log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif
};
void *reg_addr;
uint32_t cqe_n, log_desc_n;
- uint32_t wqe_n;
+ uint32_t wqe_n, wqe_size;
int ret = 0;
MLX5_ASSERT(txq_data);
return 0;
}
/* Create completion queue object with DevX. */
- ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
&cq_attr, priv->sh->numa_node);
if (ret) {
DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
txq_data->cq_pi = 0;
txq_data->cq_db = txq_obj->cq_obj.db_rec;
*txq_data->cq_db = 0;
+ /*
+ * Adjust the amount of WQEs depending on inline settings.
+ * The number of descriptors should be enough to handle
+ * the specified number of packets. If queue is being created
+ * with Verbs the rdma-core does queue size adjustment
+ * internally in the mlx5_calc_sq_size(), we do the same
+ * for the queue being created with DevX at this point.
+ */
+ wqe_size = txq_data->tso_en ?
+ RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
+ wqe_size += sizeof(struct mlx5_wqe_cseg) +
+ sizeof(struct mlx5_wqe_eseg) +
+ sizeof(struct mlx5_wqe_dseg);
+ if (txq_data->inlen_send)
+ wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
+ sizeof(struct mlx5_wqe_eseg) +
+ RTE_ALIGN(txq_data->inlen_send +
+ sizeof(uint32_t),
+ MLX5_WSEG_SIZE));
+ wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
/* Create Send Queue object with DevX. */
- wqe_n = RTE_MIN(1UL << txq_data->elts_n,
+ wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
(uint32_t)priv->sh->device_attr.max_qp_wr);
log_desc_n = log2above(wqe_n);
ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
txq_data->wqe_pi = 0;
txq_data->wqe_comp = 0;
txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_data->qp_db = txq_obj->sq_obj.db_rec;
+ txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
*txq_data->qp_db = 0;
txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
/* Change Send Queue state to Ready-to-Send. */
- ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
+ ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
if (ret) {
rte_errno = errno;
DRV_LOG(ERR,
.drop_action_create = mlx5_devx_drop_action_create,
.drop_action_destroy = mlx5_devx_drop_action_destroy,
.txq_obj_new = mlx5_txq_devx_obj_new,
- .txq_obj_modify = mlx5_devx_modify_sq,
+ .txq_obj_modify = mlx5_txq_devx_modify,
.txq_obj_release = mlx5_txq_devx_obj_release,
+ .lb_dummy_queue_create = NULL,
+ .lb_dummy_queue_release = NULL,
};