+#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_common.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include "mlx4.h"
-#include "mlx4_autoconf.h"
+#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-/**
- * Allocate Tx queue elements.
- *
- * @param txq
- * Pointer to Tx queue structure.
- * @param elts_n
- * Number of elements to allocate.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_txq_alloc_elts(struct txq *txq, unsigned int elts_n)
-{
- unsigned int i;
- struct txq_elt (*elts)[elts_n] =
- rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
- int ret = 0;
-
- if (elts == NULL) {
- ERROR("%p: can't allocate packets array", (void *)txq);
- ret = ENOMEM;
- goto error;
- }
- for (i = 0; (i != elts_n); ++i) {
- struct txq_elt *elt = &(*elts)[i];
-
- elt->buf = NULL;
- }
- DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
- txq->elts_n = elts_n;
- txq->elts = elts;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- /*
- * Request send completion every MLX4_PMD_TX_PER_COMP_REQ packets or
- * at least 4 times per ring.
- */
- txq->elts_comp_cd_init =
- ((MLX4_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
- MLX4_PMD_TX_PER_COMP_REQ : (elts_n / 4));
- txq->elts_comp_cd = txq->elts_comp_cd_init;
- assert(ret == 0);
- return 0;
-error:
- rte_free(elts);
- DEBUG("%p: failed, freed everything", (void *)txq);
- assert(ret > 0);
- rte_errno = ret;
- return -rte_errno;
-}
-
/**
 * Free Tx queue elements.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 */
static void
mlx4_txq_free_elts(struct txq *txq)
{
- unsigned int elts_n = txq->elts_n;
unsigned int elts_head = txq->elts_head;
unsigned int elts_tail = txq->elts_tail;
- struct txq_elt (*elts)[elts_n] = txq->elts;
+ struct txq_elt (*elts)[txq->elts_n] = txq->elts;
+ unsigned int elts_m = txq->elts_n - 1;
DEBUG("%p: freeing WRs", (void *)txq);
- txq->elts_n = 0;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- txq->elts_comp_cd = 0;
- txq->elts_comp_cd_init = 0;
- txq->elts = NULL;
- if (elts == NULL)
- return;
while (elts_tail != elts_head) {
- struct txq_elt *elt = &(*elts)[elts_tail];
+ struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m];
assert(elt->buf != NULL);
rte_pktmbuf_free(elt->buf);
-#ifndef NDEBUG
- /* Poisoning. */
- memset(elt, 0x77, sizeof(*elt));
-#endif
- if (++elts_tail == elts_n)
- elts_tail = 0;
+ elt->buf = NULL;
+ elt->wqe = NULL;
}
- rte_free(elts);
+ txq->elts_tail = txq->elts_head;
}
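The rewritten loop can drop the explicit wrap test because elts_n is now always a power of two (see the descriptor rounding added to mlx4_tx_queue_setup() below), so `index & (elts_n - 1)` is equivalent to `index % elts_n`. A minimal standalone sketch of the idiom, with illustrative names that are not part of the PMD:

/* Power-of-two ring indexing as used above; names are illustrative. */
#include <assert.h>

#define RING_SIZE 8u              /* must be a power of two */
#define RING_MASK (RING_SIZE - 1u)

int
main(void)
{
	assert((7u & RING_MASK) == 7u);
	assert((8u & RING_MASK) == 0u);  /* wraps without a branch */
	assert((13u & RING_MASK) == 5u); /* 13 % 8 == 5 */
	return 0;
}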
struct txq_mp2mr_mbuf_check_data {
mlx4_txq_mp2mr(txq, mp);
}
+/**
+ * Retrieve the information needed to access the Tx queue directly.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mlxdv
+ * Pointer to device information for this Tx queue.
+ */
+static void
+mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ struct mlx4_cq *cq = &txq->mcq;
+ struct mlx4dv_qp *dqp = mlxdv->qp.out;
+ struct mlx4dv_cq *dcq = mlxdv->cq.out;
+
+ /* Total length, including headroom and spare WQEs. */
+ sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
+ sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
+ sq->eob = sq->buf + sq->size;
+ uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
+ /* A contiguous headroom of this size must always remain free. */
+ sq->remain_size = sq->size - headroom_size;
+ sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT);
+ sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
+ (0 << MLX4_SQ_OWNER_BIT));
+ sq->db = dqp->sdb;
+ sq->doorbell_qpn = dqp->doorbell_qpn;
+ cq->buf = dcq->buf.buf;
+ cq->cqe_cnt = dcq->cqe_cnt;
+ cq->set_ci_db = dcq->set_ci_db;
+ cq->cqe_64 = (dcq->cqe_size & 64) ? 1 : 0;
+}
+
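To make the headroom arithmetic concrete: with 64-byte WQE basic blocks (wqe_shift == 6, an illustrative value), headroom_size is 2048 + 64 = 2112 bytes, so remain_size starts 2112 bytes short of the full SQ area. A trivial check, with all sizes illustrative rather than read from a real queue:

/* Worked example of the SQ headroom computation above; all values
 * are illustrative. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t sq_size = 16384;                     /* SQ byte length */
	uint32_t wqe_shift = 6;                       /* 64-byte WQEs */
	uint32_t headroom = 2048 + (1u << wqe_shift); /* 2112 bytes */

	assert(sq_size - headroom == 14272);
	return 0;
}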
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ return offloads;
+}
+
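A caller would typically intersect such a capability mask with the offloads it wants before configuring the port. A hedged sketch against the DEV_TX_OFFLOAD_* generation of the ethdev API used by this patch; the port id and the `wanted` set are illustrative:

/* Sketch: keep only the Tx checksum offloads the port reports.
 * Uses the DEV_TX_OFFLOAD_* flags contemporary with this patch;
 * port_id and `wanted` are illustrative. */
#include <rte_ethdev.h>

static uint64_t
select_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
			  DEV_TX_OFFLOAD_TCP_CKSUM;

	rte_eth_dev_info_get(port_id, &info);
	return wanted & info.tx_offload_capa;
}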
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = mlx4_get_tx_port_offloads(priv);
+
+ return !((mandatory ^ requested) & supported);
+}
+
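The return expression is worth unpacking: `mandatory ^ requested` sets a bit wherever the queue disagrees with the port-level configuration, and masking with `supported` discards disagreements on offloads this PMD never reports, so unknown bits cannot fail validation. With illustrative bit values:

/* Worked example of the offload validation expression above;
 * the bit values are illustrative. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t supported = 0x7; /* PMD reports offload bits 0-2 */
	uint64_t mandatory = 0x1; /* port config enables bit 0 */

	/* Queue matches the port on all supported bits: valid. */
	assert(!((mandatory ^ UINT64_C(0x1)) & supported));
	/* Queue omits a mandatory, supported offload: invalid. */
	assert(((mandatory ^ UINT64_C(0x0)) & supported) != 0);
	/* Mismatch only on an unsupported bit (bit 3): still valid. */
	assert(!((mandatory ^ UINT64_C(0x9)) & supported));
	return 0;
}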
/**
 * DPDK callback to configure a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	unsigned int socket, const struct rte_eth_txconf *conf)
{
struct priv *priv = dev->data->dev_private;
+ struct mlx4dv_obj mlxdv;
+ struct mlx4dv_qp dv_qp;
+ struct mlx4dv_cq dv_cq;
+ struct txq_elt (*elts)[rte_align32pow2(desc)];
struct ibv_qp_init_attr qp_init_attr;
struct txq *txq;
+ uint8_t *bounce_buf;
+ struct mlx4_malloc_vec vec[] = {
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*txq),
+ .addr = (void **)&txq,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*elts),
+ .addr = (void **)&elts,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = MLX4_MAX_WQE_SIZE,
+ .addr = (void **)&bounce_buf,
+ },
+ };
int ret;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ /*
+ * Don't verify port offloads for applications that
+ * use the old API.
+ */
+ if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_get_tx_port_offloads(priv));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
ERROR("%p: invalid number of Tx descriptors", (void *)dev);
return -rte_errno;
}
+ if (desc != RTE_DIM(*elts)) {
+ desc = RTE_DIM(*elts);
+ WARN("%p: increased number of descriptors in Tx queue %u"
+ " to the next power of two (%u)",
+ (void *)dev, idx, desc);
+ }
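rte_align32pow2() rounds its argument up to the next power of two, which is what makes the `& (elts_n - 1)` wrap in mlx4_txq_free_elts() legal. For example:

/* rte_align32pow2() behavior (function from rte_common.h). */
#include <assert.h>
#include <rte_common.h>

int
main(void)
{
	assert(rte_align32pow2(500) == 512);
	assert(rte_align32pow2(512) == 512); /* powers of two unchanged */
	assert(rte_align32pow2(513) == 1024);
	return 0;
}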
/* Allocate and initialize Tx queue. */
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
+ mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket);
if (!txq) {
- rte_errno = ENOMEM;
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
return -rte_errno;
}
*txq = (struct txq){
.priv = priv,
- .stats.idx = idx,
+ .stats = {
+ .idx = idx,
+ },
.socket = socket,
+ .elts_n = desc,
+ .elts = elts,
+ .elts_head = 0,
+ .elts_tail = 0,
+ /*
+ * Request send completion every MLX4_PMD_TX_PER_COMP_REQ
+ * packets or at least 4 times per ring.
+ */
+ .elts_comp_cd =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .elts_comp_cd_init =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .csum = priv->hw_csum &&
+ (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+ /* Enable Tx loopback for VF devices. */
+ .lb = !!priv->vf,
+ .bounce_buf = bounce_buf,
};
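As a sanity check of the completion cadence: assuming MLX4_PMD_TX_PER_COMP_REQ is 64 (its value in mlx4.h, restated here as an assumption), a 128-descriptor ring requests a completion every min(64, 128/4) = 32 packets, while larger rings settle at every 64:

/* Check of the completion-request cadence formula; the constant 64
 * mirrors MLX4_PMD_TX_PER_COMP_REQ from mlx4.h (assumed value). */
#include <assert.h>
#include <rte_common.h>

#define PER_COMP_REQ 64u

int
main(void)
{
	assert(RTE_MIN(PER_COMP_REQ, 128u / 4) == 32u);  /* small ring */
	assert(RTE_MIN(PER_COMP_REQ, 1024u / 4) == 64u); /* large ring */
	return 0;
}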
txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
if (!txq->cq) {
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = mlx4_txq_alloc_elts(txq, desc);
- if (ret) {
- ERROR("%p: TXQ allocation failed: %s",
- (void *)dev, strerror(rte_errno));
- goto error;
- }
ret = ibv_modify_qp
(txq->qp,
&(struct ibv_qp_attr){
(void *)dev, strerror(rte_errno));
goto error;
}
+ /* Retrieve device queue information. */
+ mlxdv.cq.in = txq->cq;
+ mlxdv.cq.out = &dv_cq;
+ mlxdv.qp.in = txq->qp;
+ mlxdv.qp.out = &dv_qp;
+ ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
+ if (ret) {
+ rte_errno = EINVAL;
+ ERROR("%p: failed to obtain information needed for"
+ " accessing the device queues", (void *)dev);
+ goto error;
+ }
+ mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
+ /* Save the first WQE pointer in the first element. */
+ (*txq->elts)[0].wqe =
+ (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
/* Pre-register known mempools. */
rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
if (!txq->mp2mr[i].mp)
break;
assert(txq->mp2mr[i].mr);
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ mlx4_mr_put(txq->mp2mr[i].mr);
}
rte_free(txq);
}