#include "mlx5_utils.h"
#include "mlx5_defs.h"
+/* Initialization data for hash RX queues. Each entry lists the packet
+ * fields hashed by the corresponding hash RX queue type. */
+static const struct hash_rxq_init hash_rxq_init[] = {
+ [HASH_RXQ_TCPV4] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+ IBV_EXP_RX_HASH_DST_IPV4 |
+ IBV_EXP_RX_HASH_SRC_PORT_TCP |
+ IBV_EXP_RX_HASH_DST_PORT_TCP),
+ },
+ [HASH_RXQ_UDPV4] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+ IBV_EXP_RX_HASH_DST_IPV4 |
+ IBV_EXP_RX_HASH_SRC_PORT_UDP |
+ IBV_EXP_RX_HASH_DST_PORT_UDP),
+ },
+ [HASH_RXQ_IPV4] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+ IBV_EXP_RX_HASH_DST_IPV4),
+ },
+ [HASH_RXQ_ETH] = {
+ /* No RSS fields: this queue type does not hash. */
+ .hash_fields = 0,
+ },
+};
+
+/* Number of entries in hash_rxq_init[]. */
+static const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+
+/* Initialization data for hash RX queue indirection tables. */
+static const struct ind_table_init ind_table_init[] = {
+ {
+ .max_size = -1u, /* Superseded by HW limitations. */
+ .hash_types =
+ 1 << HASH_RXQ_TCPV4 |
+ 1 << HASH_RXQ_UDPV4 |
+ 1 << HASH_RXQ_IPV4 |
+ 0,
+ .hash_types_n = 3,
+ },
+ {
+ /* Dedicated single-entry table for HASH_RXQ_ETH. */
+ .max_size = 1,
+ .hash_types = 1 << HASH_RXQ_ETH,
+ .hash_types_n = 1,
+ },
+};
+
+/* Number of entries in ind_table_init[]. */
+#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
+
+/* Default RSS hash key also used for ConnectX-3. */
+static uint8_t hash_rxq_default_key[] = {
+ 0x2c, 0xc6, 0x81, 0xd1,
+ 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19,
+ 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9,
+ 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7,
+ 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3,
+ 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+/**
+ * Return the base-2 logarithm of a value, rounded up.
+ *
+ * NOTE(review): despite earlier wording, this returns an exponent
+ * (ceil(log2(v))), not a power of two; callers apply (1 << result)
+ * to obtain the power of two itself.
+ *
+ * @param v
+ * Input value.
+ *
+ * @return
+ * ceil(log2(v)); 0 when v is 0 or 1.
+ */
+static unsigned int
+log2above(unsigned int v)
+{
+ unsigned int l;
+ unsigned int r;
+
+ /* l counts positions shifted (floor(log2(v))); r becomes 1 if any
+ * discarded low bit was set, i.e. v was not a power of two. */
+ for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+ r |= (v & 1);
+ return (l + r);
+}
+
+/**
+ * Return the type corresponding to the n'th bit set.
+ *
+ * NOTE: this scans upward from bit n and returns the first enabled
+ * bit at index >= n; callers rely on enabled bits in hash_types
+ * being contiguous from bit 0.
+ *
+ * @param table
+ * The indirection table.
+ * @param n
+ * The n'th bit set.
+ *
+ * @return
+ * The corresponding hash_rxq_type.
+ */
+static enum hash_rxq_type
+hash_rxq_type_from_n(const struct ind_table_init *table, unsigned int n)
+{
+ unsigned int bit = n;
+
+ assert(n < table->hash_types_n);
+ /* Skip disabled bits until an enabled one is found. */
+ for (; !((table->hash_types >> bit) & 0x1); ++bit)
+ ;
+ return bit;
+}
+
+/**
+ * Filter out disabled hash RX queue types from ind_table_init[].
+ *
+ * Copies ind_table_init[] entries into *table, masking out hash types
+ * not supported by the current configuration and dropping entries left
+ * with no hash type at all.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] table
+ * Output table.
+ *
+ * @return
+ * Number of table entries.
+ */
+static unsigned int
+priv_make_ind_table_init(struct priv *priv,
+ struct ind_table_init (*table)[IND_TABLE_INIT_N])
+{
+ unsigned int i;
+ unsigned int j;
+ unsigned int table_n = 0;
+ /* Mandatory to receive frames not handled by normal hash RX queues. */
+ unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
+
+ /* Process other protocols only if more than one queue. */
+ if (priv->rxqs_n > 1)
+ for (i = 0; (i != hash_rxq_init_n); ++i)
+ if (hash_rxq_init[i].hash_fields)
+ hash_types_sup |= (1 << i);
+
+ /* Filter out entries whose protocols are not in the set. */
+ for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
+ unsigned int nb;
+ unsigned int h;
+
+ /* j is increased only if the table has valid protocols. */
+ assert(j <= i);
+ (*table)[j] = ind_table_init[i];
+ (*table)[j].hash_types &= hash_types_sup;
+ for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
+ if (((*table)[j].hash_types >> h) & 0x1)
+ ++nb;
+ /* Store the recomputed count in the entry being filled ([j],
+ * not [i]): once an entry has been dropped, j lags behind i
+ * and indexing with i would update the wrong slot while
+ * leaving a stale count in [j]. */
+ (*table)[j].hash_types_n = nb;
+ if (nb) {
+ ++table_n;
+ ++j;
+ }
+ }
+ return table_n;
+}
+
+/**
+ * Initialize hash RX queues and indirection table.
+ *
+ * Creates one or more RX indirection tables over the configured WQs,
+ * then one hash RX queue (QP) per enabled hash type. On failure all
+ * partially created resources are destroyed.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+priv_create_hash_rxqs(struct priv *priv)
+{
+ /* If the requested number of WQs is not a power of two, use the
+ * maximum indirection table size for better balancing.
+ * The result is always rounded to the next power of two. */
+ unsigned int wqs_n =
+ (1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ?
+ priv->ind_table_max_size :
+ priv->rxqs_n));
+ struct ibv_exp_wq *wqs[wqs_n];
+ struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
+ unsigned int ind_tables_n =
+ priv_make_ind_table_init(priv, &ind_table_init);
+ unsigned int hash_rxqs_n = 0;
+ struct hash_rxq (*hash_rxqs)[] = NULL;
+ struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
+ unsigned int i;
+ unsigned int j;
+ unsigned int k;
+ int err = 0;
+
+ /* Must not be called twice without priv_destroy_hash_rxqs(). */
+ assert(priv->ind_tables == NULL);
+ assert(priv->ind_tables_n == 0);
+ assert(priv->hash_rxqs == NULL);
+ assert(priv->hash_rxqs_n == 0);
+ assert(priv->pd != NULL);
+ assert(priv->ctx != NULL);
+ if (priv->rxqs_n == 0)
+ return EINVAL;
+ assert(priv->rxqs != NULL);
+ if (ind_tables_n == 0) {
+ ERROR("all hash RX queue types have been filtered out,"
+ " indirection table cannot be created");
+ return EINVAL;
+ }
+ if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) {
+ ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
+ err = ERANGE;
+ goto error;
+ }
+ if (wqs_n != priv->rxqs_n) {
+ INFO("%u RX queues are configured, consider rounding this"
+ " number to the next power of two for better balancing",
+ priv->rxqs_n);
+ DEBUG("indirection table extended to assume %u WQs", wqs_n);
+ }
+ /* When the number of RX queues is not a power of two, the remaining
+ * table entries are padded with reused WQs and hashes are not spread
+ * uniformly. */
+ for (i = 0, j = 0; (i != wqs_n); ++i) {
+ wqs[i] = (*priv->rxqs)[j]->wq;
+ if (++j == priv->rxqs_n)
+ j = 0;
+ }
+ /* Get number of hash RX queues to configure. */
+ for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
+ hash_rxqs_n += ind_table_init[i].hash_types_n;
+ DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
+ hash_rxqs_n, priv->rxqs_n, ind_tables_n);
+ /* Create indirection tables. Zero-initialized by rte_calloc() so
+ * the error path can tell created entries from empty ones. */
+ ind_tables = rte_calloc(__func__, ind_tables_n,
+ sizeof((*ind_tables)[0]), 0);
+ if (ind_tables == NULL) {
+ err = ENOMEM;
+ ERROR("cannot allocate indirection tables container: %s",
+ strerror(err));
+ goto error;
+ }
+ for (i = 0; (i != ind_tables_n); ++i) {
+ struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
+ .pd = priv->pd,
+ .log_ind_tbl_size = 0, /* Set below. */
+ .ind_tbl = wqs,
+ .comp_mask = 0,
+ };
+ unsigned int ind_tbl_size = ind_table_init[i].max_size;
+ struct ibv_exp_rwq_ind_table *ind_table;
+
+ /* Cap table size to the number of WQs actually available. */
+ if (wqs_n < ind_tbl_size)
+ ind_tbl_size = wqs_n;
+ ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
+ errno = 0;
+ ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
+ &ind_init_attr);
+ if (ind_table != NULL) {
+ (*ind_tables)[i] = ind_table;
+ continue;
+ }
+ /* Not clear whether errno is set. */
+ err = (errno ? errno : EINVAL);
+ ERROR("RX indirection table creation failed with error %d: %s",
+ err, strerror(err));
+ goto error;
+ }
+ /* Allocate array that holds hash RX queues and related data. */
+ hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
+ sizeof((*hash_rxqs)[0]), 0);
+ if (hash_rxqs == NULL) {
+ err = ENOMEM;
+ ERROR("cannot allocate hash RX queues container: %s",
+ strerror(err));
+ goto error;
+ }
+ /* i: hash RX queue index, j: indirection table index,
+ * k: hash type index within table j. */
+ for (i = 0, j = 0, k = 0;
+ ((i != hash_rxqs_n) && (j != ind_tables_n));
+ ++i) {
+ struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
+ enum hash_rxq_type type =
+ hash_rxq_type_from_n(&ind_table_init[j], k);
+ struct ibv_exp_rx_hash_conf hash_conf = {
+ .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = sizeof(hash_rxq_default_key),
+ .rx_hash_key = hash_rxq_default_key,
+ .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
+ .rwq_ind_tbl = (*ind_tables)[j],
+ };
+ struct ibv_exp_qp_init_attr qp_init_attr = {
+ .max_inl_recv = 0, /* Currently not supported. */
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_RX_HASH),
+ .pd = priv->pd,
+ .rx_hash_conf = &hash_conf,
+ .port_num = priv->port,
+ };
+
+ DEBUG("using indirection table %u for hash RX queue %u",
+ j, i);
+ *hash_rxq = (struct hash_rxq){
+ .priv = priv,
+ .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
+ .type = type,
+ };
+ if (hash_rxq->qp == NULL) {
+ err = (errno ? errno : EINVAL);
+ ERROR("Hash RX QP creation failure: %s",
+ strerror(err));
+ goto error;
+ }
+ if (++k < ind_table_init[j].hash_types_n)
+ continue;
+ /* Switch to the next indirection table and reset hash RX
+ * queue type array index. */
+ ++j;
+ k = 0;
+ }
+ priv->ind_tables = ind_tables;
+ priv->ind_tables_n = ind_tables_n;
+ priv->hash_rxqs = hash_rxqs;
+ priv->hash_rxqs_n = hash_rxqs_n;
+ assert(err == 0);
+ return 0;
+error:
+ /* Both containers were zeroed by rte_calloc(); entries never
+ * created are NULL and skipped below. */
+ if (hash_rxqs != NULL) {
+ for (i = 0; (i != hash_rxqs_n); ++i) {
+ struct ibv_qp *qp = (*hash_rxqs)[i].qp;
+
+ if (qp == NULL)
+ continue;
+ claim_zero(ibv_destroy_qp(qp));
+ }
+ rte_free(hash_rxqs);
+ }
+ if (ind_tables != NULL) {
+ for (j = 0; (j != ind_tables_n); ++j) {
+ struct ibv_exp_rwq_ind_table *ind_table =
+ (*ind_tables)[j];
+
+ if (ind_table == NULL)
+ continue;
+ claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+ }
+ rte_free(ind_tables);
+ }
+ return err;
+}
+
+/**
+ * Clean up hash RX queues and indirection table.
+ *
+ * Reverses priv_create_hash_rxqs(): destroys every hash RX QP, then
+ * every indirection table, and releases both containers.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_destroy_hash_rxqs(struct priv *priv)
+{
+ unsigned int q;
+
+ DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
+ if (priv->hash_rxqs_n == 0) {
+ /* Nothing was created: both containers must be absent. */
+ assert(priv->hash_rxqs == NULL);
+ assert(priv->ind_tables == NULL);
+ return;
+ }
+ /* Destroy hash RX QPs first. */
+ for (q = 0; (q != priv->hash_rxqs_n); ++q) {
+ struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[q];
+ unsigned int m;
+ unsigned int n;
+
+ assert(hash_rxq->priv == priv);
+ assert(hash_rxq->qp != NULL);
+ /* Also check that there are no remaining flows. */
+ assert(hash_rxq->allmulti_flow == NULL);
+ assert(hash_rxq->promisc_flow == NULL);
+ for (m = 0; (m != RTE_DIM(hash_rxq->mac_flow)); ++m)
+ for (n = 0; (n != RTE_DIM(hash_rxq->mac_flow[m])); ++n)
+ assert(hash_rxq->mac_flow[m][n] == NULL);
+ claim_zero(ibv_destroy_qp(hash_rxq->qp));
+ }
+ priv->hash_rxqs_n = 0;
+ rte_free(priv->hash_rxqs);
+ priv->hash_rxqs = NULL;
+ /* Indirection tables can only go once no QP references them. */
+ for (q = 0; (q != priv->ind_tables_n); ++q) {
+ struct ibv_exp_rwq_ind_table *ind_table =
+ (*priv->ind_tables)[q];
+
+ assert(ind_table != NULL);
+ claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
+ }
+ priv->ind_tables_n = 0;
+ rte_free(priv->ind_tables);
+ priv->ind_tables = NULL;
+}
+
+/**
+ * Allocate RX queue elements with scattered packets support.
+ *
+ * Each element holds RTE_DIM(elt->sges) segments; the first segment
+ * keeps its mbuf headroom while the others give it up for extra room.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param elts_n
+ * Number of elements to allocate.
+ * @param[in] pool
+ * If not NULL, fetch buffers from this array instead of allocating them
+ * with rte_pktmbuf_alloc().
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n,
+ struct rte_mbuf **pool)
+{
+ unsigned int i;
+ struct rxq_elt_sp (*elts)[elts_n] =
+ rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
+ rxq->socket);
+ int ret = 0;
+
+ if (elts == NULL) {
+ ERROR("%p: can't allocate packets array", (void *)rxq);
+ ret = ENOMEM;
+ goto error;
+ }
+ /* For each WR (packet). */
+ for (i = 0; (i != elts_n); ++i) {
+ unsigned int j;
+ struct rxq_elt_sp *elt = &(*elts)[i];
+ struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges;
+
+ /* These two arrays must have the same size. */
+ assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs));
+ /* For each SGE (segment). */
+ for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
+ struct ibv_sge *sge = &(*sges)[j];
+ struct rte_mbuf *buf;
+
+ if (pool != NULL) {
+ /* Caller-provided mbufs: reset before reuse. */
+ buf = *(pool++);
+ assert(buf != NULL);
+ rte_pktmbuf_reset(buf);
+ } else
+ buf = rte_pktmbuf_alloc(rxq->mp);
+ if (buf == NULL) {
+ assert(pool == NULL);
+ ERROR("%p: empty mbuf pool", (void *)rxq);
+ ret = ENOMEM;
+ goto error;
+ }
+ elt->bufs[j] = buf;
+ /* Headroom is reserved by rte_pktmbuf_alloc(). */
+ assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ /* Buffer is supposed to be empty. */
+ assert(rte_pktmbuf_data_len(buf) == 0);
+ assert(rte_pktmbuf_pkt_len(buf) == 0);
+ /* sge->addr must be able to store a pointer. */
+ assert(sizeof(sge->addr) >= sizeof(uintptr_t));
+ if (j == 0) {
+ /* The first SGE keeps its headroom. */
+ sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ sge->length = (buf->buf_len -
+ RTE_PKTMBUF_HEADROOM);
+ } else {
+ /* Subsequent SGEs lose theirs. */
+ assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ SET_DATA_OFF(buf, 0);
+ sge->addr = (uintptr_t)buf->buf_addr;
+ sge->length = buf->buf_len;
+ }
+ sge->lkey = rxq->mr->lkey;
+ /* Redundant check for tailroom. */
+ assert(sge->length == rte_pktmbuf_tailroom(buf));
+ }
+ }
+ DEBUG("%p: allocated and configured %u WRs (%zu segments)",
+ (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges)));
+ rxq->elts_n = elts_n;
+ rxq->elts_head = 0;
+ rxq->elts.sp = elts;
+ assert(ret == 0);
+ return 0;
+error:
+ if (elts != NULL) {
+ assert(pool == NULL);
+ /* elts was zeroed by rte_calloc_socket(): slots never filled
+ * are still NULL and safely skipped below. */
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ unsigned int j;
+ struct rxq_elt_sp *elt = &(*elts)[i];
+
+ for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
+ struct rte_mbuf *buf = elt->bufs[j];
+
+ if (buf != NULL)
+ rte_pktmbuf_free_seg(buf);
+ }
+ }
+ rte_free(elts);
+ }
+ DEBUG("%p: failed, freed everything", (void *)rxq);
+ assert(ret > 0);
+ return ret;
+}
+
+/**
+ * Free RX queue elements with scattered packets support.
+ *
+ * Detaches the element array from the queue first, then releases every
+ * mbuf segment and the array itself.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_sp(struct rxq *rxq)
+{
+ unsigned int elt_i;
+ unsigned int elts_n = rxq->elts_n;
+ struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp;
+
+ DEBUG("%p: freeing WRs", (void *)rxq);
+ /* Detach before freeing so the queue never points at freed data. */
+ rxq->elts_n = 0;
+ rxq->elts.sp = NULL;
+ if (elts == NULL)
+ return;
+ for (elt_i = 0; (elt_i != RTE_DIM(*elts)); ++elt_i) {
+ struct rxq_elt_sp *elt = &(*elts)[elt_i];
+ unsigned int seg;
+
+ for (seg = 0; (seg != RTE_DIM(elt->bufs)); ++seg) {
+ struct rte_mbuf *buf = elt->bufs[seg];
+
+ if (buf != NULL)
+ rte_pktmbuf_free_seg(buf);
+ }
+ }
+ rte_free(elts);
+}
+
/**
* Allocate RX queue elements.
*
/* For each WR (packet). */
for (i = 0; (i != elts_n); ++i) {
struct rxq_elt *elt = &(*elts)[i];
- struct ibv_recv_wr *wr = &elt->wr;
struct ibv_sge *sge = &(*elts)[i].sge;
struct rte_mbuf *buf;
ret = ENOMEM;
goto error;
}
- /* Configure WR. Work request ID contains its own index in
- * the elts array and the offset between SGE buffer header and
- * its data. */
- WR_ID(wr->wr_id).id = i;
- WR_ID(wr->wr_id).offset =
- (((uintptr_t)buf->buf_addr + RTE_PKTMBUF_HEADROOM) -
- (uintptr_t)buf);
- wr->next = &(*elts)[(i + 1)].wr;
- wr->sg_list = sge;
- wr->num_sge = 1;
+ elt->buf = buf;
/* Headroom is reserved by rte_pktmbuf_alloc(). */
assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
sge->lkey = rxq->mr->lkey;
/* Redundant check for tailroom. */
assert(sge->length == rte_pktmbuf_tailroom(buf));
- /* Make sure elts index and SGE mbuf pointer can be deduced
- * from WR ID. */
- if ((WR_ID(wr->wr_id).id != i) ||
- ((void *)((uintptr_t)sge->addr -
- WR_ID(wr->wr_id).offset) != buf)) {
- ERROR("%p: cannot store index and offset in WR ID",
- (void *)rxq);
- sge->addr = 0;
- rte_pktmbuf_free(buf);
- ret = EOVERFLOW;
- goto error;
- }
}
- /* The last WR pointer must be NULL. */
- (*elts)[(i - 1)].wr.next = NULL;
DEBUG("%p: allocated and configured %u single-segment WRs",
(void *)rxq, elts_n);
rxq->elts_n = elts_n;
assert(pool == NULL);
for (i = 0; (i != RTE_DIM(*elts)); ++i) {
struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf;
+ struct rte_mbuf *buf = elt->buf;
- if (elt->sge.addr == 0)
- continue;
- assert(WR_ID(elt->wr.wr_id).id == i);
- buf = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(elt->wr.wr_id).offset);
- rte_pktmbuf_free_seg(buf);
+ if (buf != NULL)
+ rte_pktmbuf_free_seg(buf);
}
rte_free(elts);
}
return;
for (i = 0; (i != RTE_DIM(*elts)); ++i) {
struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf;
+ struct rte_mbuf *buf = elt->buf;
- if (elt->sge.addr == 0)
- continue;
- assert(WR_ID(elt->wr.wr_id).id == i);
- buf = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(elt->wr.wr_id).offset);
- rte_pktmbuf_free_seg(buf);
+ if (buf != NULL)
+ rte_pktmbuf_free_seg(buf);
}
rte_free(elts);
}
struct ibv_exp_release_intf_params params;
DEBUG("cleaning up %p", (void *)rxq);
- rxq_free_elts(rxq);
- if (rxq->if_qp != NULL) {
+ if (rxq->sp)
+ rxq_free_elts_sp(rxq);
+ else
+ rxq_free_elts(rxq);
+ if (rxq->if_wq != NULL) {
assert(rxq->priv != NULL);
assert(rxq->priv->ctx != NULL);
- assert(rxq->qp != NULL);
+ assert(rxq->wq != NULL);
params = (struct ibv_exp_release_intf_params){
.comp_mask = 0,
};
claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
- rxq->if_qp,
+ rxq->if_wq,
¶ms));
}
if (rxq->if_cq != NULL) {
rxq->if_cq,
¶ms));
}
- if (rxq->qp != NULL) {
- claim_zero(ibv_destroy_qp(rxq->qp));
- }
+ if (rxq->wq != NULL)
+ claim_zero(ibv_exp_destroy_wq(rxq->wq));
if (rxq->cq != NULL)
claim_zero(ibv_destroy_cq(rxq->cq));
if (rxq->rd != NULL) {
}
/**
- * Allocate a Queue Pair.
- * Optionally setup inline receive if supported.
+ * Reconfigure a RX queue with new parameters.
*
- * @param priv
- * Pointer to private structure.
- * @param cq
- * Completion queue to associate with QP.
- * @param desc
- * Number of descriptors in QP (hint only).
+ * rxq_rehash() does not allocate mbufs, which, if not done from the right
+ * thread (such as a control thread), may corrupt the pool.
+ * In case of failure, the queue is left untouched.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rxq
+ * RX queue pointer.
*
* @return
- * QP pointer or NULL in case of error.
+ * 0 on success, errno value on failure.
*/
-static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- struct ibv_exp_res_domain *rd)
+int
+rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
{
- struct ibv_exp_qp_init_attr attr = {
- /* CQ to be associated with the send queue. */
- .send_cq = cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX5_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX5_PMD_SGE_WR_N),
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
- .pd = priv->pd,
- .res_domain = rd,
+ struct priv *priv = rxq->priv;
+ struct rxq tmpl = *rxq;
+ unsigned int mbuf_n;
+ unsigned int desc_n;
+ struct rte_mbuf **pool;
+ unsigned int i, k;
+ struct ibv_exp_wq_attr mod;
+ int err;
+
+ DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
+ /* Number of descriptors and mbufs currently allocated. */
+ desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1));
+ mbuf_n = desc_n;
+ /* Toggle RX checksum offload if hardware supports it. */
+ if (priv->hw_csum) {
+ tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->csum = tmpl.csum;
+ }
+ if (priv->hw_csum_l2tun) {
+ tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->csum_l2tun = tmpl.csum_l2tun;
+ }
+ /* Enable scattered packets support for this queue if necessary. */
+ if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+ tmpl.sp = 1;
+ desc_n /= MLX5_PMD_SGE_WR_N;
+ } else
+ tmpl.sp = 0;
+ DEBUG("%p: %s scattered packets support (%u WRs)",
+ (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n);
+ /* If scatter mode is the same as before, nothing to do. */
+ if (tmpl.sp == rxq->sp) {
+ DEBUG("%p: nothing to do", (void *)dev);
+ return 0;
+ }
+ /* From now on, any failure will render the queue unusable.
+ * Reinitialize WQ. */
+ mod = (struct ibv_exp_wq_attr){
+ .attr_mask = IBV_EXP_WQ_ATTR_STATE,
+ .wq_state = IBV_EXP_WQS_RESET,
};
+ err = ibv_exp_modify_wq(tmpl.wq, &mod);
+ if (err) {
+ ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
+ assert(err > 0);
+ return err;
+ }
+ /* Allocate pool. */
+ pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
+ if (pool == NULL) {
+ ERROR("%p: cannot allocate memory", (void *)dev);
+ return ENOBUFS;
+ }
+ /* Snatch mbufs from original queue. */
+ k = 0;
+ if (rxq->sp) {
+ struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
- return ibv_exp_create_qp(priv->ctx, &attr);
-}
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ struct rxq_elt_sp *elt = &(*elts)[i];
+ unsigned int j;
-#ifdef RSS_SUPPORT
+ for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
+ assert(elt->bufs[j] != NULL);
+ pool[k++] = elt->bufs[j];
+ }
+ }
+ } else {
+ struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
-/**
- * Allocate a RSS Queue Pair.
- * Optionally setup inline receive if supported.
- *
- * @param priv
- * Pointer to private structure.
- * @param cq
- * Completion queue to associate with QP.
- * @param desc
- * Number of descriptors in QP (hint only).
- * @param parent
- * If nonzero, create a parent QP, otherwise a child.
- *
- * @return
- * QP pointer or NULL in case of error.
- */
-static struct ibv_qp *
-rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- int parent, struct ibv_exp_res_domain *rd)
-{
- struct ibv_exp_qp_init_attr attr = {
- /* CQ to be associated with the send queue. */
- .send_cq = cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX5_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX5_PMD_SGE_WR_N),
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
- IBV_EXP_QP_INIT_ATTR_QPG),
- .pd = priv->pd,
- .res_domain = rd,
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ struct rxq_elt *elt = &(*elts)[i];
+ struct rte_mbuf *buf = elt->buf;
+
+ pool[k++] = buf;
+ }
+ }
+ assert(k == mbuf_n);
+ tmpl.elts_n = 0;
+ tmpl.elts.sp = NULL;
+ assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp);
+ err = ((tmpl.sp) ?
+ rxq_alloc_elts_sp(&tmpl, desc_n, pool) :
+ rxq_alloc_elts(&tmpl, desc_n, pool));
+ if (err) {
+ ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
+ rte_free(pool);
+ assert(err > 0);
+ return err;
+ }
+ assert(tmpl.elts_n == desc_n);
+ assert(tmpl.elts.sp != NULL);
+ rte_free(pool);
+ /* Clean up original data. */
+ rxq->elts_n = 0;
+ rte_free(rxq->elts.sp);
+ rxq->elts.sp = NULL;
+ /* Change queue state to ready. */
+ mod = (struct ibv_exp_wq_attr){
+ .attr_mask = IBV_EXP_WQ_ATTR_STATE,
+ .wq_state = IBV_EXP_WQS_RDY,
};
+ err = ibv_exp_modify_wq(tmpl.wq, &mod);
+ if (err) {
+ ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
+ (void *)dev, strerror(err));
+ goto error;
+ }
+ /* Post SGEs. */
+ assert(tmpl.if_wq != NULL);
+ if (tmpl.sp) {
+ struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
- if (parent) {
- attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
- /* TSS isn't necessary. */
- attr.qpg.parent_attrib.tss_child_count = 0;
- attr.qpg.parent_attrib.rss_child_count = priv->rxqs_n;
- DEBUG("initializing parent RSS queue");
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ err = tmpl.if_wq->recv_sg_list
+ (tmpl.wq,
+ (*elts)[i].sges,
+ RTE_DIM((*elts)[i].sges));
+ if (err)
+ break;
+ }
} else {
- attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
- attr.qpg.qpg_parent = priv->rxq_parent.qp;
- DEBUG("initializing child RSS queue");
+ struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
+
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ err = tmpl.if_wq->recv_burst(
+ tmpl.wq,
+ &(*elts)[i].sge,
+ 1);
+ if (err)
+ break;
+ }
}
- return ibv_exp_create_qp(priv->ctx, &attr);
+ if (err) {
+ ERROR("%p: failed to post SGEs with error %d",
+ (void *)dev, err);
+ /* Set err because it does not contain a valid errno value. */
+ err = EIO;
+ goto error;
+ }
+error:
+ *rxq = tmpl;
+ assert(err >= 0);
+ return err;
}
-#endif /* RSS_SUPPORT */
-
/**
* Configure a RX queue.
*
.mp = mp,
.socket = socket
};
- struct ibv_exp_qp_attr mod;
+ struct ibv_exp_wq_attr mod;
union {
struct ibv_exp_query_intf_params params;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_res_domain_init_attr rd;
+ struct ibv_exp_wq_init_attr wq;
} attr;
enum ibv_exp_query_intf_status status;
- struct ibv_recv_wr *bad_wr;
struct rte_mbuf *buf;
int ret = 0;
- int parent = (rxq == &priv->rxq_parent);
+ unsigned int i;
+ unsigned int cq_size = desc;
(void)conf; /* Thresholds configuration (ignored). */
- /*
- * If this is a parent queue, hardware must support RSS and
- * RSS must be enabled.
- */
- assert((!parent) || ((priv->hw_rss) && (priv->rss)));
- if (parent) {
- /* Even if unused, ibv_create_cq() requires at least one
- * descriptor. */
- desc = 1;
- goto skip_mr;
- }
if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
ERROR("%p: invalid number of RX descriptors (must be a"
" multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
rte_pktmbuf_free(buf);
+ /* Toggle RX checksum offload if hardware supports it. */
+ if (priv->hw_csum)
+ tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ if (priv->hw_csum_l2tun)
+ tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ /* Enable scattered packets support for this queue if necessary. */
+ if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+ tmpl.sp = 1;
+ desc /= MLX5_PMD_SGE_WR_N;
+ }
+ DEBUG("%p: %s scattered packets support (%u WRs)",
+ (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
/* Use the entire RX mempool as the memory region. */
tmpl.mr = ibv_reg_mr(priv->pd,
(void *)mp->elt_va_start,
(void *)dev, strerror(ret));
goto error;
}
-skip_mr:
attr.rd = (struct ibv_exp_res_domain_init_attr){
.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
IBV_EXP_RES_DOMAIN_MSG_MODEL),
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
};
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0,
+ &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
priv->device_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
-#ifdef RSS_SUPPORT
- if (priv->rss)
- tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
- tmpl.rd);
- else
-#endif /* RSS_SUPPORT */
- tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
- if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
+ attr.wq = (struct ibv_exp_wq_init_attr){
+ .wq_context = NULL, /* Could be useful in the future. */
+ .wq_type = IBV_EXP_WQT_RQ,
+ /* Max number of outstanding WRs. */
+ .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ?
+ priv->device_attr.max_qp_wr :
+ (int)cq_size),
+ /* Max number of scatter/gather elements in a WR. */
+ .max_recv_sge = ((priv->device_attr.max_sge <
+ MLX5_PMD_SGE_WR_N) ?
+ priv->device_attr.max_sge :
+ MLX5_PMD_SGE_WR_N),
+ .pd = priv->pd,
+ .cq = tmpl.cq,
+ .comp_mask = IBV_EXP_CREATE_WQ_RES_DOMAIN,
+ .res_domain = tmpl.rd,
};
- ret = ibv_exp_modify_qp(tmpl.qp, &mod,
- (IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
- IBV_EXP_QP_PORT));
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
+ tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
+ if (tmpl.wq == NULL) {
+ ret = (errno ? errno : EINVAL);
+ ERROR("%p: WQ creation failure: %s",
(void *)dev, strerror(ret));
goto error;
}
- /* Allocate descriptors for RX queues, except for the RSS parent. */
- if (parent)
- goto skip_alloc;
- ret = rxq_alloc_elts(&tmpl, desc, NULL);
+ if (tmpl.sp)
+ ret = rxq_alloc_elts_sp(&tmpl, desc, NULL);
+ else
+ ret = rxq_alloc_elts(&tmpl, desc, NULL);
if (ret) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(ret));
goto error;
}
- ret = ibv_post_recv(tmpl.qp,
- &(*tmpl.elts.no_sp)[0].wr,
- &bad_wr);
- if (ret) {
- ERROR("%p: ibv_post_recv() failed for WR %p: %s",
- (void *)dev,
- (void *)bad_wr,
- strerror(ret));
- goto error;
- }
-skip_alloc:
- mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
/* Save port ID. */
tmpl.port_id = dev->data->port_id;
DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
}
attr.params = (struct ibv_exp_query_intf_params){
.intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .obj = tmpl.qp,
+ .intf = IBV_EXP_INTF_WQ,
+ .obj = tmpl.wq,
};
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ERROR("%p: QP interface family query failed with status %d",
+ tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+ if (tmpl.if_wq == NULL) {
+ ERROR("%p: WQ interface family query failed with status %d",
(void *)dev, status);
goto error;
}
+ /* Change queue state to ready. */
+ mod = (struct ibv_exp_wq_attr){
+ .attr_mask = IBV_EXP_WQ_ATTR_STATE,
+ .wq_state = IBV_EXP_WQS_RDY,
+ };
+ ret = ibv_exp_modify_wq(tmpl.wq, &mod);
+ if (ret) {
+ ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
+ /* Post SGEs. */
+ if (tmpl.sp) {
+ struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
+
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ ret = tmpl.if_wq->recv_sg_list
+ (tmpl.wq,
+ (*elts)[i].sges,
+ RTE_DIM((*elts)[i].sges));
+ if (ret)
+ break;
+ }
+ } else {
+ struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
+
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ ret = tmpl.if_wq->recv_burst(
+ tmpl.wq,
+ &(*elts)[i].sge,
+ 1);
+ if (ret)
+ break;
+ }
+ }
+ if (ret) {
+ ERROR("%p: failed to post SGEs with error %d",
+ (void *)dev, ret);
+ /* Set ret because it does not contain a valid errno value. */
+ ret = EIO;
+ goto error;
+ }
/* Clean up rxq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
rxq_cleanup(rxq);
if (ret)
rte_free(rxq);
else {
+ rxq->stats.idx = idx;
DEBUG("%p: adding RX queue %p to list",
(void *)dev, (void *)rxq);
(*priv->rxqs)[idx] = rxq;
/* Update receive callback. */
- dev->rx_pkt_burst = mlx5_rx_burst;
+ if (rxq->sp)
+ dev->rx_pkt_burst = mlx5_rx_burst_sp;
+ else
+ dev->rx_pkt_burst = mlx5_rx_burst;
}
priv_unlock(priv);
return -ret;
return;
priv = rxq->priv;
priv_lock(priv);
- assert(rxq != &priv->rxq_parent);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",