X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=ff58c49213334626ebdd924dd4866e99a970e47e;hb=8fd92a66c60a7310cf5ab91996b9b09447512a61;hp=41f8811508c7cd51f4164d9a2685919e4e9b9440;hpb=95e16ef3254f7e75076c7ed2bdd4e8275c5e6894;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 41f8811508..ff58c49213 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2015 6WIND S.A.
- *   Copyright 2015 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
  */
 
 #include <stddef.h>
@@ -36,82 +8,37 @@
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
+#include <fcntl.h>
+#include <sys/queue.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_io.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
+#include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
-
-/* Initialization data for hash RX queues.
*/ -static const struct hash_rxq_init hash_rxq_init[] = { - [HASH_RXQ_TCPV4] = { - .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | - IBV_EXP_RX_HASH_DST_IPV4 | - IBV_EXP_RX_HASH_SRC_PORT_TCP | - IBV_EXP_RX_HASH_DST_PORT_TCP), - }, - [HASH_RXQ_UDPV4] = { - .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | - IBV_EXP_RX_HASH_DST_IPV4 | - IBV_EXP_RX_HASH_SRC_PORT_UDP | - IBV_EXP_RX_HASH_DST_PORT_UDP), - }, - [HASH_RXQ_IPV4] = { - .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | - IBV_EXP_RX_HASH_DST_IPV4), - }, - [HASH_RXQ_ETH] = { - .hash_fields = 0, - }, -}; - -/* Number of entries in hash_rxq_init[]. */ -static const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); - -/* Initialization data for hash RX queue indirection tables. */ -static const struct ind_table_init ind_table_init[] = { - { - .max_size = -1u, /* Superseded by HW limitations. */ - .hash_types = - 1 << HASH_RXQ_TCPV4 | - 1 << HASH_RXQ_UDPV4 | - 1 << HASH_RXQ_IPV4 | - 0, - .hash_types_n = 3, - }, - { - .max_size = 1, - .hash_types = 1 << HASH_RXQ_ETH, - .hash_types_n = 1, - }, -}; - -#define IND_TABLE_INIT_N RTE_DIM(ind_table_init) +#include "mlx5_glue.h" /* Default RSS hash key also used for ConnectX-3. */ -static uint8_t hash_rxq_default_key[] = { +uint8_t rss_hash_default_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7, 0xfc, 0xa2, 0x83, 0x19, @@ -124,1132 +51,1431 @@ static uint8_t hash_rxq_default_key[] = { 0xfc, 0x1f, 0xdc, 0x2a, }; +/* Length of the default RSS hash key. */ +const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); + /** - * Return nearest power of two above input value. + * Allocate RX queue elements. * - * @param v - * Input value. + * @param rxq_ctrl + * Pointer to RX queue structure. * * @return - * Nearest power of two above input value. + * 0 on success, errno value on failure. + */ +int +rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; + unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; + unsigned int i; + int ret = 0; + + /* Iterate on segments. */ + for (i = 0; (i != elts_n); ++i) { + struct rte_mbuf *buf; + + buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); + if (buf == NULL) { + ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); + ret = ENOMEM; + goto error; + } + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + assert(rte_pktmbuf_data_len(buf) == 0); + assert(rte_pktmbuf_pkt_len(buf) == 0); + assert(!buf->next); + /* Only the first segment keeps headroom. */ + if (i % sges_n) + SET_DATA_OFF(buf, 0); + PORT(buf) = rxq_ctrl->rxq.port_id; + DATA_LEN(buf) = rte_pktmbuf_tailroom(buf); + PKT_LEN(buf) = DATA_LEN(buf); + NB_SEGS(buf) = 1; + (*rxq_ctrl->rxq.elts)[i] = buf; + } + /* If Rx vector is activated. */ + if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; + int j; + + /* Initialize default rearm_data for vPMD. */ + mbuf_init->data_off = RTE_PKTMBUF_HEADROOM; + rte_mbuf_refcnt_set(mbuf_init, 1); + mbuf_init->nb_segs = 1; + mbuf_init->port = rxq->port_id; + /* + * prevent compiler reordering: + * rearm_data covers previous fields. + */ + rte_compiler_barrier(); + rxq->mbuf_initializer = + *(uint64_t *)&mbuf_init->rearm_data; + /* Padding with a fake mbuf for vectorized Rx. 
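+	 * The vectorized burst may speculatively read up to
+	 * MLX5_VPMD_DESCS_PER_LOOP entries past the ring end; pointing
+	 * those extra slots at &rxq->fake_mbuf keeps such loads harmless.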
*/ + for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) + (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; + } + DEBUG("%p: allocated and configured %u segments (max %u packets)", + (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); + assert(ret == 0); + return 0; +error: + elts_n = i; + for (i = 0; (i != elts_n); ++i) { + if ((*rxq_ctrl->rxq.elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); + (*rxq_ctrl->rxq.elts)[i] = NULL; + } + DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); + assert(ret > 0); + return ret; +} + +/** + * Free RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. */ -static unsigned int -log2above(unsigned int v) +static void +rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) { - unsigned int l; - unsigned int r; + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + const uint16_t q_n = (1 << rxq->elts_n); + const uint16_t q_mask = q_n - 1; + uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); + uint16_t i; - for (l = 0, r = 0; (v >> 1); ++l, v >>= 1) - r |= (v & 1); - return (l + r); + DEBUG("%p: freeing WRs", (void *)rxq_ctrl); + if (rxq->elts == NULL) + return; + /** + * Some mbuf in the Ring belongs to the application. They cannot be + * freed. + */ + if (rxq_check_vec_support(rxq) > 0) { + for (i = 0; i < used; ++i) + (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; + rxq->rq_pi = rxq->rq_ci; + } + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq->elts)[i]); + (*rxq->elts)[i] = NULL; + } } /** - * Return the type corresponding to the n'th bit set. + * Clean up a RX queue. * - * @param table - * The indirection table. - * @param n - * The n'th bit set. + * Destroy objects, free allocated memory and reset the structure for reuse. * - * @return - * The corresponding hash_rxq_type. + * @param rxq_ctrl + * Pointer to RX queue structure. */ -static enum hash_rxq_type -hash_rxq_type_from_n(const struct ind_table_init *table, unsigned int n) +void +mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { - assert(n < table->hash_types_n); - while (((table->hash_types >> n) & 0x1) == 0) - ++n; - return n; + DEBUG("cleaning up %p", (void *)rxq_ctrl); + if (rxq_ctrl->ibv) + mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** - * Filter out disabled hash RX queue types from ind_table_init[]. + * Returns the per-queue supported offloads. * * @param priv * Pointer to private structure. - * @param[out] table - * Output table. * * @return - * Number of table entries. + * Supported Rx offloads. */ -static unsigned int -priv_make_ind_table_init(struct priv *priv, - struct ind_table_init (*table)[IND_TABLE_INIT_N]) +uint64_t +mlx5_priv_get_rx_queue_offloads(struct priv *priv) { - unsigned int i; - unsigned int j; - unsigned int table_n = 0; - /* Mandatory to receive frames not handled by normal hash RX queues. */ - unsigned int hash_types_sup = 1 << HASH_RXQ_ETH; - - /* Process other protocols only if more than one queue. */ - if (priv->rxqs_n > 1) - for (i = 0; (i != hash_rxq_init_n); ++i) - if (hash_rxq_init[i].hash_fields) - hash_types_sup |= (1 << i); - - /* Filter out entries whose protocols are not in the set. */ - for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) { - unsigned int nb; - unsigned int h; - - /* j is increased only if the table has valid protocols. 
*/ - assert(j <= i); - (*table)[j] = ind_table_init[i]; - (*table)[j].hash_types &= hash_types_sup; - for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h) - if (((*table)[j].hash_types >> h) & 0x1) - ++nb; - (*table)[i].hash_types_n = nb; - if (nb) { - ++table_n; - ++j; - } - } - return table_n; + struct mlx5_dev_config *config = &priv->config; + uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_JUMBO_FRAME); + + if (config->hw_fcs_strip) + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + if (config->hw_csum) + offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM); + if (config->hw_vlan_strip) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + return offloads; } + /** - * Initialize hash RX queues and indirection table. + * Returns the per-port supported offloads. * * @param priv * Pointer to private structure. - * * @return - * 0 on success, errno value on failure. + * Supported Rx offloads. */ -int -priv_create_hash_rxqs(struct priv *priv) +uint64_t +mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) { - /* If the requested number of WQs is not a power of two, use the - * maximum indirection table size for better balancing. - * The result is always rounded to the next power of two. */ - unsigned int wqs_n = - (1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ? - priv->ind_table_max_size : - priv->rxqs_n)); - struct ibv_exp_wq *wqs[wqs_n]; - struct ind_table_init ind_table_init[IND_TABLE_INIT_N]; - unsigned int ind_tables_n = - priv_make_ind_table_init(priv, &ind_table_init); - unsigned int hash_rxqs_n = 0; - struct hash_rxq (*hash_rxqs)[] = NULL; - struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL; - unsigned int i; - unsigned int j; - unsigned int k; - int err = 0; - - assert(priv->ind_tables == NULL); - assert(priv->ind_tables_n == 0); - assert(priv->hash_rxqs == NULL); - assert(priv->hash_rxqs_n == 0); - assert(priv->pd != NULL); - assert(priv->ctx != NULL); - if (priv->rxqs_n == 0) - return EINVAL; - assert(priv->rxqs != NULL); - if (ind_tables_n == 0) { - ERROR("all hash RX queue types have been filtered out," - " indirection table cannot be created"); - return EINVAL; - } - if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) { - ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n); - err = ERANGE; - goto error; - } - if (wqs_n != priv->rxqs_n) { - INFO("%u RX queues are configured, consider rounding this" - " number to the next power of two for better balancing", - priv->rxqs_n); - DEBUG("indirection table extended to assume %u WQs", wqs_n); - } - /* When the number of RX queues is not a power of two, the remaining - * table entries are padded with reused WQs and hashes are not spread - * uniformly. */ - for (i = 0, j = 0; (i != wqs_n); ++i) { - wqs[i] = (*priv->rxqs)[j]->wq; - if (++j == priv->rxqs_n) - j = 0; - } - /* Get number of hash RX queues to configure. */ - for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i) - hash_rxqs_n += ind_table_init[i].hash_types_n; - DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables", - hash_rxqs_n, priv->rxqs_n, ind_tables_n); - /* Create indirection tables. 
*/ - ind_tables = rte_calloc(__func__, ind_tables_n, - sizeof((*ind_tables)[0]), 0); - if (ind_tables == NULL) { - err = ENOMEM; - ERROR("cannot allocate indirection tables container: %s", - strerror(err)); - goto error; - } - for (i = 0; (i != ind_tables_n); ++i) { - struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = { - .pd = priv->pd, - .log_ind_tbl_size = 0, /* Set below. */ - .ind_tbl = wqs, - .comp_mask = 0, - }; - unsigned int ind_tbl_size = ind_table_init[i].max_size; - struct ibv_exp_rwq_ind_table *ind_table; - - if (wqs_n < ind_tbl_size) - ind_tbl_size = wqs_n; - ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size); - errno = 0; - ind_table = ibv_exp_create_rwq_ind_table(priv->ctx, - &ind_init_attr); - if (ind_table != NULL) { - (*ind_tables)[i] = ind_table; - continue; - } - /* Not clear whether errno is set. */ - err = (errno ? errno : EINVAL); - ERROR("RX indirection table creation failed with error %d: %s", - err, strerror(err)); - goto error; - } - /* Allocate array that holds hash RX queues and related data. */ - hash_rxqs = rte_calloc(__func__, hash_rxqs_n, - sizeof((*hash_rxqs)[0]), 0); - if (hash_rxqs == NULL) { - err = ENOMEM; - ERROR("cannot allocate hash RX queues container: %s", - strerror(err)); - goto error; - } - for (i = 0, j = 0, k = 0; - ((i != hash_rxqs_n) && (j != ind_tables_n)); - ++i) { - struct hash_rxq *hash_rxq = &(*hash_rxqs)[i]; - enum hash_rxq_type type = - hash_rxq_type_from_n(&ind_table_init[j], k); - struct ibv_exp_rx_hash_conf hash_conf = { - .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = sizeof(hash_rxq_default_key), - .rx_hash_key = hash_rxq_default_key, - .rx_hash_fields_mask = hash_rxq_init[type].hash_fields, - .rwq_ind_tbl = (*ind_tables)[j], - }; - struct ibv_exp_qp_init_attr qp_init_attr = { - .max_inl_recv = 0, /* Currently not supported. */ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | - IBV_EXP_QP_INIT_ATTR_RX_HASH), - .pd = priv->pd, - .rx_hash_conf = &hash_conf, - .port_num = priv->port, - }; - - DEBUG("using indirection table %u for hash RX queue %u", - j, i); - *hash_rxq = (struct hash_rxq){ - .priv = priv, - .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr), - .type = type, - }; - if (hash_rxq->qp == NULL) { - err = (errno ? errno : EINVAL); - ERROR("Hash RX QP creation failure: %s", - strerror(err)); - goto error; - } - if (++k < ind_table_init[j].hash_types_n) - continue; - /* Switch to the next indirection table and reset hash RX - * queue type array index. */ - ++j; - k = 0; - } - priv->ind_tables = ind_tables; - priv->ind_tables_n = ind_tables_n; - priv->hash_rxqs = hash_rxqs; - priv->hash_rxqs_n = hash_rxqs_n; - assert(err == 0); - return 0; -error: - if (hash_rxqs != NULL) { - for (i = 0; (i != hash_rxqs_n); ++i) { - struct ibv_qp *qp = (*hash_rxqs)[i].qp; + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; - if (qp == NULL) - continue; - claim_zero(ibv_destroy_qp(qp)); - } - rte_free(hash_rxqs); - } - if (ind_tables != NULL) { - for (j = 0; (j != ind_tables_n); ++j) { - struct ibv_exp_rwq_ind_table *ind_table = - (*ind_tables)[j]; - - if (ind_table == NULL) - continue; - claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table)); - } - rte_free(ind_tables); - } - return err; + return offloads; } /** - * Clean up hash RX queues and indirection table. + * Checks if the per-queue offload configuration is valid. * * @param priv * Pointer to private structure. + * @param offloads + * Per-queue offloads configuration. 
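+ *   (the DEV_RX_OFFLOAD_* bitmask passed in rte_eth_rxconf.offloads
+ *   by mlx5_rx_queue_setup()).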
+ * + * @return + * 1 if the configuration is valid, 0 otherwise. */ -void -priv_destroy_hash_rxqs(struct priv *priv) +static int +priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) { - unsigned int i; + uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = + mlx5_priv_get_rx_queue_offloads(priv); + uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv); - DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n); - if (priv->hash_rxqs_n == 0) { - assert(priv->hash_rxqs == NULL); - assert(priv->ind_tables == NULL); - return; - } - for (i = 0; (i != priv->hash_rxqs_n); ++i) { - struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; - unsigned int j, k; - - assert(hash_rxq->priv == priv); - assert(hash_rxq->qp != NULL); - /* Also check that there are no remaining flows. */ - assert(hash_rxq->allmulti_flow == NULL); - assert(hash_rxq->promisc_flow == NULL); - for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j) - for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k) - assert(hash_rxq->mac_flow[j][k] == NULL); - claim_zero(ibv_destroy_qp(hash_rxq->qp)); - } - priv->hash_rxqs_n = 0; - rte_free(priv->hash_rxqs); - priv->hash_rxqs = NULL; - for (i = 0; (i != priv->ind_tables_n); ++i) { - struct ibv_exp_rwq_ind_table *ind_table = - (*priv->ind_tables)[i]; - - assert(ind_table != NULL); - claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table)); - } - priv->ind_tables_n = 0; - rte_free(priv->ind_tables); - priv->ind_tables = NULL; + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return 0; + if (((port_offloads ^ offloads) & port_supp_offloads)) + return 0; + return 1; } /** - * Allocate RX queue elements with scattered packets support. * - * @param rxq - * Pointer to RX queue structure. - * @param elts_n - * Number of elements to allocate. - * @param[in] pool - * If not NULL, fetch buffers from this array instead of allocating them - * with rte_pktmbuf_alloc(). + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative errno value on failure. */ -static int -rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n, - struct rte_mbuf **pool) +int +mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - unsigned int i; - struct rxq_elt_sp (*elts)[elts_n] = - rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, - rxq->socket); + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); int ret = 0; - if (elts == NULL) { - ERROR("%p: can't allocate packets array", (void *)rxq); - ret = ENOMEM; - goto error; + priv_lock(priv); + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + WARN("%p: increased number of descriptors in RX queue %u" + " to the next power of two (%d)", + (void *)dev, idx, desc); } - /* For each WR (packet). 
*/ - for (i = 0; (i != elts_n); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges; - - /* These two arrays must have the same size. */ - assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs)); - /* For each SGE (segment). */ - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct ibv_sge *sge = &(*sges)[j]; - struct rte_mbuf *buf; - - if (pool != NULL) { - buf = *(pool++); - assert(buf != NULL); - rte_pktmbuf_reset(buf); - } else - buf = rte_pktmbuf_alloc(rxq->mp); - if (buf == NULL) { - assert(pool == NULL); - ERROR("%p: empty mbuf pool", (void *)rxq); - ret = ENOMEM; - goto error; - } - elt->bufs[j] = buf; - /* Headroom is reserved by rte_pktmbuf_alloc(). */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); - /* Buffer is supposed to be empty. */ - assert(rte_pktmbuf_data_len(buf) == 0); - assert(rte_pktmbuf_pkt_len(buf) == 0); - /* sge->addr must be able to store a pointer. */ - assert(sizeof(sge->addr) >= sizeof(uintptr_t)); - if (j == 0) { - /* The first SGE keeps its headroom. */ - sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); - sge->length = (buf->buf_len - - RTE_PKTMBUF_HEADROOM); - } else { - /* Subsequent SGEs lose theirs. */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); - SET_DATA_OFF(buf, 0); - sge->addr = (uintptr_t)buf->buf_addr; - sge->length = buf->buf_len; - } - sge->lkey = rxq->mr->lkey; - /* Redundant check for tailroom. */ - assert(sge->length == rte_pktmbuf_tailroom(buf)); - } + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->rxqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->rxqs_n); + priv_unlock(priv); + return -EOVERFLOW; } - DEBUG("%p: allocated and configured %u WRs (%zu segments)", - (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges))); - rxq->elts_n = elts_n; - rxq->elts_head = 0; - rxq->elts.sp = elts; - assert(ret == 0); - return 0; -error: - if (elts != NULL) { - assert(pool == NULL); - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct rte_mbuf *buf = elt->bufs[j]; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); - } - } - rte_free(elts); + if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { + ret = ENOTSUP; + ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " + "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, + (void *)dev, conf->offloads, + dev->data->dev_conf.rxmode.offloads, + (mlx5_priv_get_rx_port_offloads(priv) | + mlx5_priv_get_rx_queue_offloads(priv))); + goto out; } - DEBUG("%p: failed, freed everything", (void *)rxq); - assert(ret > 0); - return ret; + if (!mlx5_priv_rxq_releasable(priv, idx)) { + ret = EBUSY; + ERROR("%p: unable to release queue index %u", + (void *)dev, idx); + goto out; + } + mlx5_priv_rxq_release(priv, idx); + rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp); + if (!rxq_ctrl) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + ret = ENOMEM; + goto out; + } + DEBUG("%p: adding RX queue %p to list", + (void *)dev, (void *)rxq_ctrl); + (*priv->rxqs)[idx] = &rxq_ctrl->rxq; +out: + priv_unlock(priv); + return -ret; } /** - * Free RX queue elements with scattered packets support. + * DPDK callback to release a RX queue. * - * @param rxq - * Pointer to RX queue structure. + * @param dpdk_rxq + * Generic RX queue pointer. 
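+ *
+ * Invoked by the ethdev layer as the .rx_queue_release callback; this
+ * panics if the queue is still referenced by a flow (see the
+ * releasable check below).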
*/ -static void -rxq_free_elts_sp(struct rxq *rxq) +void +mlx5_rx_queue_release(void *dpdk_rxq) { - unsigned int i; - unsigned int elts_n = rxq->elts_n; - struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp; + struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq; + struct mlx5_rxq_ctrl *rxq_ctrl; + struct priv *priv; - DEBUG("%p: freeing WRs", (void *)rxq); - rxq->elts_n = 0; - rxq->elts.sp = NULL; - if (elts == NULL) + if (rxq == NULL) return; - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct rte_mbuf *buf = elt->bufs[j]; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); - } - } - rte_free(elts); + rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); + priv = rxq_ctrl->priv; + priv_lock(priv); + if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) + rte_panic("Rx queue %p is still used by a flow and cannot be" + " removed\n", (void *)rxq_ctrl); + mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); + priv_unlock(priv); } /** - * Allocate RX queue elements. + * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param rxq - * Pointer to RX queue structure. - * @param elts_n - * Number of elements to allocate. - * @param[in] pool - * If not NULL, fetch buffers from this array instead of allocating them - * with rte_pktmbuf_alloc(). + * @param priv + * Pointer to private structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative on failure. */ -static int -rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool) +int +priv_rx_intr_vec_enable(struct priv *priv) { unsigned int i; - struct rxq_elt (*elts)[elts_n] = - rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, - rxq->socket); - int ret = 0; + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + unsigned int count = 0; + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; - if (elts == NULL) { - ERROR("%p: can't allocate packets array", (void *)rxq); - ret = ENOMEM; - goto error; + if (!priv->dev->data->dev_conf.intr_conf.rxq) + return 0; + priv_rx_intr_vec_disable(priv); + intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); + if (intr_handle->intr_vec == NULL) { + ERROR("failed to allocate memory for interrupt vector," + " Rx interrupts will not be supported"); + return -ENOMEM; } - /* For each WR (packet). */ - for (i = 0; (i != elts_n); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct ibv_sge *sge = &(*elts)[i].sge; - struct rte_mbuf *buf; + intr_handle->type = RTE_INTR_HANDLE_EXT; + for (i = 0; i != n; ++i) { + /* This rxq ibv must not be released in this function. */ + struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + int fd; + int flags; + int rc; - if (pool != NULL) { - buf = *(pool++); - assert(buf != NULL); - rte_pktmbuf_reset(buf); - } else - buf = rte_pktmbuf_alloc(rxq->mp); - if (buf == NULL) { - assert(pool == NULL); - ERROR("%p: empty mbuf pool", (void *)rxq); - ret = ENOMEM; - goto error; + /* Skip queues that cannot request interrupts. */ + if (!rxq_ibv || !rxq_ibv->channel) { + /* Use invalid intr_vec[] index to disable entry. */ + intr_handle->intr_vec[i] = + RTE_INTR_VEC_RXTX_OFFSET + + RTE_MAX_RXTX_INTR_VEC_ID; + continue; } - elt->buf = buf; - /* Headroom is reserved by rte_pktmbuf_alloc(). */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); - /* Buffer is supposed to be empty. 
*/ - assert(rte_pktmbuf_data_len(buf) == 0); - assert(rte_pktmbuf_pkt_len(buf) == 0); - /* sge->addr must be able to store a pointer. */ - assert(sizeof(sge->addr) >= sizeof(uintptr_t)); - /* SGE keeps its headroom. */ - sge->addr = (uintptr_t) - ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM); - sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM); - sge->lkey = rxq->mr->lkey; - /* Redundant check for tailroom. */ - assert(sge->length == rte_pktmbuf_tailroom(buf)); - } - DEBUG("%p: allocated and configured %u single-segment WRs", - (void *)rxq, elts_n); - rxq->elts_n = elts_n; - rxq->elts_head = 0; - rxq->elts.no_sp = elts; - assert(ret == 0); - return 0; -error: - if (elts != NULL) { - assert(pool == NULL); - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); + if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { + ERROR("too many Rx queues for interrupt vector size" + " (%d), Rx interrupts cannot be enabled", + RTE_MAX_RXTX_INTR_VEC_ID); + priv_rx_intr_vec_disable(priv); + return -1; } - rte_free(elts); + fd = rxq_ibv->channel->fd; + flags = fcntl(fd, F_GETFL); + rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + ERROR("failed to make Rx interrupt file descriptor" + " %d non-blocking for queue index %d", fd, i); + priv_rx_intr_vec_disable(priv); + return -1; + } + intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; + intr_handle->efds[count] = fd; + count++; } - DEBUG("%p: failed, freed everything", (void *)rxq); - assert(ret > 0); - return ret; + if (!count) + priv_rx_intr_vec_disable(priv); + else + intr_handle->nb_efd = count; + return 0; } /** - * Free RX queue elements. + * Clean up Rx interrupts handler. * - * @param rxq - * Pointer to RX queue structure. + * @param priv + * Pointer to private structure. */ -static void -rxq_free_elts(struct rxq *rxq) +void +priv_rx_intr_vec_disable(struct priv *priv) { + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; unsigned int i; - unsigned int elts_n = rxq->elts_n; - struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp; + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); - DEBUG("%p: freeing WRs", (void *)rxq); - rxq->elts_n = 0; - rxq->elts.no_sp = NULL; - if (elts == NULL) + if (!priv->dev->data->dev_conf.intr_conf.rxq) return; - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; + if (!intr_handle->intr_vec) + goto free; + for (i = 0; i != n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_data *rxq_data; - if (buf != NULL) - rte_pktmbuf_free_seg(buf); + if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET + + RTE_MAX_RXTX_INTR_VEC_ID) + continue; + /** + * Need to access directly the queue to release the reference + * kept in priv_rx_intr_vec_enable(). + */ + rxq_data = (*priv->rxqs)[i]; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); } - rte_free(elts); +free: + rte_intr_free_epoll_fd(intr_handle); + if (intr_handle->intr_vec) + free(intr_handle->intr_vec); + intr_handle->nb_efd = 0; + intr_handle->intr_vec = NULL; } /** - * Clean up a RX queue. + * MLX5 CQ notification . * - * Destroy objects, free allocated memory and reset the structure for reuse. + * @param rxq + * Pointer to receive queue structure. + * @param sq_n_rxq + * Sequence number per receive queue . 
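+ *
+ * The function builds a 64-bit doorbell from the arm sequence number,
+ * the CQ consumer index and the CQ number, roughly:
+ *
+ *   doorbell_hi = (sq_n & MLX5_CQ_SQN_MASK) << MLX5_CQ_SQN_OFFSET |
+ *                 (cq_ci & MLX5_CI_MASK);
+ *   doorbell = ((uint64_t)doorbell_hi << 32) | cqn;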
+ */ +static inline void +mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) +{ + int sq_n = 0; + uint32_t doorbell_hi; + uint64_t doorbell; + void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL; + + sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK; + doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK); + doorbell = (uint64_t)doorbell_hi << 32; + doorbell |= rxq->cqn; + rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); + rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg); +} + +/** + * DPDK callback for Rx queue interrupt enable. * - * @param rxq - * Pointer to RX queue structure. + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Rx queue number. + * + * @return + * 0 on success, negative on failure. */ -void -rxq_cleanup(struct rxq *rxq) +int +mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct ibv_exp_release_intf_params params; + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + int ret = 0; - DEBUG("cleaning up %p", (void *)rxq); - if (rxq->sp) - rxq_free_elts_sp(rxq); - else - rxq_free_elts(rxq); - if (rxq->if_wq != NULL) { - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - assert(rxq->wq != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(rxq->priv->ctx, - rxq->if_wq, - ¶ms)); - } - if (rxq->if_cq != NULL) { - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - assert(rxq->cq != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(rxq->priv->ctx, - rxq->if_cq, - ¶ms)); + priv_lock(priv); + rxq_data = (*priv->rxqs)[rx_queue_id]; + if (!rxq_data) { + ret = EINVAL; + goto exit; } - if (rxq->wq != NULL) - claim_zero(ibv_exp_destroy_wq(rxq->wq)); - if (rxq->cq != NULL) - claim_zero(ibv_destroy_cq(rxq->cq)); - if (rxq->rd != NULL) { - struct ibv_exp_destroy_res_domain_attr attr = { - .comp_mask = 0, - }; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl->irq) { + struct mlx5_rxq_ibv *rxq_ibv; - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx, - rxq->rd, - &attr)); + rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + if (!rxq_ibv) { + ret = EINVAL; + goto exit; + } + mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); + mlx5_priv_rxq_ibv_release(priv, rxq_ibv); } - if (rxq->mr != NULL) - claim_zero(ibv_dereg_mr(rxq->mr)); - memset(rxq, 0, sizeof(*rxq)); +exit: + priv_unlock(priv); + if (ret) + WARN("unable to arm interrupt on rx queue %d", rx_queue_id); + return -ret; } /** - * Reconfigure a RX queue with new parameters. - * - * rxq_rehash() does not allocate mbufs, which, if not done from the right - * thread (such as a control thread), may corrupt the pool. - * In case of failure, the queue is left untouched. + * DPDK callback for Rx queue interrupt disable. * * @param dev * Pointer to Ethernet device structure. - * @param rxq - * RX queue pointer. + * @param rx_queue_id + * Rx queue number. * * @return - * 0 on success, errno value on failure. + * 0 on success, negative on failure. 
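+ *
+ * A minimal wait loop pairing the enable/disable callbacks through the
+ * public API could look like this (port_id/queue_id are placeholders):
+ *
+ *   struct rte_epoll_event event;
+ *
+ *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
+ *                             RTE_INTR_EVENT_ADD, NULL);
+ *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
+ *   rte_eth_dev_rx_intr_disable(port_id, queue_id);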
*/ int -rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) +mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = rxq->priv; - struct rxq tmpl = *rxq; - unsigned int mbuf_n; - unsigned int desc_n; - struct rte_mbuf **pool; - unsigned int i, k; - struct ibv_exp_wq_attr mod; - int err; - - DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); - /* Number of descriptors and mbufs currently allocated. */ - desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1)); - mbuf_n = desc_n; - /* Toggle RX checksum offload if hardware supports it. */ - if (priv->hw_csum) { - tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - rxq->csum = tmpl.csum; + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_ibv *rxq_ibv = NULL; + struct ibv_cq *ev_cq; + void *ev_ctx; + int ret = 0; + + priv_lock(priv); + rxq_data = (*priv->rxqs)[rx_queue_id]; + if (!rxq_data) { + ret = EINVAL; + goto exit; } - if (priv->hw_csum_l2tun) { - tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - rxq->csum_l2tun = tmpl.csum_l2tun; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (!rxq_ctrl->irq) + goto exit; + rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + if (!rxq_ibv) { + ret = EINVAL; + goto exit; } - /* Enable scattered packets support for this queue if necessary. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { - tmpl.sp = 1; - desc_n /= MLX5_PMD_SGE_WR_N; - } else - tmpl.sp = 0; - DEBUG("%p: %s scattered packets support (%u WRs)", - (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n); - /* If scatter mode is the same as before, nothing to do. */ - if (tmpl.sp == rxq->sp) { - DEBUG("%p: nothing to do", (void *)dev); - return 0; - } - /* From now on, any failure will render the queue unusable. - * Reinitialize WQ. */ - mod = (struct ibv_exp_wq_attr){ - .attr_mask = IBV_EXP_WQ_ATTR_STATE, - .wq_state = IBV_EXP_WQS_RESET, - }; - err = ibv_exp_modify_wq(tmpl.wq, &mod); - if (err) { - ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err)); - assert(err > 0); - return err; - } - /* Allocate pool. */ - pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0); - if (pool == NULL) { - ERROR("%p: cannot allocate memory", (void *)dev); - return ENOBUFS; - } - /* Snatch mbufs from original queue. */ - k = 0; - if (rxq->sp) { - struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt_sp *elt = &(*elts)[i]; - unsigned int j; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - assert(elt->bufs[j] != NULL); - pool[k++] = elt->bufs[j]; - } - } - } else { - struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; - - pool[k++] = buf; - } - } - assert(k == mbuf_n); - tmpl.elts_n = 0; - tmpl.elts.sp = NULL; - assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp); - err = ((tmpl.sp) ? - rxq_alloc_elts_sp(&tmpl, desc_n, pool) : - rxq_alloc_elts(&tmpl, desc_n, pool)); - if (err) { - ERROR("%p: cannot reallocate WRs, aborting", (void *)dev); - rte_free(pool); - assert(err > 0); - return err; - } - assert(tmpl.elts_n == desc_n); - assert(tmpl.elts.sp != NULL); - rte_free(pool); - /* Clean up original data. 
*/ - rxq->elts_n = 0; - rte_free(rxq->elts.sp); - rxq->elts.sp = NULL; - /* Change queue state to ready. */ - mod = (struct ibv_exp_wq_attr){ - .attr_mask = IBV_EXP_WQ_ATTR_STATE, - .wq_state = IBV_EXP_WQS_RDY, - }; - err = ibv_exp_modify_wq(tmpl.wq, &mod); - if (err) { - ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s", - (void *)dev, strerror(err)); - goto error; - } - /* Post SGEs. */ - assert(tmpl.if_wq != NULL); - if (tmpl.sp) { - struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - err = tmpl.if_wq->recv_sg_list - (tmpl.wq, - (*elts)[i].sges, - RTE_DIM((*elts)[i].sges)); - if (err) - break; - } - } else { - struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - err = tmpl.if_wq->recv_burst( - tmpl.wq, - &(*elts)[i].sge, - 1); - if (err) - break; - } - } - if (err) { - ERROR("%p: failed to post SGEs with error %d", - (void *)dev, err); - /* Set err because it does not contain a valid errno value. */ - err = EIO; - goto error; + ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); + if (ret || ev_cq != rxq_ibv->cq) { + ret = EINVAL; + goto exit; } -error: - *rxq = tmpl; - assert(err >= 0); - return err; + rxq_data->cq_arm_sn++; + mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); +exit: + if (rxq_ibv) + mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + priv_unlock(priv); + if (ret) + WARN("unable to disable interrupt on rx queue %d", + rx_queue_id); + return -ret; } /** - * Configure a RX queue. + * Create the Rx queue Verbs object. * - * @param dev - * Pointer to Ethernet device structure. - * @param rxq - * Pointer to RX queue structure. - * @param desc - * Number of descriptors to configure in queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param[in] conf - * Thresholds parameters. - * @param mp - * Memory pool for buffer allocations. + * @param priv + * Pointer to private structure. + * @param idx + * Queue index in DPDK Rx queue array * * @return - * 0 on success, errno value on failure. + * The Verbs object initialised if it can be created. */ -int -rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +struct mlx5_rxq_ibv* +mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) { - struct priv *priv = dev->data->dev_private; - struct rxq tmpl = { - .priv = priv, - .mp = mp, - .socket = socket - }; - struct ibv_exp_wq_attr mod; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct ibv_wq_attr mod; union { - struct ibv_exp_query_intf_params params; - struct ibv_exp_cq_init_attr cq; - struct ibv_exp_res_domain_init_attr rd; - struct ibv_exp_wq_init_attr wq; + struct { + struct ibv_cq_init_attr_ex ibv; + struct mlx5dv_cq_init_attr mlx5; + } cq; + struct ibv_wq_init_attr wq; + struct ibv_cq_ex cq_attr; } attr; - enum ibv_exp_query_intf_status status; - struct rte_mbuf *buf; - int ret = 0; + unsigned int cqe_n = (1 << rxq_data->elts_n) - 1; + struct mlx5_rxq_ibv *tmpl; + struct mlx5dv_cq cq_info; + struct mlx5dv_rwq rwq; unsigned int i; - unsigned int cq_size = desc; + int ret = 0; + struct mlx5dv_obj obj; + struct mlx5_dev_config *config = &priv->config; - (void)conf; /* Thresholds configuration (ignored). 
*/ - if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { - ERROR("%p: invalid number of RX descriptors (must be a" - " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); - return EINVAL; - } - /* Get mbuf length. */ - buf = rte_pktmbuf_alloc(mp); - if (buf == NULL) { - ERROR("%p: unable to allocate mbuf", (void *)dev); - return ENOMEM; - } - tmpl.mb_len = buf->buf_len; - assert((rte_pktmbuf_headroom(buf) + - rte_pktmbuf_tailroom(buf)) == tmpl.mb_len); - assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM); - rte_pktmbuf_free(buf); - /* Toggle RX checksum offload if hardware supports it. */ - if (priv->hw_csum) - tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - if (priv->hw_csum_l2tun) - tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - /* Enable scattered packets support for this queue if necessary. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { - tmpl.sp = 1; - desc /= MLX5_PMD_SGE_WR_N; + assert(rxq_data); + assert(!rxq_ctrl->ibv); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE; + priv->verbs_alloc_ctx.obj = rxq_ctrl; + tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, + rxq_ctrl->socket); + if (!tmpl) { + ERROR("%p: cannot allocate verbs resources", + (void *)rxq_ctrl); + goto error; } - DEBUG("%p: %s scattered packets support (%u WRs)", - (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc); + tmpl->rxq_ctrl = rxq_ctrl; /* Use the entire RX mempool as the memory region. */ - tmpl.mr = ibv_reg_mr(priv->pd, - (void *)mp->elt_va_start, - (mp->elt_va_end - mp->elt_va_start), - (IBV_ACCESS_LOCAL_WRITE | - IBV_ACCESS_REMOTE_WRITE)); - if (tmpl.mr == NULL) { - ret = EINVAL; - ERROR("%p: MR creation failure: %s", - (void *)dev, strerror(ret)); - goto error; + tmpl->mr = priv_mr_get(priv, rxq_data->mp); + if (!tmpl->mr) { + tmpl->mr = priv_mr_new(priv, rxq_data->mp); + if (!tmpl->mr) { + ERROR("%p: MR creation failure", (void *)rxq_ctrl); + goto error; + } } - attr.rd = (struct ibv_exp_res_domain_init_attr){ - .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | - IBV_EXP_RES_DOMAIN_MSG_MODEL), - .thread_model = IBV_EXP_THREAD_SINGLE, - .msg_model = IBV_EXP_MSG_HIGH_BW, - }; - tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); - if (tmpl.rd == NULL) { - ret = ENOMEM; - ERROR("%p: RD creation failure: %s", - (void *)dev, strerror(ret)); - goto error; + if (rxq_ctrl->irq) { + tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx); + if (!tmpl->channel) { + ERROR("%p: Comp Channel creation failure", + (void *)rxq_ctrl); + goto error; + } } - attr.cq = (struct ibv_exp_cq_init_attr){ - .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, - .res_domain = tmpl.rd, + attr.cq.ibv = (struct ibv_cq_init_attr_ex){ + .cqe = cqe_n, + .channel = tmpl->channel, + .comp_mask = 0, }; - tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0, - &attr.cq); - if (tmpl.cq == NULL) { - ret = ENOMEM; - ERROR("%p: CQ creation failure: %s", - (void *)dev, strerror(ret)); + attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){ + .comp_mask = 0, + }; + if (config->cqe_comp && !rxq_data->hw_timestamp) { + attr.cq.mlx5.comp_mask |= + MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; + attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; + /* + * For vectorized Rx, it must not be doubled in order to + * make cq_ci and rq_ci aligned. 
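+		 * With compression, one mini-CQE array can expand into
+		 * several completions, so the regular datapath doubles
+		 * the CQ depth relative to the RQ to avoid overrunning
+		 * it.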
+ */ + if (rxq_check_vec_support(rxq_data) < 0) + attr.cq.ibv.cqe *= 2; + } else if (config->cqe_comp && rxq_data->hw_timestamp) { + DEBUG("Rx CQE compression is disabled for HW timestamp"); + } + tmpl->cq = mlx5_glue->cq_ex_to_cq + (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv, + &attr.cq.mlx5)); + if (tmpl->cq == NULL) { + ERROR("%p: CQ creation failure", (void *)rxq_ctrl); goto error; } DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.max_qp_wr); + priv->device_attr.orig_attr.max_qp_wr); DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.max_sge); - attr.wq = (struct ibv_exp_wq_init_attr){ + priv->device_attr.orig_attr.max_sge); + attr.wq = (struct ibv_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ - .wq_type = IBV_EXP_WQT_RQ, + .wq_type = IBV_WQT_RQ, /* Max number of outstanding WRs. */ - .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ? - priv->device_attr.max_qp_wr : - (int)cq_size), + .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n, /* Max number of scatter/gather elements in a WR. */ - .max_recv_sge = ((priv->device_attr.max_sge < - MLX5_PMD_SGE_WR_N) ? - priv->device_attr.max_sge : - MLX5_PMD_SGE_WR_N), + .max_sge = 1 << rxq_data->sges_n, .pd = priv->pd, - .cq = tmpl.cq, - .comp_mask = IBV_EXP_CREATE_WQ_RES_DOMAIN, - .res_domain = tmpl.rd, + .cq = tmpl->cq, + .comp_mask = + IBV_WQ_FLAGS_CVLAN_STRIPPING | + 0, + .create_flags = (rxq_data->vlan_strip ? + IBV_WQ_FLAGS_CVLAN_STRIPPING : + 0), }; - tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq); - if (tmpl.wq == NULL) { - ret = (errno ? errno : EINVAL); - ERROR("%p: WQ creation failure: %s", - (void *)dev, strerror(ret)); - goto error; + /* By default, FCS (CRC) is stripped by hardware. */ + if (rxq_data->crc_present) { + attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } - if (tmpl.sp) - ret = rxq_alloc_elts_sp(&tmpl, desc, NULL); - else - ret = rxq_alloc_elts(&tmpl, desc, NULL); - if (ret) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(ret)); - goto error; +#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING + if (config->hw_padding) { + attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; + attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } - /* Save port ID. */ - tmpl.port_id = dev->data->port_id; - DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id); - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_CQ, - .obj = tmpl.cq, - }; - tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_cq == NULL) { - ERROR("%p: CQ interface family query failed with status %d", - (void *)dev, status); +#endif + tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); + if (tmpl->wq == NULL) { + ERROR("%p: WQ creation failure", (void *)rxq_ctrl); goto error; } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_WQ, - .obj = tmpl.wq, - }; - tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_wq == NULL) { - ERROR("%p: WQ interface family query failed with status %d", - (void *)dev, status); + /* + * Make sure number of WRs*SGEs match expectations since a queue + * cannot allocate more than "desc" buffers. 
+ */ + if (((int)attr.wq.max_wr != + ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) || + ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) { + ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs", + (void *)rxq_ctrl, + ((1 << rxq_data->elts_n) >> rxq_data->sges_n), + (1 << rxq_data->sges_n), + attr.wq.max_wr, attr.wq.max_sge); goto error; } /* Change queue state to ready. */ - mod = (struct ibv_exp_wq_attr){ - .attr_mask = IBV_EXP_WQ_ATTR_STATE, - .wq_state = IBV_EXP_WQS_RDY, + mod = (struct ibv_wq_attr){ + .attr_mask = IBV_WQ_ATTR_STATE, + .wq_state = IBV_WQS_RDY, }; - ret = ibv_exp_modify_wq(tmpl.wq, &mod); + ret = mlx5_glue->modify_wq(tmpl->wq, &mod); if (ret) { - ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s", - (void *)dev, strerror(ret)); + ERROR("%p: WQ state to IBV_WQS_RDY failed", + (void *)rxq_ctrl); goto error; } - /* Post SGEs. */ - if (tmpl.sp) { - struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - ret = tmpl.if_wq->recv_sg_list - (tmpl.wq, - (*elts)[i].sges, - RTE_DIM((*elts)[i].sges)); - if (ret) - break; - } - } else { - struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - ret = tmpl.if_wq->recv_burst( - tmpl.wq, - &(*elts)[i].sge, - 1); - if (ret) - break; - } - } - if (ret) { - ERROR("%p: failed to post SGEs with error %d", - (void *)dev, ret); - /* Set ret because it does not contain a valid errno value. */ - ret = EIO; + obj.cq.in = tmpl->cq; + obj.cq.out = &cq_info; + obj.rwq.in = tmpl->wq; + obj.rwq.out = &rwq; + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); + if (ret != 0) + goto error; + if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { + ERROR("Wrong MLX5_CQE_SIZE environment variable value: " + "it should be set to %u", RTE_CACHE_LINE_SIZE); goto error; } - /* Clean up rxq in case we're reinitializing it. */ - DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq); - rxq_cleanup(rxq); - *rxq = tmpl; - DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl); - assert(ret == 0); - return 0; + /* Fill the rings. */ + rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[]) + (uintptr_t)rwq.buf; + for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) { + struct rte_mbuf *buf = (*rxq_data->elts)[i]; + volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i]; + + /* scat->addr must be able to store a pointer. */ + assert(sizeof(scat->addr) >= sizeof(uintptr_t)); + *scat = (struct mlx5_wqe_data_seg){ + .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, + uintptr_t)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = tmpl->mr->lkey, + }; + } + rxq_data->rq_db = rwq.dbrec; + rxq_data->cqe_n = log2above(cq_info.cqe_cnt); + rxq_data->cq_ci = 0; + rxq_data->rq_ci = 0; + rxq_data->rq_pi = 0; + rxq_data->zip = (struct rxq_zip){ + .ai = 0, + }; + rxq_data->cq_db = cq_info.dbrec; + rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf; + rxq_data->cq_uar = cq_info.cq_uar; + rxq_data->cqn = cq_info.cqn; + rxq_data->cq_arm_sn = 0; + /* Update doorbell counter. 
*/ + rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n; + rte_wmb(); + *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); + DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); + rte_atomic32_inc(&tmpl->refcnt); + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return tmpl; error: - rxq_cleanup(&tmpl); - assert(ret > 0); + if (tmpl->wq) + claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); + if (tmpl->cq) + claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); + if (tmpl->channel) + claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); + if (tmpl->mr) + priv_mr_release(priv, tmpl->mr); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return NULL; +} + +/** + * Get an Rx queue Verbs object. + * + * @param priv + * Pointer to private structure. + * @param idx + * Queue index in DPDK Rx queue array + * + * @return + * The Verbs object if it exists. + */ +struct mlx5_rxq_ibv* +mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +{ + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (idx >= priv->rxqs_n) + return NULL; + if (!rxq_data) + return NULL; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl->ibv) { + priv_mr_get(priv, rxq_data->mp); + rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + (void *)rxq_ctrl->ibv, + rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); + } + return rxq_ctrl->ibv; +} + +/** + * Release an Rx verbs queue object. + * + * @param priv + * Pointer to private structure. + * @param rxq_ibv + * Verbs Rx queue object. + * + * @return + * 0 on success, errno value on failure. + */ +int +mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +{ + int ret; + + assert(rxq_ibv); + assert(rxq_ibv->wq); + assert(rxq_ibv->cq); + assert(rxq_ibv->mr); + ret = priv_mr_release(priv, rxq_ibv->mr); + if (!ret) + rxq_ibv->mr = NULL; + DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, + (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); + if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { + rxq_free_elts(rxq_ibv->rxq_ctrl); + claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq)); + claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq)); + if (rxq_ibv->channel) + claim_zero(mlx5_glue->destroy_comp_channel + (rxq_ibv->channel)); + LIST_REMOVE(rxq_ibv, next); + rte_free(rxq_ibv); + return 0; + } + return EBUSY; +} + +/** + * Verify the Verbs Rx queue list is empty + * + * @param priv + * Pointer to private structure. + * + * @return the number of object not released. + */ +int +mlx5_priv_rxq_ibv_verify(struct priv *priv) +{ + int ret = 0; + struct mlx5_rxq_ibv *rxq_ibv; + + LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { + DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, + (void *)rxq_ibv); + ++ret; + } return ret; } /** - * DPDK callback to configure a RX queue. + * Return true if a single reference exists on the object. * - * @param dev - * Pointer to Ethernet device structure. + * @param priv + * Pointer to private structure. + * @param rxq_ibv + * Verbs Rx queue object. + */ +int +mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +{ + (void)priv; + assert(rxq_ibv); + return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); +} + +/** + * Create a DPDK Rx queue. + * + * @param priv + * Pointer to private structure. 
* @param idx - * RX queue index. + * TX queue index. * @param desc * Number of descriptors to configure in queue. * @param socket * NUMA socket on which memory must be allocated. - * @param[in] conf - * Thresholds parameters. - * @param mp - * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * A DPDK queue object on success. + */ +struct mlx5_rxq_ctrl* +mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct rte_eth_dev *dev = priv->dev; + struct mlx5_rxq_ctrl *tmpl; + unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + struct mlx5_dev_config *config = &priv->config; + /* + * Always allocate extra slots, even if eventually + * the vector Rx will not be used. + */ + const uint16_t desc_n = + desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + + tmpl = rte_calloc_socket("RXQ", 1, + sizeof(*tmpl) + + desc_n * sizeof(struct rte_mbuf *), + 0, socket); + if (!tmpl) + return NULL; + tmpl->socket = socket; + if (priv->dev->data->dev_conf.intr_conf.rxq) + tmpl->irq = 1; + /* Enable scattered packets support for this queue if necessary. */ + assert(mb_len >= RTE_PKTMBUF_HEADROOM); + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { + tmpl->rxq.sges_n = 0; + } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + unsigned int size = + RTE_PKTMBUF_HEADROOM + + dev->data->dev_conf.rxmode.max_rx_pkt_len; + unsigned int sges_n; + + /* + * Determine the number of SGEs needed for a full packet + * and round it to the next power of two. + */ + sges_n = log2above((size / mb_len) + !!(size % mb_len)); + tmpl->rxq.sges_n = sges_n; + /* Make sure rxq.sges_n did not overflow. */ + size = mb_len * (1 << tmpl->rxq.sges_n); + size -= RTE_PKTMBUF_HEADROOM; + if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { + ERROR("%p: too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + (void *)dev, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + goto error; + } + } else { + WARN("%p: the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" + " mode has not been requested", + (void *)dev, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); + } + DEBUG("%p: maximum number of segments per packet: %u", + (void *)dev, 1 << tmpl->rxq.sges_n); + if (desc % (1 << tmpl->rxq.sges_n)) { + ERROR("%p: number of RX queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + (void *)dev, + desc, + 1 << tmpl->rxq.sges_n); + goto error; + } + /* Toggle RX checksum offload if hardware supports it. */ + tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) && + priv->config.hw_csum_l2tun); + tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP); + /* Configure VLAN stripping. */ + tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + /* By default, FCS (CRC) is stripped by hardware. 
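+	 * crc_present is set only when the application wants to keep the
+	 * CRC (DEV_RX_OFFLOAD_CRC_STRIP not requested) and the firmware
+	 * supports disabling stripping (hw_fcs_strip); otherwise the
+	 * WARN below is emitted and the CRC is stripped anyway.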
*/ + if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { + tmpl->rxq.crc_present = 0; + } else if (config->hw_fcs_strip) { + tmpl->rxq.crc_present = 1; + } else { + WARN("%p: CRC stripping has been disabled but will still" + " be performed by hardware, make sure MLNX_OFED and" + " firmware are up to date", + (void *)dev); + tmpl->rxq.crc_present = 0; + } + DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + (void *)dev, + tmpl->rxq.crc_present ? "disabled" : "enabled", + tmpl->rxq.crc_present << 2); + /* Save port ID. */ + tmpl->rxq.rss_hash = priv->rxqs_n > 1; + tmpl->rxq.port_id = dev->data->port_id; + tmpl->priv = priv; + tmpl->rxq.mp = mp; + tmpl->rxq.stats.idx = idx; + tmpl->rxq.elts_n = log2above(desc); + tmpl->rxq.elts = + (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); + rte_atomic32_inc(&tmpl->refcnt); + DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; +error: + rte_free(tmpl); + return NULL; +} + +/** + * Get a Rx queue. + * + * @param priv + * Pointer to private structure. + * @param idx + * TX queue index. + * + * @return + * A pointer to the queue if it exists. + */ +struct mlx5_rxq_ctrl* +mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +{ + struct mlx5_rxq_ctrl *rxq_ctrl = NULL; + + if ((*priv->rxqs)[idx]) { + rxq_ctrl = container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, + rxq); + + mlx5_priv_rxq_ibv_get(priv, idx); + rte_atomic32_inc(&rxq_ctrl->refcnt); + DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, + (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + } + return rxq_ctrl; +} + +/** + * Release a Rx queue. + * + * @param priv + * Pointer to private structure. + * @param idx + * TX queue index. + * + * @return + * 0 on success, errno value on failure. 
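+ *
+ * The Verbs object is released first; the control structure itself is
+ * freed only once the reference count drops to zero, otherwise EBUSY
+ * is returned.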
+
+/**
+ * Release a Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   0 on success, errno value on failure.
 */
 int
-mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
-		    unsigned int socket, const struct rte_eth_rxconf *conf,
-		    struct rte_mempool *mp)
+mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
-	struct rxq *rxq = (*priv->rxqs)[idx];
-	int ret;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
 
-	priv_lock(priv);
-	DEBUG("%p: configuring queue %u for %u descriptors",
-	      (void *)dev, idx, desc);
-	if (idx >= priv->rxqs_n) {
-		ERROR("%p: queue index out of range (%u >= %u)",
-		      (void *)dev, idx, priv->rxqs_n);
-		priv_unlock(priv);
-		return -EOVERFLOW;
+	if (!(*priv->rxqs)[idx])
+		return 0;
+	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+	assert(rxq_ctrl->priv);
+	if (rxq_ctrl->ibv) {
+		int ret;
+
+		ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+		if (!ret)
+			rxq_ctrl->ibv = NULL;
 	}
-	if (rxq != NULL) {
-		DEBUG("%p: reusing already allocated queue index %u (%p)",
-		      (void *)dev, idx, (void *)rxq);
-		if (priv->started) {
-			priv_unlock(priv);
-			return -EEXIST;
-		}
+	DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+		LIST_REMOVE(rxq_ctrl, next);
+		rte_free(rxq_ctrl);
 		(*priv->rxqs)[idx] = NULL;
-		rxq_cleanup(rxq);
-	} else {
-		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
-		if (rxq == NULL) {
-			ERROR("%p: unable to allocate queue index %u",
-			      (void *)dev, idx);
-			priv_unlock(priv);
-			return -ENOMEM;
-		}
+		return 0;
 	}
-	ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
-	if (ret)
-		rte_free(rxq);
-	else {
-		rxq->stats.idx = idx;
-		DEBUG("%p: adding RX queue %p to list",
-		      (void *)dev, (void *)rxq);
-		(*priv->rxqs)[idx] = rxq;
-		/* Update receive callback. */
-		if (rxq->sp)
-			dev->rx_pkt_burst = mlx5_rx_burst_sp;
-		else
-			dev->rx_pkt_burst = mlx5_rx_burst;
+	return EBUSY;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   1 if the queue can be released, 0 otherwise, -1 if the queue is not
+ *   configured.
+ */
+int
+mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
+{
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+
+	if (!(*priv->rxqs)[idx])
+		return -1;
+	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
+/**
+ * Verify the Rx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_rxq_verify(struct priv *priv)
+{
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+	int ret = 0;
+
+	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+		DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
+		      (void *)rxq_ctrl);
+		++ret;
	}
-	priv_unlock(priv);
-	return -ret;
+	return ret;
 }
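The release and releasable helpers above rely on struct mlx5_rxq_data being embedded inside struct mlx5_rxq_ctrl, so container_of() can recover the control structure from the data-path pointer stored in (*priv->rxqs)[idx]. A self-contained sketch of that pattern, with simplified stand-in types (rxq_data/rxq_ctrl here are not the driver's real structures):

#include <stddef.h>
#include <stdio.h>

/* Same definition as DPDK's container_of() in rte_common.h. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rxq_data { int port_id; };
struct rxq_ctrl {
	int refcnt;
	struct rxq_data rxq; /* embedded, as in struct mlx5_rxq_ctrl */
};

int
main(void)
{
	struct rxq_ctrl ctrl = { .refcnt = 1, .rxq = { .port_id = 0 } };
	struct rxq_data *data = &ctrl.rxq; /* what (*priv->rxqs)[idx] holds */
	struct rxq_ctrl *back = container_of(data, struct rxq_ctrl, rxq);

	/* Recovers the enclosing control structure from the data pointer. */
	printf("recovered %p (expected %p)\n", (void *)back, (void *)&ctrl);
	return 0;
}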
 
 /**
- * DPDK callback to release a RX queue.
+ * Create an indirection table.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   A new indirection table.
  */
-void
-mlx5_rx_queue_release(void *dpdk_rxq)
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
 {
-	struct rxq *rxq = (struct rxq *)dpdk_rxq;
-	struct priv *priv;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+				  log2above(queues_n) :
+				  log2above(priv->config.ind_table_max_size);
+	struct ibv_wq *wq[1 << wq_n];
 	unsigned int i;
+	unsigned int j;
 
-	if (rxq == NULL)
-		return;
-	priv = rxq->priv;
-	priv_lock(priv);
-	for (i = 0; (i != priv->rxqs_n); ++i)
-		if ((*priv->rxqs)[i] == rxq) {
-			DEBUG("%p: removing RX queue %p from list",
-			      (void *)priv->dev, (void *)rxq);
-			(*priv->rxqs)[i] = NULL;
+	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+			     queues_n * sizeof(uint16_t), 0);
+	if (!ind_tbl)
+		return NULL;
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq =
+			mlx5_priv_rxq_get(priv, queues[i]);
+
+		if (!rxq)
+			goto error;
+		wq[i] = rxq->ibv->wq;
+		ind_tbl->queues[i] = queues[i];
+	}
+	ind_tbl->queues_n = queues_n;
+	/* Finalise the indirection table: replicate WQs to fill it up to a
+	 * power-of-two size. */
+	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
+		wq[i] = wq[j];
+	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+		(priv->ctx,
+		 &(struct ibv_rwq_ind_table_init_attr){
+			.log_ind_tbl_size = wq_n,
+			.ind_tbl = wq,
+			.comp_mask = 0,
+		 });
+	if (!ind_tbl->ind_table)
+		goto error;
+	rte_atomic32_inc(&ind_tbl->refcnt);
+	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	return ind_tbl;
+error:
+	rte_free(ind_tbl);
+	DEBUG("%p cannot create indirection table", (void *)priv);
+	return NULL;
+}
+
+/**
+ * Get an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   An indirection table if found, NULL otherwise.
+ */
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		if ((ind_tbl->queues_n == queues_n) &&
+		    (memcmp(ind_tbl->queues, queues,
+			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
+		     == 0))
			break;
+	}
+	if (ind_tbl) {
+		unsigned int i;
+
+		rte_atomic32_inc(&ind_tbl->refcnt);
+		DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+		for (i = 0; i != ind_tbl->queues_n; ++i)
+			mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+	}
+	return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ind_tbl
+ *   Indirection table to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_ind_table_ibv_release(struct priv *priv,
+				struct mlx5_ind_table_ibv *ind_tbl)
+{
+	unsigned int i;
+
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+		claim_zero(mlx5_glue->destroy_rwq_ind_table
+			   (ind_tbl->ind_table));
+	for (i = 0; i != ind_tbl->queues_n; ++i)
+		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+		LIST_REMOVE(ind_tbl, next);
+		rte_free(ind_tbl);
+		return 0;
+	}
+	return EBUSY;
+}
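mlx5_priv_ind_table_ibv_new() must build a power-of-two table even when queues_n is not a power of two; the wrap-around loop after the "Finalise" comment replicates the collected WQs round-robin. A standalone illustration, with ints standing in for struct ibv_wq pointers and an assumed ind_table_max_size of 4 to keep it short (real devices report much larger values, typically 512):

#include <stdio.h>

int
main(void)
{
	const unsigned int queues_n = 3; /* not a power of two */
	const unsigned int wq_n = 2;     /* log2above(ind_table_max_size=4) */
	int wq[1 << 2] = { 10, 11, 12 }; /* WQs of the three Rx queues */
	unsigned int i = queues_n;       /* i keeps its value, as in the driver */
	unsigned int j;

	/* Same wrap-around fill as in mlx5_priv_ind_table_ibv_new(). */
	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
		wq[i] = wq[j];
	for (i = 0; i != (unsigned int)(1 << wq_n); ++i)
		printf("entry %u -> WQ %d\n", i, wq[i]);
	/* Prints 10 11 12 10: queue 0 gets a double share of the traffic. */
	return 0;
}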
+
+/**
+ * Verify the Verbs indirection table list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+	int ret = 0;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		DEBUG("%p: Verbs indirection table %p still referenced",
+		      (void *)priv, (void *)ind_tbl);
+		++ret;
+	}
+	return ret;
+}
+
+/**
+ * Create a hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering the hash Rx queue. When hash_fields is zero, only the
+ *   first queue index is used for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct ibv_qp *qp;
+
+	queues_n = hash_fields ? queues_n : 1;
+	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+	if (!ind_tbl)
+		return NULL;
+	qp = mlx5_glue->create_qp_ex
+		(priv->ctx,
+		 &(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_key_len,
+				.rx_hash_key = rss_key,
+				.rx_hash_fields_mask = hash_fields,
+			},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->pd,
+		 });
+	if (!qp)
+		goto error;
+	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+	if (!hrxq)
+		goto error;
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+	hrxq->rss_key_len = rss_key_len;
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	rte_atomic32_inc(&hrxq->refcnt);
+	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	return hrxq;
+error:
+	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+	if (qp)
+		claim_zero(mlx5_glue->destroy_qp(qp));
+	return NULL;
+}
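Callers such as the flow engine are expected to try mlx5_priv_hrxq_get() first and only create a new hash Rx queue on a cache miss. A hypothetical wrapper (get_or_create_hrxq() is illustrative, not part of the patch, and assumes the driver's internal headers) using the default key exported at the top of this file:

/* Illustrative only: reuse a cached hash Rx queue when possible. */
static struct mlx5_hrxq *
get_or_create_hrxq(struct priv *priv, uint16_t queues[], uint16_t queues_n,
		   uint64_t hash_fields)
{
	struct mlx5_hrxq *hrxq;

	hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
				  (uint8_t)rss_hash_default_key_len,
				  hash_fields, queues, queues_n);
	if (!hrxq)
		hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					  (uint8_t)rss_hash_default_key_len,
					  hash_fields, queues, queues_n);
	return hrxq;
}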
+
+/**
+ * Get a hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering the hash Rx queue. When hash_fields is zero, only the
+ *   first queue index is used for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+
+	queues_n = hash_fields ? queues_n : 1;
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		struct mlx5_ind_table_ibv *ind_tbl;
+
+		if (hrxq->rss_key_len != rss_key_len)
+			continue;
+		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+			continue;
+		if (hrxq->hash_fields != hash_fields)
+			continue;
+		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+		if (!ind_tbl)
+			continue;
+		if (ind_tbl != hrxq->ind_table) {
+			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+			continue;
 		}
-	rxq_cleanup(rxq);
-	rte_free(rxq);
-	priv_unlock(priv);
+		rte_atomic32_inc(&hrxq->refcnt);
+		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		return hrxq;
+	}
+	return NULL;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param hrxq
+ *   Pointer to Hash Rx queue to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+{
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+		LIST_REMOVE(hrxq, next);
+		rte_free(hrxq);
+		return 0;
+	}
+	claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
+	return EBUSY;
+}
+
+/**
+ * Verify the Verbs hash Rx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+{
+	struct mlx5_hrxq *hrxq;
+	int ret = 0;
+
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
+		      (void *)priv, (void *)hrxq);
+		++ret;
+	}
+	return ret;
+}
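At device close, the three verify helpers above can be combined into a single leak check; priv_leak_check() below is a hypothetical sketch, not part of the patch, and assumes the driver's internal headers:

/* Hypothetical teardown check, e.g. called from the device close path.
 * A non-zero total means some queues or tables were not released. */
static int
priv_leak_check(struct priv *priv)
{
	int ret = 0;

	ret += mlx5_priv_hrxq_ibv_verify(priv);      /* hash Rx queues */
	ret += mlx5_priv_ind_table_ibv_verify(priv); /* indirection tables */
	ret += mlx5_priv_rxq_verify(priv);           /* Rx queue controls */
	return ret;
}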